From 15c0b25d011f37e7c20aeca9eaf461f78285b8d9 Mon Sep 17 00:00:00 2001
From: Alex Pilon
Date: Fri, 22 Feb 2019 18:24:37 -0500
Subject: deps: github.com/hashicorp/terraform@sdk-v0.11-with-go-modules

Updated via:

    go get github.com/hashicorp/terraform@sdk-v0.11-with-go-modules
    go mod tidy

---
 vendor/github.com/agext/levenshtein/.gitignore | 2 + vendor/github.com/agext/levenshtein/.travis.yml | 70 + vendor/github.com/agext/levenshtein/DCO | 36 + vendor/github.com/agext/levenshtein/LICENSE | 201 + vendor/github.com/agext/levenshtein/MAINTAINERS | 1 + vendor/github.com/agext/levenshtein/NOTICE | 5 + vendor/github.com/agext/levenshtein/README.md | 38 + vendor/github.com/agext/levenshtein/levenshtein.go | 290 + vendor/github.com/agext/levenshtein/params.go | 152 + .../github.com/apparentlymart/go-cidr/cidr/cidr.go | 102 +- .../github.com/apparentlymart/go-textseg/LICENSE | 95 + .../go-textseg/textseg/all_tokens.go | 30 + .../apparentlymart/go-textseg/textseg/generate.go | 7 + .../go-textseg/textseg/grapheme_clusters.go | 5276 ++++++++++++++++++ .../go-textseg/textseg/grapheme_clusters.rl | 132 + .../go-textseg/textseg/grapheme_clusters_table.rl | 1583 ++++++ .../go-textseg/textseg/make_tables.go | 307 ++ .../go-textseg/textseg/make_test_tables.go | 212 + .../apparentlymart/go-textseg/textseg/tables.go | 5700 ++++++++++++++++++++ .../go-textseg/textseg/unicode2ragel.rb | 335 ++ .../apparentlymart/go-textseg/textseg/utf8_seqs.go | 19 + vendor/github.com/armon/go-radix/.gitignore | 22 + vendor/github.com/armon/go-radix/.travis.yml | 3 + vendor/github.com/armon/go-radix/LICENSE | 20 + vendor/github.com/armon/go-radix/README.md | 38 + vendor/github.com/armon/go-radix/radix.go | 496 ++ .../github.com/aws/aws-sdk-go/aws/client/client.go | 10 +- .../aws/aws-sdk-go/aws/client/default_retryer.go | 80 +- .../github.com/aws/aws-sdk-go/aws/client/logger.go | 106 +- .../aws-sdk-go/aws/client/metadata/client_info.go | 1 + vendor/github.com/aws/aws-sdk-go/aws/config.go | 26 +- .../github.com/aws/aws-sdk-go/aws/context_1_6.go | 6 +- .../github.com/aws/aws-sdk-go/aws/convert_types.go | 18 + .../aws/aws-sdk-go/aws/corehandlers/handlers.go | 28 +- .../aws/aws-sdk-go/aws/corehandlers/user_agent.go | 37 + .../aws/aws-sdk-go/aws/credentials/credentials.go | 18 +- .../credentials/ec2rolecreds/ec2_role_provider.go | 6 +- vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go | 46 + vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go | 67 + vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go | 51 + .../aws/aws-sdk-go/aws/csm/metric_chan.go | 54 + .../github.com/aws/aws-sdk-go/aws/csm/reporter.go | 231 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 56 +- .../aws/aws-sdk-go/aws/ec2metadata/api.go | 8 +- .../aws/aws-sdk-go/aws/ec2metadata/service.go | 24 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1067 +++- .../github.com/aws/aws-sdk-go/aws/endpoints/doc.go | 4 +- .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 22 +- .../aws/aws-sdk-go/aws/endpoints/v3model.go | 12 +- vendor/github.com/aws/aws-sdk-go/aws/logger.go | 10 +- .../aws/aws-sdk-go/aws/request/handlers.go | 18 + .../aws/aws-sdk-go/aws/request/offset_reader.go | 4 +- .../aws/aws-sdk-go/aws/request/request.go | 210 +- .../aws/aws-sdk-go/aws/request/request_1_7.go | 2 +- .../aws/aws-sdk-go/aws/request/request_1_8.go | 2 +- .../aws-sdk-go/aws/request/request_pagination.go | 40 +- .../aws/aws-sdk-go/aws/request/retryer.go | 8 +- .../aws/aws-sdk-go/aws/request/validation.go | 2 +- .../aws/aws-sdk-go/aws/request/waiter.go | 16 +-
.../github.com/aws/aws-sdk-go/aws/session/doc.go | 2 +- .../aws/aws-sdk-go/aws/session/env_config.go | 35 +- .../aws/aws-sdk-go/aws/session/session.go | 64 +- .../github.com/aws/aws-sdk-go/aws/signer/v4/v4.go | 173 +- vendor/github.com/aws/aws-sdk-go/aws/types.go | 83 + vendor/github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 + .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 + .../aws-sdk-go/internal/sdkrand/locked_source.go | 29 + .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 + .../private/protocol/eventstream/debug.go | 144 + .../private/protocol/eventstream/decode.go | 199 + .../private/protocol/eventstream/encode.go | 114 + .../private/protocol/eventstream/error.go | 23 + .../protocol/eventstream/eventstreamapi/api.go | 196 + .../protocol/eventstream/eventstreamapi/error.go | 24 + .../private/protocol/eventstream/header.go | 166 + .../private/protocol/eventstream/header_value.go | 501 ++ .../private/protocol/eventstream/message.go | 103 + .../aws/aws-sdk-go/private/protocol/jsonvalue.go | 76 + .../aws/aws-sdk-go/private/protocol/payload.go | 81 + .../aws/aws-sdk-go/private/protocol/query/build.go | 2 +- .../private/protocol/query/queryutil/queryutil.go | 11 +- .../aws/aws-sdk-go/private/protocol/rest/build.go | 33 +- .../aws-sdk-go/private/protocol/rest/unmarshal.go | 20 +- .../aws/aws-sdk-go/private/protocol/timestamp.go | 72 + .../private/protocol/xml/xmlutil/build.go | 18 +- .../private/protocol/xml/xmlutil/unmarshal.go | 20 +- .../private/protocol/xml/xmlutil/xml_to_struct.go | 1 + vendor/github.com/aws/aws-sdk-go/service/s3/api.go | 5653 ++++++++++++++----- .../aws/aws-sdk-go/service/s3/body_hash.go | 249 + .../aws/aws-sdk-go/service/s3/content_md5.go | 36 - .../aws/aws-sdk-go/service/s3/customizations.go | 24 + vendor/github.com/aws/aws-sdk-go/service/s3/doc.go | 64 +- .../aws/aws-sdk-go/service/s3/doc_custom.go | 4 +- .../aws/aws-sdk-go/service/s3/host_style_bucket.go | 13 +- .../aws/aws-sdk-go/service/s3/service.go | 8 +- vendor/github.com/aws/aws-sdk-go/service/s3/sse.go | 18 +- .../aws/aws-sdk-go/service/s3/statusok_error.go | 3 +- .../aws/aws-sdk-go/service/s3/waiters.go | 8 +- .../github.com/aws/aws-sdk-go/service/sts/api.go | 317 +- .../github.com/aws/aws-sdk-go/service/sts/doc.go | 64 +- .../aws/aws-sdk-go/service/sts/service.go | 6 +- vendor/github.com/bgentry/speakeasy/.gitignore | 2 + .../github.com/bgentry/speakeasy/LICENSE_WINDOWS | 201 + vendor/github.com/bgentry/speakeasy/Readme.md | 30 + vendor/github.com/bgentry/speakeasy/speakeasy.go | 49 + .../github.com/bgentry/speakeasy/speakeasy_unix.go | 93 + .../bgentry/speakeasy/speakeasy_windows.go | 41 + vendor/github.com/davecgh/go-spew/LICENSE | 2 +- vendor/github.com/davecgh/go-spew/spew/bypass.go | 187 +- .../github.com/davecgh/go-spew/spew/bypasssafe.go | 2 +- vendor/github.com/davecgh/go-spew/spew/common.go | 2 +- vendor/github.com/davecgh/go-spew/spew/dump.go | 10 +- vendor/github.com/davecgh/go-spew/spew/format.go | 4 +- vendor/github.com/go-ini/ini/.travis.yml | 13 +- vendor/github.com/go-ini/ini/README.md | 8 +- vendor/github.com/go-ini/ini/README_ZH.md | 6 + vendor/github.com/go-ini/ini/ini.go | 54 +- vendor/github.com/go-ini/ini/key.go | 104 +- vendor/github.com/go-ini/ini/parser.go | 14 +- vendor/github.com/go-ini/ini/section.go | 25 +- vendor/github.com/go-ini/ini/struct.go | 37 +- vendor/github.com/golang/protobuf/AUTHORS | 3 + vendor/github.com/golang/protobuf/CONTRIBUTORS | 3 + vendor/github.com/golang/protobuf/LICENSE | 28 + 
vendor/github.com/golang/protobuf/proto/clone.go | 253 + vendor/github.com/golang/protobuf/proto/decode.go | 428 ++ vendor/github.com/golang/protobuf/proto/discard.go | 350 ++ vendor/github.com/golang/protobuf/proto/encode.go | 203 + vendor/github.com/golang/protobuf/proto/equal.go | 300 ++ .../github.com/golang/protobuf/proto/extensions.go | 543 ++ vendor/github.com/golang/protobuf/proto/lib.go | 979 ++++ .../golang/protobuf/proto/message_set.go | 314 ++ .../golang/protobuf/proto/pointer_reflect.go | 357 ++ .../golang/protobuf/proto/pointer_unsafe.go | 308 ++ .../github.com/golang/protobuf/proto/properties.go | 544 ++ .../golang/protobuf/proto/table_marshal.go | 2767 ++++++++++ .../golang/protobuf/proto/table_merge.go | 654 +++ .../golang/protobuf/proto/table_unmarshal.go | 2051 +++++++ vendor/github.com/golang/protobuf/proto/text.go | 843 +++ .../golang/protobuf/proto/text_parser.go | 880 +++ vendor/github.com/golang/protobuf/ptypes/any.go | 141 + .../golang/protobuf/ptypes/any/any.pb.go | 191 + .../golang/protobuf/ptypes/any/any.proto | 149 + vendor/github.com/golang/protobuf/ptypes/doc.go | 35 + .../github.com/golang/protobuf/ptypes/duration.go | 102 + .../golang/protobuf/ptypes/duration/duration.pb.go | 159 + .../golang/protobuf/ptypes/duration/duration.proto | 117 + .../github.com/golang/protobuf/ptypes/timestamp.go | 134 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 175 + .../protobuf/ptypes/timestamp/timestamp.proto | 133 + .../github.com/hashicorp/go-cleanhttp/cleanhttp.go | 1 + vendor/github.com/hashicorp/go-cleanhttp/go.mod | 1 + .../github.com/hashicorp/go-cleanhttp/handlers.go | 43 + vendor/github.com/hashicorp/go-getter/.travis.yml | 8 +- vendor/github.com/hashicorp/go-getter/README.md | 58 +- vendor/github.com/hashicorp/go-getter/appveyor.yml | 2 +- vendor/github.com/hashicorp/go-getter/client.go | 31 +- .../github.com/hashicorp/go-getter/decompress.go | 29 + .../hashicorp/go-getter/decompress_gzip.go | 2 +- .../hashicorp/go-getter/decompress_tar.go | 138 + .../hashicorp/go-getter/decompress_tbz2.go | 64 +- .../hashicorp/go-getter/decompress_testing.go | 36 +- .../hashicorp/go-getter/decompress_tgz.go | 62 +- .../hashicorp/go-getter/decompress_txz.go | 39 + .../hashicorp/go-getter/decompress_xz.go | 49 + .../hashicorp/go-getter/decompress_zip.go | 5 + vendor/github.com/hashicorp/go-getter/detect.go | 6 + .../github.com/hashicorp/go-getter/detect_file.go | 2 +- vendor/github.com/hashicorp/go-getter/get.go | 9 +- vendor/github.com/hashicorp/go-getter/get_git.go | 36 +- vendor/github.com/hashicorp/go-getter/get_hg.go | 10 +- vendor/github.com/hashicorp/go-getter/get_http.go | 49 +- vendor/github.com/hashicorp/go-getter/get_s3.go | 75 +- vendor/github.com/hashicorp/go-getter/source.go | 26 + vendor/github.com/hashicorp/go-hclog/LICENSE | 21 + vendor/github.com/hashicorp/go-hclog/README.md | 123 + vendor/github.com/hashicorp/go-hclog/global.go | 34 + vendor/github.com/hashicorp/go-hclog/int.go | 385 ++ vendor/github.com/hashicorp/go-hclog/log.go | 138 + vendor/github.com/hashicorp/go-hclog/stacktrace.go | 108 + vendor/github.com/hashicorp/go-hclog/stdlog.go | 62 + vendor/github.com/hashicorp/go-plugin/README.md | 49 +- vendor/github.com/hashicorp/go-plugin/client.go | 293 +- .../github.com/hashicorp/go-plugin/grpc_broker.go | 455 ++ .../hashicorp/go-plugin/grpc_broker.pb.go | 190 + .../hashicorp/go-plugin/grpc_broker.proto | 14 + .../github.com/hashicorp/go-plugin/grpc_client.go | 107 + .../github.com/hashicorp/go-plugin/grpc_server.go | 132 + 
vendor/github.com/hashicorp/go-plugin/log_entry.go | 73 + vendor/github.com/hashicorp/go-plugin/plugin.go | 33 + vendor/github.com/hashicorp/go-plugin/protocol.go | 45 + .../github.com/hashicorp/go-plugin/rpc_client.go | 47 + .../github.com/hashicorp/go-plugin/rpc_server.go | 20 +- vendor/github.com/hashicorp/go-plugin/server.go | 135 +- vendor/github.com/hashicorp/go-plugin/testing.go | 86 +- vendor/github.com/hashicorp/go-safetemp/LICENSE | 362 ++ vendor/github.com/hashicorp/go-safetemp/README.md | 10 + .../github.com/hashicorp/go-safetemp/safetemp.go | 40 + vendor/github.com/hashicorp/go-uuid/.travis.yml | 12 + vendor/github.com/hashicorp/go-uuid/README.md | 4 +- vendor/github.com/hashicorp/go-uuid/go.mod | 1 + vendor/github.com/hashicorp/go-uuid/uuid.go | 16 +- vendor/github.com/hashicorp/go-version/.travis.yml | 2 + .../github.com/hashicorp/go-version/constraint.go | 34 +- vendor/github.com/hashicorp/go-version/go.mod | 1 + vendor/github.com/hashicorp/go-version/version.go | 59 +- vendor/github.com/hashicorp/hcl2/LICENSE | 353 ++ vendor/github.com/hashicorp/hcl2/gohcl/decode.go | 304 ++ vendor/github.com/hashicorp/hcl2/gohcl/doc.go | 49 + vendor/github.com/hashicorp/hcl2/gohcl/schema.go | 174 + vendor/github.com/hashicorp/hcl2/gohcl/types.go | 16 + vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go | 103 + .../hashicorp/hcl2/hcl/diagnostic_text.go | 168 + vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go | 24 + vendor/github.com/hashicorp/hcl2/hcl/doc.go | 1 + .../github.com/hashicorp/hcl2/hcl/eval_context.go | 25 + vendor/github.com/hashicorp/hcl2/hcl/expr_call.go | 46 + vendor/github.com/hashicorp/hcl2/hcl/expr_list.go | 37 + vendor/github.com/hashicorp/hcl2/hcl/expr_map.go | 44 + .../github.com/hashicorp/hcl2/hcl/expr_unwrap.go | 68 + .../hashicorp/hcl2/hcl/hclsyntax/didyoumean.go | 24 + .../github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go | 7 + .../hashicorp/hcl2/hcl/hclsyntax/expression.go | 1275 +++++ .../hashicorp/hcl2/hcl/hclsyntax/expression_ops.go | 258 + .../hcl2/hcl/hclsyntax/expression_template.go | 192 + .../hcl2/hcl/hclsyntax/expression_vars.go | 76 + .../hcl2/hcl/hclsyntax/expression_vars_gen.go | 99 + .../hashicorp/hcl2/hcl/hclsyntax/file.go | 20 + .../hashicorp/hcl2/hcl/hclsyntax/generate.go | 9 + .../hashicorp/hcl2/hcl/hclsyntax/keywords.go | 21 + .../hashicorp/hcl2/hcl/hclsyntax/navigation.go | 41 + .../hashicorp/hcl2/hcl/hclsyntax/node.go | 22 + .../hashicorp/hcl2/hcl/hclsyntax/parser.go | 1836 +++++++ .../hcl2/hcl/hclsyntax/parser_template.go | 728 +++ .../hcl2/hcl/hclsyntax/parser_traversal.go | 159 + .../hashicorp/hcl2/hcl/hclsyntax/peeker.go | 212 + .../hashicorp/hcl2/hcl/hclsyntax/public.go | 171 + .../hcl2/hcl/hclsyntax/scan_string_lit.go | 301 ++ .../hcl2/hcl/hclsyntax/scan_string_lit.rl | 105 + .../hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go | 5443 +++++++++++++++++++ .../hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl | 376 ++ .../hashicorp/hcl2/hcl/hclsyntax/spec.md | 923 ++++ .../hashicorp/hcl2/hcl/hclsyntax/structure.go | 379 ++ .../hashicorp/hcl2/hcl/hclsyntax/token.go | 272 + .../hcl2/hcl/hclsyntax/token_type_string.go | 69 + .../hashicorp/hcl2/hcl/hclsyntax/unicode2ragel.rb | 335 ++ .../hcl2/hcl/hclsyntax/unicode_derived.rl | 2135 ++++++++ .../hashicorp/hcl2/hcl/hclsyntax/variables.go | 86 + .../hashicorp/hcl2/hcl/hclsyntax/walk.go | 77 + vendor/github.com/hashicorp/hcl2/hcl/json/ast.go | 121 + .../hashicorp/hcl2/hcl/json/didyoumean.go | 33 + vendor/github.com/hashicorp/hcl2/hcl/json/doc.go | 8 + .../hashicorp/hcl2/hcl/json/navigation.go | 70 + 
.../github.com/hashicorp/hcl2/hcl/json/parser.go | 491 ++ .../github.com/hashicorp/hcl2/hcl/json/peeker.go | 25 + .../github.com/hashicorp/hcl2/hcl/json/public.go | 94 + .../github.com/hashicorp/hcl2/hcl/json/scanner.go | 293 + vendor/github.com/hashicorp/hcl2/hcl/json/spec.md | 405 ++ .../hashicorp/hcl2/hcl/json/structure.go | 616 +++ .../hashicorp/hcl2/hcl/json/tokentype_string.go | 29 + vendor/github.com/hashicorp/hcl2/hcl/merged.go | 226 + vendor/github.com/hashicorp/hcl2/hcl/ops.go | 147 + vendor/github.com/hashicorp/hcl2/hcl/pos.go | 262 + .../github.com/hashicorp/hcl2/hcl/pos_scanner.go | 148 + vendor/github.com/hashicorp/hcl2/hcl/schema.go | 21 + vendor/github.com/hashicorp/hcl2/hcl/spec.md | 691 +++ .../github.com/hashicorp/hcl2/hcl/static_expr.go | 40 + vendor/github.com/hashicorp/hcl2/hcl/structure.go | 151 + vendor/github.com/hashicorp/hcl2/hcl/traversal.go | 352 ++ .../hashicorp/hcl2/hcl/traversal_for_expr.go | 121 + .../hashicorp/hcl2/hcldec/block_labels.go | 21 + vendor/github.com/hashicorp/hcl2/hcldec/decode.go | 36 + vendor/github.com/hashicorp/hcl2/hcldec/doc.go | 12 + vendor/github.com/hashicorp/hcl2/hcldec/gob.go | 23 + vendor/github.com/hashicorp/hcl2/hcldec/public.go | 78 + vendor/github.com/hashicorp/hcl2/hcldec/schema.go | 36 + vendor/github.com/hashicorp/hcl2/hcldec/spec.go | 998 ++++ .../github.com/hashicorp/hcl2/hcldec/variables.go | 34 + .../github.com/hashicorp/hcl2/hclparse/parser.go | 123 + vendor/github.com/hashicorp/hil/scanner/scanner.go | 6 + .../hashicorp/terraform/config/append.go | 6 + .../hashicorp/terraform/config/config.go | 396 +- .../hashicorp/terraform/config/config_string.go | 40 + .../terraform/config/configschema/decoder_spec.go | 97 + .../hashicorp/terraform/config/configschema/doc.go | 14 + .../terraform/config/configschema/implied_type.go | 21 + .../config/configschema/internal_validate.go | 92 + .../config/configschema/nestingmode_string.go | 16 + .../terraform/config/configschema/schema.go | 107 + .../hashicorp/terraform/config/hcl2_shim_util.go | 134 + .../terraform/config/hcl2shim/single_attr_body.go | 85 + .../hashicorp/terraform/config/hcl2shim/values.go | 246 + .../hashicorp/terraform/config/import_tree.go | 54 +- .../hashicorp/terraform/config/interpolate.go | 55 +- .../terraform/config/interpolate_funcs.go | 429 +- .../hashicorp/terraform/config/interpolate_walk.go | 4 +- .../hashicorp/terraform/config/loader.go | 5 +- .../hashicorp/terraform/config/loader_hcl.go | 126 +- .../hashicorp/terraform/config/loader_hcl2.go | 473 ++ .../github.com/hashicorp/terraform/config/merge.go | 11 + .../hashicorp/terraform/config/module/get.go | 20 +- .../hashicorp/terraform/config/module/inode.go | 2 +- .../hashicorp/terraform/config/module/module.go | 6 +- .../hashicorp/terraform/config/module/storage.go | 365 ++ .../hashicorp/terraform/config/module/testing.go | 6 +- .../hashicorp/terraform/config/module/tree.go | 314 +- .../config/module/validate_provider_alias.go | 2 +- .../hashicorp/terraform/config/module/versions.go | 95 + .../hashicorp/terraform/config/raw_config.go | 125 +- .../terraform/config/resource_mode_string.go | 4 +- .../hashicorp/terraform/config/testing.go | 2 + vendor/github.com/hashicorp/terraform/dag/dag.go | 21 +- .../github.com/hashicorp/terraform/dag/marshal.go | 12 + vendor/github.com/hashicorp/terraform/dag/walk.go | 16 +- .../terraform/helper/experiment/experiment.go | 154 - .../hashicorp/terraform/helper/experiment/id.go | 34 - .../terraform/helper/hashcode/hashcode.go | 13 + 
.../hashicorp/terraform/helper/logging/logging.go | 8 +- .../terraform/helper/logging/transport.go | 21 +- .../hashicorp/terraform/helper/resource/id.go | 5 + .../hashicorp/terraform/helper/resource/state.go | 2 +- .../hashicorp/terraform/helper/resource/testing.go | 348 +- .../terraform/helper/resource/testing_config.go | 41 +- .../helper/resource/testing_import_state.go | 15 +- .../hashicorp/terraform/helper/resource/wait.go | 2 +- .../hashicorp/terraform/helper/schema/backend.go | 2 +- .../terraform/helper/schema/core_schema.go | 155 + .../helper/schema/data_source_resource_shim.go | 2 +- .../terraform/helper/schema/field_reader.go | 11 +- .../terraform/helper/schema/field_reader_config.go | 2 +- .../terraform/helper/schema/field_reader_diff.go | 43 +- .../terraform/helper/schema/field_reader_map.go | 2 +- .../terraform/helper/schema/field_writer_map.go | 32 + .../terraform/helper/schema/getsource_string.go | 6 +- .../hashicorp/terraform/helper/schema/provider.go | 40 +- .../terraform/helper/schema/provisioner.go | 4 +- .../hashicorp/terraform/helper/schema/resource.go | 98 +- .../terraform/helper/schema/resource_data.go | 39 +- .../terraform/helper/schema/resource_diff.go | 559 ++ .../hashicorp/terraform/helper/schema/schema.go | 252 +- .../hashicorp/terraform/helper/schema/set.go | 31 + .../hashicorp/terraform/helper/schema/testing.go | 4 +- .../terraform/helper/schema/valuetype_string.go | 4 +- .../hashicorp/terraform/helper/shadow/closer.go | 83 - .../terraform/helper/shadow/compared_value.go | 128 - .../terraform/helper/shadow/keyed_value.go | 151 - .../terraform/helper/shadow/ordered_value.go | 66 - .../hashicorp/terraform/helper/shadow/value.go | 87 - .../hashicorp/terraform/httpclient/client.go | 18 + .../hashicorp/terraform/httpclient/useragent.go | 40 + .../hashicorp/terraform/plugin/client.go | 9 + .../hashicorp/terraform/plugin/discovery/find.go | 27 +- .../hashicorp/terraform/plugin/discovery/get.go | 134 +- .../terraform/plugin/discovery/get_cache.go | 48 + .../terraform/plugin/resource_provider.go | 39 + .../hashicorp/terraform/registry/client.go | 227 + .../hashicorp/terraform/registry/errors.go | 23 + .../terraform/registry/regsrc/friendly_host.go | 140 + .../hashicorp/terraform/registry/regsrc/module.go | 205 + .../hashicorp/terraform/registry/regsrc/regsrc.go | 8 + .../terraform/registry/response/module.go | 93 + .../terraform/registry/response/module_list.go | 7 + .../terraform/registry/response/module_provider.go | 14 + .../terraform/registry/response/module_versions.go | 32 + .../terraform/registry/response/pagination.go | 65 + .../terraform/registry/response/redirect.go | 6 + .../hashicorp/terraform/svchost/auth/cache.go | 45 + .../terraform/svchost/auth/credentials.go | 63 + .../hashicorp/terraform/svchost/auth/from_map.go | 18 + .../terraform/svchost/auth/helper_program.go | 80 + .../hashicorp/terraform/svchost/auth/static.go | 28 + .../terraform/svchost/auth/token_credentials.go | 25 + .../hashicorp/terraform/svchost/disco/disco.go | 259 + .../hashicorp/terraform/svchost/disco/host.go | 264 + .../hashicorp/terraform/svchost/label_iter.go | 69 + .../hashicorp/terraform/svchost/svchost.go | 207 + .../hashicorp/terraform/terraform/context.go | 189 +- .../terraform/terraform/context_import.go | 2 +- .../hashicorp/terraform/terraform/diff.go | 15 +- .../hashicorp/terraform/terraform/eval.go | 4 +- .../hashicorp/terraform/terraform/eval_apply.go | 27 +- .../hashicorp/terraform/terraform/eval_context.go | 13 +- .../terraform/terraform/eval_context_builtin.go | 102 +- 
.../terraform/terraform/eval_context_mock.go | 38 +- .../hashicorp/terraform/terraform/eval_diff.go | 26 +- .../terraform/terraform/eval_interpolate.go | 40 +- .../hashicorp/terraform/terraform/eval_local.go | 86 + .../hashicorp/terraform/terraform/eval_output.go | 25 +- .../hashicorp/terraform/terraform/eval_provider.go | 59 +- .../hashicorp/terraform/terraform/eval_state.go | 35 +- .../hashicorp/terraform/terraform/eval_validate.go | 4 + .../terraform/terraform/evaltree_provider.go | 41 +- .../hashicorp/terraform/terraform/features.go | 7 + .../hashicorp/terraform/terraform/graph.go | 8 +- .../terraform/terraform/graph_builder_apply.go | 29 +- .../terraform/terraform/graph_builder_import.go | 7 +- .../terraform/terraform/graph_builder_plan.go | 23 +- .../terraform/terraform/graph_builder_refresh.go | 10 +- .../terraform/terraform/graph_walk_context.go | 3 - .../terraform/terraform/graphtype_string.go | 4 +- .../terraform/terraform/instancetype_string.go | 4 +- .../hashicorp/terraform/terraform/interpolate.go | 84 +- .../terraform/terraform/module_dependencies.go | 1 - .../terraform/terraform/node_data_refresh.go | 11 +- .../hashicorp/terraform/terraform/node_local.go | 66 + .../terraform/terraform/node_module_destroy.go | 29 - .../terraform/terraform/node_module_removed.go | 77 + .../terraform/terraform/node_module_variable.go | 19 +- .../hashicorp/terraform/terraform/node_output.go | 80 +- .../terraform/terraform/node_output_orphan.go | 5 + .../hashicorp/terraform/terraform/node_provider.go | 2 +- .../terraform/terraform/node_provider_abstract.go | 24 +- .../terraform/terraform/node_provider_disabled.go | 6 +- .../terraform/terraform/node_resource_abstract.go | 15 +- .../terraform/terraform/node_resource_apply.go | 55 +- .../terraform/terraform/node_resource_destroy.go | 13 +- .../terraform/terraform/node_resource_plan.go | 2 + .../terraform/node_resource_plan_instance.go | 8 +- .../terraform/terraform/node_resource_refresh.go | 17 +- .../terraform/terraform/node_resource_validate.go | 3 +- .../hashicorp/terraform/terraform/path.go | 18 +- .../hashicorp/terraform/terraform/plan.go | 52 +- .../hashicorp/terraform/terraform/resource.go | 42 +- .../terraform/terraform/resource_address.go | 37 +- .../terraform/terraform/resource_provider.go | 23 + .../terraform/terraform/resource_provider_mock.go | 20 +- .../hashicorp/terraform/terraform/schemas.go | 34 + .../hashicorp/terraform/terraform/shadow.go | 28 - .../terraform/terraform/shadow_components.go | 273 - .../terraform/terraform/shadow_context.go | 158 - .../terraform/shadow_resource_provider.go | 815 --- .../terraform/shadow_resource_provisioner.go | 282 - .../hashicorp/terraform/terraform/state.go | 108 +- .../hashicorp/terraform/terraform/test_failure | 9 - .../hashicorp/terraform/terraform/transform.go | 5 + .../terraform/transform_attach_config_provider.go | 62 - .../terraform/terraform/transform_deposed.go | 38 +- .../terraform/terraform/transform_destroy_edge.go | 10 +- .../terraform/terraform/transform_import_state.go | 45 +- .../terraform/terraform/transform_local.go | 40 + .../terraform/terraform/transform_orphan_output.go | 45 +- .../terraform/terraform/transform_output.go | 46 +- .../terraform/terraform/transform_provider.go | 574 +- .../terraform/transform_provider_disable.go | 50 - .../terraform/terraform/transform_reference.go | 100 +- .../terraform/transform_removed_modules.go | 32 + .../terraform/transform_resource_count.go | 4 +- .../terraform/terraform/transform_targets.go | 56 +- 
.../terraform/terraform/ui_output_mock.go | 5 + .../hashicorp/terraform/terraform/user_agent.go | 11 +- .../hashicorp/terraform/terraform/version.go | 27 +- .../terraform/terraform/version_required.go | 12 +- .../terraform/terraform/walkoperation_string.go | 4 +- .../hashicorp/terraform/tfdiags/diagnostic.go | 26 + .../hashicorp/terraform/tfdiags/diagnostics.go | 181 + .../github.com/hashicorp/terraform/tfdiags/doc.go | 16 + .../hashicorp/terraform/tfdiags/error.go | 23 + .../github.com/hashicorp/terraform/tfdiags/hcl.go | 77 + .../hashicorp/terraform/tfdiags/rpc_friendly.go | 53 + .../hashicorp/terraform/tfdiags/severity_string.go | 21 + .../hashicorp/terraform/tfdiags/simple_warning.go | 25 + .../hashicorp/terraform/tfdiags/source_range.go | 35 + .../hashicorp/terraform/version/version.go | 36 + vendor/github.com/mattn/go-isatty/LICENSE | 9 + vendor/github.com/mattn/go-isatty/README.md | 37 + vendor/github.com/mattn/go-isatty/doc.go | 2 + .../github.com/mattn/go-isatty/isatty_appengine.go | 9 + vendor/github.com/mattn/go-isatty/isatty_bsd.go | 18 + vendor/github.com/mattn/go-isatty/isatty_linux.go | 18 + .../github.com/mattn/go-isatty/isatty_solaris.go | 16 + .../github.com/mattn/go-isatty/isatty_windows.go | 19 + vendor/github.com/mitchellh/cli/.travis.yml | 13 + vendor/github.com/mitchellh/cli/LICENSE | 354 ++ vendor/github.com/mitchellh/cli/Makefile | 20 + vendor/github.com/mitchellh/cli/README.md | 67 + vendor/github.com/mitchellh/cli/autocomplete.go | 43 + vendor/github.com/mitchellh/cli/cli.go | 715 +++ vendor/github.com/mitchellh/cli/command.go | 67 + vendor/github.com/mitchellh/cli/command_mock.go | 63 + vendor/github.com/mitchellh/cli/help.go | 79 + vendor/github.com/mitchellh/cli/ui.go | 187 + vendor/github.com/mitchellh/cli/ui_colored.go | 69 + vendor/github.com/mitchellh/cli/ui_concurrent.go | 54 + vendor/github.com/mitchellh/cli/ui_mock.go | 111 + vendor/github.com/mitchellh/cli/ui_writer.go | 18 + .../mitchellh/copystructure/copystructure.go | 87 +- .../mitchellh/go-testing-interface/.travis.yml | 12 + .../mitchellh/go-testing-interface/LICENSE | 21 + .../mitchellh/go-testing-interface/README.md | 52 + .../mitchellh/go-testing-interface/testing.go | 84 + .../mitchellh/go-testing-interface/testing_go19.go | 80 + vendor/github.com/mitchellh/go-wordwrap/LICENSE.md | 21 + vendor/github.com/mitchellh/go-wordwrap/README.md | 39 + .../github.com/mitchellh/go-wordwrap/wordwrap.go | 73 + .../github.com/mitchellh/reflectwalk/location.go | 2 + .../mitchellh/reflectwalk/location_string.go | 8 +- .../mitchellh/reflectwalk/reflectwalk.go | 108 +- vendor/github.com/oklog/run/.gitignore | 14 + vendor/github.com/oklog/run/.travis.yml | 12 + vendor/github.com/oklog/run/LICENSE | 201 + vendor/github.com/oklog/run/README.md | 73 + vendor/github.com/oklog/run/group.go | 62 + vendor/github.com/posener/complete/.gitignore | 2 + vendor/github.com/posener/complete/.travis.yml | 17 + vendor/github.com/posener/complete/LICENSE.txt | 21 + vendor/github.com/posener/complete/args.go | 102 + vendor/github.com/posener/complete/cmd/cmd.go | 128 + .../posener/complete/cmd/install/bash.go | 32 + .../posener/complete/cmd/install/install.go | 92 + .../posener/complete/cmd/install/utils.go | 118 + .../github.com/posener/complete/cmd/install/zsh.go | 39 + vendor/github.com/posener/complete/command.go | 111 + vendor/github.com/posener/complete/complete.go | 95 + vendor/github.com/posener/complete/log.go | 23 + vendor/github.com/posener/complete/match/file.go | 19 + 
vendor/github.com/posener/complete/match/match.go | 6 + vendor/github.com/posener/complete/match/prefix.go | 9 + vendor/github.com/posener/complete/metalinter.json | 21 + vendor/github.com/posener/complete/predict.go | 41 + .../github.com/posener/complete/predict_files.go | 108 + vendor/github.com/posener/complete/predict_set.go | 12 + vendor/github.com/posener/complete/readme.md | 116 + vendor/github.com/posener/complete/test.sh | 12 + vendor/github.com/posener/complete/utils.go | 46 + vendor/github.com/satori/go.uuid/.travis.yml | 21 - vendor/github.com/satori/go.uuid/LICENSE | 20 - vendor/github.com/satori/go.uuid/README.md | 65 - vendor/github.com/satori/go.uuid/uuid.go | 481 -- vendor/github.com/ulikunitz/xz/.gitignore | 25 + vendor/github.com/ulikunitz/xz/LICENSE | 26 + vendor/github.com/ulikunitz/xz/README.md | 71 + vendor/github.com/ulikunitz/xz/TODO.md | 315 ++ vendor/github.com/ulikunitz/xz/bits.go | 74 + vendor/github.com/ulikunitz/xz/crc.go | 54 + vendor/github.com/ulikunitz/xz/example.go | 40 + vendor/github.com/ulikunitz/xz/format.go | 728 +++ vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 + .../github.com/ulikunitz/xz/internal/hash/doc.go | 14 + .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 + .../ulikunitz/xz/internal/hash/roller.go | 29 + .../github.com/ulikunitz/xz/internal/xlog/xlog.go | 457 ++ vendor/github.com/ulikunitz/xz/lzma/bintree.go | 523 ++ vendor/github.com/ulikunitz/xz/lzma/bitops.go | 45 + vendor/github.com/ulikunitz/xz/lzma/breader.go | 39 + vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 + vendor/github.com/ulikunitz/xz/lzma/bytewriter.go | 37 + vendor/github.com/ulikunitz/xz/lzma/decoder.go | 277 + vendor/github.com/ulikunitz/xz/lzma/decoderdict.go | 135 + vendor/github.com/ulikunitz/xz/lzma/directcodec.go | 49 + vendor/github.com/ulikunitz/xz/lzma/distcodec.go | 156 + vendor/github.com/ulikunitz/xz/lzma/encoder.go | 268 + vendor/github.com/ulikunitz/xz/lzma/encoderdict.go | 149 + vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes vendor/github.com/ulikunitz/xz/lzma/hashtable.go | 309 ++ vendor/github.com/ulikunitz/xz/lzma/header.go | 167 + vendor/github.com/ulikunitz/xz/lzma/header2.go | 398 ++ vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go | 129 + .../github.com/ulikunitz/xz/lzma/literalcodec.go | 132 + .../github.com/ulikunitz/xz/lzma/matchalgorithm.go | 52 + vendor/github.com/ulikunitz/xz/lzma/operation.go | 80 + vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 + vendor/github.com/ulikunitz/xz/lzma/properties.go | 69 + vendor/github.com/ulikunitz/xz/lzma/rangecodec.go | 248 + vendor/github.com/ulikunitz/xz/lzma/reader.go | 100 + vendor/github.com/ulikunitz/xz/lzma/reader2.go | 232 + vendor/github.com/ulikunitz/xz/lzma/state.go | 151 + vendor/github.com/ulikunitz/xz/lzma/treecodecs.go | 133 + vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 + vendor/github.com/ulikunitz/xz/lzma/writer2.go | 305 ++ vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 + vendor/github.com/ulikunitz/xz/make-docs | 5 + vendor/github.com/ulikunitz/xz/reader.go | 373 ++ vendor/github.com/ulikunitz/xz/writer.go | 386 ++ vendor/github.com/zclconf/go-cty/LICENSE | 21 + vendor/github.com/zclconf/go-cty/cty/capsule.go | 89 + vendor/github.com/zclconf/go-cty/cty/collection.go | 34 + .../zclconf/go-cty/cty/convert/compare_types.go | 165 + .../zclconf/go-cty/cty/convert/conversion.go | 120 + .../go-cty/cty/convert/conversion_collection.go | 226 + .../go-cty/cty/convert/conversion_dynamic.go | 33 
+ .../go-cty/cty/convert/conversion_primitive.go | 50 + .../github.com/zclconf/go-cty/cty/convert/doc.go | 15 + .../zclconf/go-cty/cty/convert/public.go | 83 + .../zclconf/go-cty/cty/convert/sort_types.go | 69 + .../github.com/zclconf/go-cty/cty/convert/unify.go | 66 + vendor/github.com/zclconf/go-cty/cty/doc.go | 18 + .../zclconf/go-cty/cty/element_iterator.go | 191 + vendor/github.com/zclconf/go-cty/cty/error.go | 55 + .../zclconf/go-cty/cty/function/argument.go | 50 + .../github.com/zclconf/go-cty/cty/function/doc.go | 6 + .../zclconf/go-cty/cty/function/error.go | 50 + .../zclconf/go-cty/cty/function/function.go | 291 + .../zclconf/go-cty/cty/function/stdlib/bool.go | 73 + .../zclconf/go-cty/cty/function/stdlib/bytes.go | 112 + .../go-cty/cty/function/stdlib/collection.go | 140 + .../zclconf/go-cty/cty/function/stdlib/csv.go | 93 + .../zclconf/go-cty/cty/function/stdlib/doc.go | 13 + .../zclconf/go-cty/cty/function/stdlib/format.go | 496 ++ .../go-cty/cty/function/stdlib/format_fsm.go | 358 ++ .../go-cty/cty/function/stdlib/format_fsm.rl | 182 + .../zclconf/go-cty/cty/function/stdlib/general.go | 107 + .../zclconf/go-cty/cty/function/stdlib/json.go | 72 + .../zclconf/go-cty/cty/function/stdlib/number.go | 428 ++ .../zclconf/go-cty/cty/function/stdlib/sequence.go | 130 + .../zclconf/go-cty/cty/function/stdlib/set.go | 195 + .../zclconf/go-cty/cty/function/stdlib/string.go | 234 + .../zclconf/go-cty/cty/function/unpredictable.go | 31 + vendor/github.com/zclconf/go-cty/cty/gob.go | 125 + vendor/github.com/zclconf/go-cty/cty/gocty/doc.go | 7 + .../github.com/zclconf/go-cty/cty/gocty/helpers.go | 43 + vendor/github.com/zclconf/go-cty/cty/gocty/in.go | 528 ++ vendor/github.com/zclconf/go-cty/cty/gocty/out.go | 705 +++ .../zclconf/go-cty/cty/gocty/type_implied.go | 108 + vendor/github.com/zclconf/go-cty/cty/helper.go | 99 + vendor/github.com/zclconf/go-cty/cty/json.go | 176 + vendor/github.com/zclconf/go-cty/cty/json/doc.go | 11 + .../github.com/zclconf/go-cty/cty/json/marshal.go | 189 + .../github.com/zclconf/go-cty/cty/json/simple.go | 41 + vendor/github.com/zclconf/go-cty/cty/json/type.go | 23 + .../zclconf/go-cty/cty/json/type_implied.go | 171 + .../zclconf/go-cty/cty/json/unmarshal.go | 459 ++ vendor/github.com/zclconf/go-cty/cty/json/value.go | 65 + vendor/github.com/zclconf/go-cty/cty/list_type.go | 68 + vendor/github.com/zclconf/go-cty/cty/map_type.go | 68 + vendor/github.com/zclconf/go-cty/cty/null.go | 14 + .../github.com/zclconf/go-cty/cty/object_type.go | 135 + vendor/github.com/zclconf/go-cty/cty/path.go | 186 + .../zclconf/go-cty/cty/primitive_type.go | 122 + vendor/github.com/zclconf/go-cty/cty/set/gob.go | 76 + .../github.com/zclconf/go-cty/cty/set/iterator.go | 36 + vendor/github.com/zclconf/go-cty/cty/set/ops.go | 199 + vendor/github.com/zclconf/go-cty/cty/set/rules.go | 25 + vendor/github.com/zclconf/go-cty/cty/set/set.go | 62 + vendor/github.com/zclconf/go-cty/cty/set_helper.go | 126 + .../github.com/zclconf/go-cty/cty/set_internals.go | 146 + vendor/github.com/zclconf/go-cty/cty/set_type.go | 66 + vendor/github.com/zclconf/go-cty/cty/tuple_type.go | 121 + vendor/github.com/zclconf/go-cty/cty/type.go | 95 + .../github.com/zclconf/go-cty/cty/type_conform.go | 142 + .../zclconf/go-cty/cty/types_to_register.go | 57 + vendor/github.com/zclconf/go-cty/cty/unknown.go | 79 + vendor/github.com/zclconf/go-cty/cty/value.go | 98 + vendor/github.com/zclconf/go-cty/cty/value_init.go | 276 + vendor/github.com/zclconf/go-cty/cty/value_ops.go | 1071 ++++ 
vendor/github.com/zclconf/go-cty/cty/walk.go | 182 +
 647 files changed, 95533 insertions(+), 6988 deletions(-)
 create mode 100644 vendor/github.com/agext/levenshtein/.gitignore create mode 100644 vendor/github.com/agext/levenshtein/.travis.yml create mode 100644 vendor/github.com/agext/levenshtein/DCO create mode 100644 vendor/github.com/agext/levenshtein/LICENSE create mode 100644 vendor/github.com/agext/levenshtein/MAINTAINERS create mode 100644 vendor/github.com/agext/levenshtein/NOTICE create mode 100644 vendor/github.com/agext/levenshtein/README.md create mode 100644 vendor/github.com/agext/levenshtein/levenshtein.go create mode 100644 vendor/github.com/agext/levenshtein/params.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/LICENSE create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/generate.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/tables.go create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go create mode 100644 vendor/github.com/armon/go-radix/.gitignore create mode 100644 vendor/github.com/armon/go-radix/.travis.yml create mode 100644 vendor/github.com/armon/go-radix/LICENSE create mode 100644 vendor/github.com/armon/go-radix/README.md create mode 100644 vendor/github.com/armon/go-radix/radix.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go create mode 100644
vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go create mode 100644 vendor/github.com/bgentry/speakeasy/.gitignore create mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS create mode 100644 vendor/github.com/bgentry/speakeasy/Readme.md create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy.go create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_unix.go create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_windows.go create mode 100644 vendor/github.com/golang/protobuf/AUTHORS create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/golang/protobuf/LICENSE create mode 100644 vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/go.mod create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tar.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_txz.go create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_xz.go create mode 100644 vendor/github.com/hashicorp/go-hclog/LICENSE create mode 100644 vendor/github.com/hashicorp/go-hclog/README.md create mode 100644 
vendor/github.com/hashicorp/go-hclog/global.go create mode 100644 vendor/github.com/hashicorp/go-hclog/int.go create mode 100644 vendor/github.com/hashicorp/go-hclog/log.go create mode 100644 vendor/github.com/hashicorp/go-hclog/stacktrace.go create mode 100644 vendor/github.com/hashicorp/go-hclog/stdlog.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_broker.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_broker.proto create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_client.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_server.go create mode 100644 vendor/github.com/hashicorp/go-plugin/log_entry.go create mode 100644 vendor/github.com/hashicorp/go-plugin/protocol.go create mode 100644 vendor/github.com/hashicorp/go-safetemp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-safetemp/README.md create mode 100644 vendor/github.com/hashicorp/go-safetemp/safetemp.go create mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-uuid/go.mod create mode 100644 vendor/github.com/hashicorp/go-version/go.mod create mode 100644 vendor/github.com/hashicorp/hcl2/LICENSE create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/decode.go create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/doc.go create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/schema.go create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/types.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/doc.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/eval_context.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_call.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_list.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_map.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go create mode 100644 
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode2ragel.rb create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode_derived.rl create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/ast.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/didyoumean.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/doc.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/navigation.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/parser.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/public.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/spec.md create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/structure.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/merged.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/ops.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/pos.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/schema.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/spec.md create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/static_expr.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/structure.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/traversal.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/decode.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/doc.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/gob.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/public.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/schema.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/spec.go create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/variables.go create mode 100644 vendor/github.com/hashicorp/hcl2/hclparse/parser.go create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go create mode 100644 
vendor/github.com/hashicorp/terraform/config/configschema/schema.go create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go create mode 100644 vendor/github.com/hashicorp/terraform/config/loader_hcl2.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/storage.go create mode 100644 vendor/github.com/hashicorp/terraform/config/module/versions.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/experiment/id.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go create mode 100644 vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/closer.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go delete mode 100644 vendor/github.com/hashicorp/terraform/helper/shadow/value.go create mode 100644 vendor/github.com/hashicorp/terraform/httpclient/client.go create mode 100644 vendor/github.com/hashicorp/terraform/httpclient/useragent.go create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/client.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/errors.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/regsrc/module.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/module.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/module_list.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/module_provider.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/module_versions.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/pagination.go create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/redirect.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/cache.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/static.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/disco/disco.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/disco/host.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/label_iter.go create mode 100644 vendor/github.com/hashicorp/terraform/svchost/svchost.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_local.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/features.go create mode 100644 
vendor/github.com/hashicorp/terraform/terraform/node_local.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/schemas.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow_components.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow_context.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/test_failure create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_local.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/error.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/hcl.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/source_range.go create mode 100644 vendor/github.com/hashicorp/terraform/version/version.go create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_appengine.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/mitchellh/cli/.travis.yml create mode 100644 vendor/github.com/mitchellh/cli/LICENSE create mode 100644 vendor/github.com/mitchellh/cli/Makefile create mode 100644 vendor/github.com/mitchellh/cli/README.md create mode 100644 vendor/github.com/mitchellh/cli/autocomplete.go create mode 100644 vendor/github.com/mitchellh/cli/cli.go create mode 100644 vendor/github.com/mitchellh/cli/command.go create mode 100644 vendor/github.com/mitchellh/cli/command_mock.go create mode 100644 vendor/github.com/mitchellh/cli/help.go create mode 100644 vendor/github.com/mitchellh/cli/ui.go create mode 100644 vendor/github.com/mitchellh/cli/ui_colored.go create mode 100644 vendor/github.com/mitchellh/cli/ui_concurrent.go create mode 100644 vendor/github.com/mitchellh/cli/ui_mock.go create mode 100644 vendor/github.com/mitchellh/cli/ui_writer.go create mode 100644 vendor/github.com/mitchellh/go-testing-interface/.travis.yml create mode 100644 vendor/github.com/mitchellh/go-testing-interface/LICENSE create mode 100644 vendor/github.com/mitchellh/go-testing-interface/README.md create 
mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing.go create mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing_go19.go create mode 100644 vendor/github.com/mitchellh/go-wordwrap/LICENSE.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/README.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/wordwrap.go create mode 100644 vendor/github.com/oklog/run/.gitignore create mode 100644 vendor/github.com/oklog/run/.travis.yml create mode 100644 vendor/github.com/oklog/run/LICENSE create mode 100644 vendor/github.com/oklog/run/README.md create mode 100644 vendor/github.com/oklog/run/group.go create mode 100644 vendor/github.com/posener/complete/.gitignore create mode 100644 vendor/github.com/posener/complete/.travis.yml create mode 100644 vendor/github.com/posener/complete/LICENSE.txt create mode 100644 vendor/github.com/posener/complete/args.go create mode 100644 vendor/github.com/posener/complete/cmd/cmd.go create mode 100644 vendor/github.com/posener/complete/cmd/install/bash.go create mode 100644 vendor/github.com/posener/complete/cmd/install/install.go create mode 100644 vendor/github.com/posener/complete/cmd/install/utils.go create mode 100644 vendor/github.com/posener/complete/cmd/install/zsh.go create mode 100644 vendor/github.com/posener/complete/command.go create mode 100644 vendor/github.com/posener/complete/complete.go create mode 100644 vendor/github.com/posener/complete/log.go create mode 100644 vendor/github.com/posener/complete/match/file.go create mode 100644 vendor/github.com/posener/complete/match/match.go create mode 100644 vendor/github.com/posener/complete/match/prefix.go create mode 100644 vendor/github.com/posener/complete/metalinter.json create mode 100644 vendor/github.com/posener/complete/predict.go create mode 100644 vendor/github.com/posener/complete/predict_files.go create mode 100644 vendor/github.com/posener/complete/predict_set.go create mode 100644 vendor/github.com/posener/complete/readme.md create mode 100644 vendor/github.com/posener/complete/test.sh create mode 100644 vendor/github.com/posener/complete/utils.go delete mode 100644 vendor/github.com/satori/go.uuid/.travis.yml delete mode 100644 vendor/github.com/satori/go.uuid/LICENSE delete mode 100644 vendor/github.com/satori/go.uuid/README.md delete mode 100644 vendor/github.com/satori/go.uuid/uuid.go create mode 100644 vendor/github.com/ulikunitz/xz/.gitignore create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/example.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go 
create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100644 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go create mode 100644 vendor/github.com/zclconf/go-cty/LICENSE create mode 100644 vendor/github.com/zclconf/go-cty/cty/capsule.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/collection.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/public.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/unify.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/element_iterator.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/error.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/argument.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/error.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/function.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go create mode 100644 
vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gob.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/in.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/out.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/helper.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/marshal.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/simple.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/type_implied.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/value.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/list_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/map_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/null.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/object_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/path.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/primitive_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/gob.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/iterator.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/ops.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/rules.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/set.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_helper.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_internals.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/tuple_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/type_conform.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/types_to_register.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/unknown.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/value.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/value_init.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/value_ops.go create mode 100644 
vendor/github.com/zclconf/go-cty/cty/walk.go (limited to 'vendor/github.com') diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore new file mode 100644 index 0000000..404365f --- /dev/null +++ b/vendor/github.com/agext/levenshtein/.gitignore @@ -0,0 +1,2 @@ +README.html +coverage.out diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml new file mode 100644 index 0000000..95be94a --- /dev/null +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -0,0 +1,70 @@ +language: go +sudo: false +go: + - 1.8 + - 1.7.5 + - 1.7.4 + - 1.7.3 + - 1.7.2 + - 1.7.1 + - 1.7 + - tip + - 1.6.4 + - 1.6.3 + - 1.6.2 + - 1.6.1 + - 1.6 + - 1.5.4 + - 1.5.3 + - 1.5.2 + - 1.5.1 + - 1.5 + - 1.4.3 + - 1.4.2 + - 1.4.1 + - 1.4 + - 1.3.3 + - 1.3.2 + - 1.3.1 + - 1.3 + - 1.2.2 + - 1.2.1 + - 1.2 + - 1.1.2 + - 1.1.1 + - 1.1 +before_install: + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci +notifications: + email: + on_success: never +matrix: + fast_finish: true + allow_failures: + - go: tip + - go: 1.6.4 + - go: 1.6.3 + - go: 1.6.2 + - go: 1.6.1 + - go: 1.6 + - go: 1.5.4 + - go: 1.5.3 + - go: 1.5.2 + - go: 1.5.1 + - go: 1.5 + - go: 1.4.3 + - go: 1.4.2 + - go: 1.4.1 + - go: 1.4 + - go: 1.3.3 + - go: 1.3.2 + - go: 1.3.1 + - go: 1.3 + - go: 1.2.2 + - go: 1.2.1 + - go: 1.2 + - go: 1.1.2 + - go: 1.1.1 + - go: 1.1 diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO new file mode 100644 index 0000000..716561d --- /dev/null +++ b/vendor/github.com/agext/levenshtein/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS new file mode 100644 index 0000000..726c2af --- /dev/null +++ b/vendor/github.com/agext/levenshtein/MAINTAINERS @@ -0,0 +1 @@ +Alex Bucataru (@AlexBucataru) diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE new file mode 100644 index 0000000..eaffaab --- /dev/null +++ b/vendor/github.com/agext/levenshtein/NOTICE @@ -0,0 +1,5 @@ +Alrux Go EXTensions (AGExt) - package levenshtein +Copyright 2016 ALRUX Inc. + +This product includes software developed at ALRUX Inc. +(http://www.alrux.com/). diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md new file mode 100644 index 0000000..90509c2 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/README.md @@ -0,0 +1,38 @@ +# A Go package for calculating the Levenshtein distance between two strings + +[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein)  +[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein) +[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein) +[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein) + + +This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org). + +## Project Status + +v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. + +This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. + +## Overview + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. 
+ +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. + +## Installation + +``` +go get github.com/agext/levenshtein +``` + +## License + +Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go new file mode 100644 index 0000000..df69ce7 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -0,0 +1,290 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure. + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. + +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. 
+ +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. +*/ +package levenshtein + +// Calculate determines the Levenshtein distance between two strings, using +// the given costs for each edit operation. It returns the distance along with +// the lengths of the longest common prefix and suffix. +// +// If maxCost is non-zero, the calculation stops as soon as the distance is determined +// to be greater than maxCost. Therefore, any return value higher than maxCost is a +// lower bound for the actual distance. +func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) { + l1, l2 := len(str1), len(str2) + // trim common prefix, if any, as it doesn't affect the distance + for ; prefixLen < l1 && prefixLen < l2; prefixLen++ { + if str1[prefixLen] != str2[prefixLen] { + break + } + } + str1, str2 = str1[prefixLen:], str2[prefixLen:] + l1 -= prefixLen + l2 -= prefixLen + // trim common suffix, if any, as it doesn't affect the distance + for 0 < l1 && 0 < l2 { + if str1[l1-1] != str2[l2-1] { + str1, str2 = str1[:l1], str2[:l2] + break + } + l1-- + l2-- + suffixLen++ + } + // if the first string is empty, the distance is the length of the second string times the cost of insertion + if l1 == 0 { + dist = l2 * insCost + return + } + // if the second string is empty, the distance is the length of the first string times the cost of deletion + if l2 == 0 { + dist = l1 * delCost + return + } + + // variables used in inner "for" loops + var y, dy, c, l int + + // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited' + if maxCost > 0 { + if subCost < delCost+insCost { + if maxCost >= l1*subCost+(l2-l1)*insCost { + maxCost = 0 + } + } else { + if maxCost >= l1*delCost+l2*insCost { + maxCost = 0 + } + } + } + + if maxCost > 0 { + // prefer the longer string first, to minimize time; + // a swap also transposes the meanings of insertion and deletion. + if l1 < l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + + // the length differential times cost of deletion is a lower bound for the cost; + // if it is higher than the maxCost, there is no point going into the main calculation. 
+ if dist = (l1 - l2) * delCost; dist > maxCost { + return + } + + d := make([]int, l1+1) + + // offset and length of d in the current row + doff, dlen := 0, 1 + for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ { + d[y] = dy + y++ + dy = y * delCost + } + // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + + for x := 0; x < l2; x++ { + dy, d[doff] = d[doff], d[doff]+insCost + for d[doff] > maxCost && dlen > 0 { + if str1[doff] != str2[x] { + dy += subCost + } + doff++ + dlen-- + if c = d[doff] + insCost; c < dy { + dy = c + } + dy, d[doff] = d[doff], dy + } + for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + if y < l1 { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy { + y++ + dlen++ + } + } + // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + if dlen == 0 { + dist = maxCost + 1 + return + } + } + if doff+dlen-1 < l1 { + dist = maxCost + 1 + return + } + dist = d[l1] + } else { + // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is + // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space + // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html + + // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway; + // a swap also transposes the meanings of insertion and deletion. + if l1 > l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + d := make([]int, l1+1) + + for y = 1; y <= l1; y++ { + d[y] = y * delCost + } + for x := 0; x < l2; x++ { + dy, d[0] = d[0], d[0]+insCost + for y = 0; y < l1; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + } + dist = d[l1] + } + + return +} + +// Distance returns the Levenshtein distance between str1 and str2, using the +// default or provided cost values. Pass nil for the third argument to use the +// default cost of 1 for all three operations, with no maximum. +func Distance(str1, str2 string, p *Params) int { + if p == nil { + p = defaultParams + } + dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost) + return dist +} + +// Similarity returns a score in the range of 0..1 for how similar the two strings are. +// A score of 1 means the strings are identical, and 0 means they have nothing in common. +// +// A nil third argument uses the default cost of 1 for all three operations. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Similarity(str1, str2 string, p *Params) float64 { + return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus +} + +// Match returns a similarity score adjusted by the same method as proposed by Winkler for +// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their +// similarity score is already over a threshold. +// +// The score is in the range of 0..1, with 1 meaning the strings are identical, +// and 0 meaning they have nothing in common. 
+// +// A nil third argument uses the default cost of 1 for all three operations, maximum length of +// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Match(str1, str2 string, p *Params) float64 { + s1, s2 := []rune(str1), []rune(str2) + l1, l2 := len(s1), len(s2) + // two empty strings are identical; shortcut also avoids divByZero issues later on. + if l1 == 0 && l2 == 0 { + return 1 + } + + if p == nil { + p = defaultParams + } + + // a min over 1 can never be satisfied, so the score is 0. + if p.minScore > 1 { + return 0 + } + + insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0 + if l1 > l2 { + l1, l2, insCost, delCost = l2, l1, delCost, insCost + } + + if p.subCost < delCost+insCost { + maxDist = l1*p.subCost + (l2-l1)*insCost + } else { + maxDist = l1*delCost + l2*insCost + } + + // a zero min is always satisfied, so no need to set a max cost. + if p.minScore > 0 { + // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula + // for the max cost, because a sim score below min cannot receive a bonus. + if p.minScore < p.bonusThreshold { + // round down the max - a cost equal to a rounded up max would already be under min. + max = int((1 - p.minScore) * float64(maxDist)) + } else { + // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim) + // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist)) + // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist + // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist + // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist + max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale)) + } + } + + dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost) + if max > 0 && dist > max { + return 0 + } + sim := 1 - float64(dist)/float64(maxDist) + + if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 { + if pl > p.bonusPrefix { + pl = p.bonusPrefix + } + sim += float64(pl) * p.bonusScale * (1 - sim) + } + + if sim < p.minScore { + return 0 + } + + return sim +} diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go new file mode 100644 index 0000000..a85727b --- /dev/null +++ b/vendor/github.com/agext/levenshtein/params.go @@ -0,0 +1,152 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package levenshtein + +// Params represents a set of parameter values for the various formulas involved +// in the calculation of the Levenshtein string metrics. 
+type Params struct { + insCost int + subCost int + delCost int + maxCost int + minScore float64 + bonusPrefix int + bonusScale float64 + bonusThreshold float64 +} + +var ( + defaultParams = NewParams() +) + +// NewParams creates a new set of parameters and initializes it with the default values. +func NewParams() *Params { + return &Params{ + insCost: 1, + subCost: 1, + delCost: 1, + maxCost: 0, + minScore: 0, + bonusPrefix: 4, + bonusScale: .1, + bonusThreshold: .7, + } +} + +// Clone returns a pointer to a copy of the receiver parameter set, or of a new +// default parameter set if the receiver is nil. +func (p *Params) Clone() *Params { + if p == nil { + return NewParams() + } + return &Params{ + insCost: p.insCost, + subCost: p.subCost, + delCost: p.delCost, + maxCost: p.maxCost, + minScore: p.minScore, + bonusPrefix: p.bonusPrefix, + bonusScale: p.bonusScale, + bonusThreshold: p.bonusThreshold, + } +} + +// InsCost overrides the default value of 1 for the cost of insertion. +// The new value must be zero or positive. +func (p *Params) InsCost(v int) *Params { + if v >= 0 { + p.insCost = v + } + return p +} + +// SubCost overrides the default value of 1 for the cost of substitution. +// The new value must be zero or positive. +func (p *Params) SubCost(v int) *Params { + if v >= 0 { + p.subCost = v + } + return p +} + +// DelCost overrides the default value of 1 for the cost of deletion. +// The new value must be zero or positive. +func (p *Params) DelCost(v int) *Params { + if v >= 0 { + p.delCost = v + } + return p +} + +// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost. +// The calculation of Distance() stops when the result is guaranteed to exceed +// this maximum, returning a lower-bound rather than exact value. +// The new value must be zero or positive. +func (p *Params) MaxCost(v int) *Params { + if v >= 0 { + p.maxCost = v + } + return p +} + +// MinScore overrides the default value of 0 for the minimum similarity score. +// Scores below this threshold are returned as 0 by Similarity() and Match(). +// The new value must be zero or positive. Note that a minimum greater than 1 +// can never be satisfied, resulting in a score of 0 for any pair of strings. +func (p *Params) MinScore(v float64) *Params { + if v >= 0 { + p.minScore = v + } + return p +} + +// BonusPrefix overrides the default value for the maximum length of +// common prefix to be considered for bonus by Match(). +// The new value must be zero or positive. +func (p *Params) BonusPrefix(v int) *Params { + if v >= 0 { + p.bonusPrefix = v + } + return p +} + +// BonusScale overrides the default value for the scaling factor used by Match() +// in calculating the bonus. +// The new value must be zero or positive. To guarantee that the similarity score +// remains in the interval 0..1, this scaling factor is not allowed to exceed +// 1 / BonusPrefix. +func (p *Params) BonusScale(v float64) *Params { + if v >= 0 { + p.bonusScale = v + } + + // the bonus cannot exceed (1-sim), or the score may become greater than 1. + if float64(p.bonusPrefix)*p.bonusScale > 1 { + p.bonusScale = 1 / float64(p.bonusPrefix) + } + + return p +} + +// BonusThreshold overrides the default value for the minimum similarity score +// for which Match() can assign a bonus. +// The new value must be zero or positive. Note that a threshold greater than 1 +// effectively makes Match() become the equivalent of Similarity(). 
+func (p *Params) BonusThreshold(v float64) *Params { + if v >= 0 { + p.bonusThreshold = v + } + return p +} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index a31cdec..7534473 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -71,8 +71,13 @@ func Host(base *net.IPNet, num int) (net.IP, error) { if numUint64 > maxHostNum { return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) } - - return insertNumIntoIP(ip, num, 32), nil + var bitlength int + if ip.To4() != nil { + bitlength = 32 + } else { + bitlength = 128 + } + return insertNumIntoIP(ip, num, bitlength), nil } // AddressRange returns the first and last addresses in the given CIDR range. @@ -110,3 +115,96 @@ func AddressCount(network *net.IPNet) uint64 { prefixLen, bits := network.Mask.Size() return 1 << (uint64(bits) - uint64(prefixLen)) } + +//VerifyNoOverlap takes a list of subnets and a supernet (CIDRBlock) and verifies +//none of the subnets overlap and all subnets are in the supernet +//it returns an error if any of those conditions are not satisfied +func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { + firstLastIP := make([][]net.IP, len(subnets)) + for i, s := range subnets { + first, last := AddressRange(s) + firstLastIP[i] = []net.IP{first, last} + } + for i, s := range subnets { + if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { + return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) + } + for j := i + 1; j < len(subnets); j++ { + first := firstLastIP[j][0] + last := firstLastIP[j][1] + if s.Contains(first) || s.Contains(last) { + return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) + } + } + } + return nil +} + +// PreviousSubnet returns the subnet of the desired mask in the IP space +// just lower than the start of IPNet provided.
If the IP space rolls over +// then the second return value is true +func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + startIP := checkIPv4(network.IP) + previousIP := make(net.IP, len(startIP)) + copy(previousIP, startIP) + cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) + previousIP = Dec(previousIP) + previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} + if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { + return previous, true + } + return previous, false +} + +// NextSubnet returns the next available subnet of the desired mask size +// starting from the maximum IP of the offset subnet +// If the IP exceeds the maximum IP then the second return value is true +func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + _, currentLast := AddressRange(network) + mask := net.CIDRMask(prefixLen, 8*len(currentLast)) + currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} + _, last := AddressRange(currentSubnet) + last = Inc(last) + next := &net.IPNet{IP: last.Mask(mask), Mask: mask} + if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { + return next, true + } + return next, false +} + +//Inc increases the IP by one; this returns a new []byte for the IP +func Inc(IP net.IP) net.IP { + IP = checkIPv4(IP) + incIP := make([]byte, len(IP)) + copy(incIP, IP) + for j := len(incIP) - 1; j >= 0; j-- { + incIP[j]++ + if incIP[j] > 0 { + break + } + } + return incIP +} + +//Dec decreases the IP by one; this returns a new []byte for the IP +func Dec(IP net.IP) net.IP { + IP = checkIPv4(IP) + decIP := make([]byte, len(IP)) + copy(decIP, IP) + decIP = checkIPv4(decIP) + for j := len(decIP) - 1; j >= 0; j-- { + decIP[j]-- + if decIP[j] < 255 { + break + } + } + return decIP +} + +func checkIPv4(ip net.IP) net.IP { + // Go for some reason allocs IPv6len for IPv4 so we have to correct it + if v4 := ip.To4(); v4 != nil { + return v4 + } + return ip +} diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE new file mode 100644 index 0000000..684b03b --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/LICENSE @@ -0,0 +1,95 @@ +Copyright (c) 2017 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------- + +Unicode table generation programs are under a separate copyright and license: + +Copyright (c) 2014 Couchbase, Inc. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +except in compliance with the License.
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the +License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +either express or implied. See the License for the specific language governing permissions +and limitations under the License. + +--------- + +Grapheme break data is provided as part of the Unicode character database, +copyright 2016 Unicode, Inc, which is provided with the following license: + +Unicode Data Files include all data files under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +Unicode Data Files do not include PDF online code charts under the +directory http://www.unicode.org/Public/. + +Software includes any source code published in the Unicode Standard +or under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +NOTICE TO USER: Carefully read the following legal agreement. +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +THE DATA FILES OR SOFTWARE. + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 1991-2017 Unicode, Inc. All rights reserved. +Distributed under the Terms of Use in http://www.unicode.org/copyright.html. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Unicode data files and any associated documentation +(the "Data Files") or Unicode software and any associated documentation +(the "Software") to deal in the Data Files or Software +without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, and/or sell copies of +the Data Files or Software, and to permit persons to whom the Data Files +or Software are furnished to do so, provided that either +(a) this copyright and permission notice appear with all copies +of the Data Files or Software, or +(b) this copyright and permission notice appear in associated +Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT OF THIRD PARTY RIGHTS. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE. + +Except as contained in this notice, the name of a copyright holder +shall not be used in advertising or otherwise to promote the sale, +use or other dealings in these Data Files or Software without prior +written authorization of the copyright holder.
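For reference, a minimal sketch (not part of the patch itself) of how the two packages vendored above are consumed. It exercises levenshtein's `Distance` and `Similarity` with the chainable `Params` builder, plus go-cidr's now address-family-aware `Host` and the new `NextSubnet`/`VerifyNoOverlap` helpers. Every identifier comes from the diffs above; the expected-output comments assume the behavior shown there rather than documenting anything beyond it.

```go
package main

import (
	"fmt"
	"net"

	"github.com/agext/levenshtein"
	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	// Nil params fall back to the defaults: all three edit costs 1, no maxCost.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// The builder methods mutate and return the same *Params, so they chain;
	// a MinScore lets Similarity stop early and report 0 for distant strings.
	p := levenshtein.NewParams().MinScore(0.8)
	fmt.Printf("%.3f\n", levenshtein.Similarity("terraform", "teraform", p)) // 0.889

	// Host now derives the bit length from the address family, so IPv6 bases work.
	_, v6net, _ := net.ParseCIDR("2001:db8::/64")
	host, _ := cidr.Host(v6net, 5)
	fmt.Println(host) // 2001:db8::5

	// NextSubnet walks to the adjacent block; the bool flags address-space rollover.
	_, a, _ := net.ParseCIDR("10.0.0.0/24")
	b, _ := cidr.NextSubnet(a, 24)
	fmt.Println(b) // 10.0.1.0/24

	// VerifyNoOverlap requires disjoint subnets that all sit inside the supernet.
	_, super, _ := net.ParseCIDR("10.0.0.0/16")
	fmt.Println(cidr.VerifyNoOverlap([]*net.IPNet{a, b}, super)) // <nil>
}
```

The nil-tolerant entry points work because `Distance` substitutes the package defaults and `Clone` treats a nil receiver as a fresh default `Params`, so callers only build a `Params` value when they need non-default costs or thresholds.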
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go new file mode 100644 index 0000000..5752e9e --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go @@ -0,0 +1,30 @@ +package textseg + +import ( + "bufio" + "bytes" +) + +// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of +// all of the recognized tokens in the given buffer. +func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret [][]byte + for scanner.Scan() { + ret = append(ret, scanner.Bytes()) + } + return ret, scanner.Err() +} + +// TokenCount is a utility that uses a bufio.SplitFunc to count the number of +// recognized tokens in the given buffer. +func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret int + for scanner.Scan() { + ret++ + } + return ret, scanner.Err() +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go new file mode 100644 index 0000000..81f3a74 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go @@ -0,0 +1,7 @@ +package textseg + +//go:generate go run make_tables.go -output tables.go +//go:generate go run make_test_tables.go -output tables_test.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl +//go:generate ragel -Z grapheme_clusters.rl +//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go new file mode 100644 index 0000000..012bc69 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go @@ -0,0 +1,5276 @@ + +// line 1 "grapheme_clusters.rl" +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. 
DO NOT EDIT + +// line 13 "grapheme_clusters.go" +var _graphclust_actions []byte = []byte{ + 0, 1, 0, 1, 4, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 21, 2, 1, + 7, 2, 1, 8, 2, 2, 3, 2, + 5, 1, 3, 0, 1, 8, 3, 5, + 0, 1, 3, 5, 1, 6, +} + +var _graphclust_key_offsets []int16 = []int16{ + 0, 0, 1, 3, 5, 7, 10, 15, + 17, 20, 28, 31, 33, 35, 37, 67, + 75, 77, 81, 84, 89, 94, 104, 116, + 122, 127, 137, 140, 147, 151, 159, 169, + 173, 181, 183, 191, 194, 196, 201, 203, + 210, 212, 220, 221, 242, 246, 252, 257, + 259, 263, 267, 269, 273, 275, 278, 282, + 284, 291, 293, 297, 301, 305, 307, 309, + 318, 322, 327, 329, 335, 337, 338, 340, + 341, 343, 345, 347, 349, 364, 368, 370, + 372, 377, 381, 385, 387, 389, 393, 397, + 399, 403, 410, 415, 419, 422, 423, 427, + 434, 439, 440, 441, 443, 452, 454, 477, + 481, 483, 487, 491, 492, 496, 500, 503, + 505, 510, 523, 525, 527, 529, 531, 535, + 539, 541, 543, 545, 549, 553, 557, 559, + 561, 563, 565, 566, 568, 574, 580, 586, + 588, 592, 596, 601, 604, 614, 616, 618, + 621, 623, 625, 627, 629, 632, 637, 639, + 642, 650, 653, 655, 657, 659, 690, 698, + 700, 704, 711, 723, 730, 744, 750, 768, + 779, 785, 797, 800, 809, 814, 824, 830, + 844, 850, 862, 874, 878, 880, 886, 888, + 895, 898, 906, 907, 928, 937, 945, 951, + 953, 957, 961, 966, 972, 974, 977, 990, + 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, + 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, + 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, + 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, + 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, + 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, + 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, + 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, + 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, + 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, + 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, + 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, + 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, + 1506, 1508, 1511, 1516, 1518, 1521, 1529, 1532, + 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, + 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, + 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, + 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, + 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, + 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, + 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, + 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, + 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, + 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, + 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, + 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, + 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, + 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, + 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, + 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, + 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, + 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, + 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, + 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, + 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, + 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, + 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, + 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, + 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, + 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, + 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, + 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, + 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, + 2744, 2749, 2751, 2760, 2765, 2766, 2775, 
2781, + 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, + 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, + 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, + 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, + 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, + 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, + 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, + 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, + 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, + 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, + 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, + 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, + 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, + 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, + 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, + 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, + 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, + 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, + 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, + 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, + 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, + 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, + 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, + 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, + 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, + 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, + 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, + 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, + 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, + 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, + 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, + 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, + 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, + 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, + 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, + 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, + 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, + 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, + 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, + 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, + 4617, 4631, 4637, 4649, 4661, 4665, 4667, 4673, + 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, + 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, + 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, + 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, + 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, + 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, + 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, + 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, + 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, + 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, + 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, + 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, + 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, + 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, + 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, + 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, + 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, + 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, + 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, + 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, + 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, + 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, + 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, + 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, + 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, + 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, + 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, + 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, + 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, + 6011, 6015, 6017, 6021, 6028, 6033, 6037, 
6040, + 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, + 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, + 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, + 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, + 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, + 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, + 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, + 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, + 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, + 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, + 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, + 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, + 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, + 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, + 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, + 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, + 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, + 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, + 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, + 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, + 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, + 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, + 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, + 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, + 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, + 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, + 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, + 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, + 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, + 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, + 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, + 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, + 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, + 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, + 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, + 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, + 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, + 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, + 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, + 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, + 7645, 7650, 7651, 7652, 7654, 7663, 7665, 7688, + 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, + 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, + 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, + 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, + 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, + 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, + 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, + 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, + 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, + 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, + 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, + 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, + 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, + 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, + 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, + 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, + 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, + 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, + 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, + 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, + 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, + 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, + 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, + 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, + 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, + 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, + 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, + 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, + 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, + 9017, 9023, 9028, 9038, 9041, 9048, 9052, 
9060, + 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, + 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, + 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, + 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, + 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, + 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, + 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, + 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, + 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, + 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, + 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, + 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, + 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, + 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, + 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, + 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, + 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, + 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, + 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, + 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, + 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, + 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, + 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, + 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, + 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, + 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, + 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, + 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, + 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, + 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, + 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, + 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, + 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, + 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, + 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, + 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, + 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, + 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, + 10890, 10911, 10932, 10952, 10972, 10992, 11012, 11032, + 11052, 11072, 11092, 11112, +} + +var _graphclust_trans_keys []byte = []byte{ + 10, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 167, 169, 171, 173, 174, 175, + 176, 177, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 166, + 170, 172, 178, 150, 153, 155, 163, 165, + 167, 169, 173, 153, 155, 148, 161, 163, + 255, 189, 132, 185, 144, 152, 161, 164, + 255, 188, 129, 131, 190, 255, 133, 134, + 137, 138, 142, 150, 152, 161, 164, 255, + 131, 134, 137, 138, 142, 144, 146, 175, + 178, 180, 182, 255, 134, 138, 142, 161, + 164, 255, 188, 129, 131, 190, 191, 128, + 132, 135, 136, 139, 141, 150, 151, 162, + 163, 130, 190, 191, 151, 128, 130, 134, + 136, 138, 141, 128, 131, 190, 255, 133, + 137, 142, 148, 151, 161, 164, 255, 128, + 132, 134, 136, 138, 141, 149, 150, 162, + 163, 129, 131, 190, 255, 133, 137, 142, + 150, 152, 161, 164, 255, 130, 131, 138, + 150, 143, 148, 152, 159, 178, 179, 177, + 179, 186, 135, 142, 177, 179, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 177, 191, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 173, 183, 185, 190, 150, 153, + 158, 160, 177, 180, 130, 141, 157, 132, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 
178, 179, 180, 255, 148, 156, 158, + 255, 139, 141, 169, 133, 134, 160, 171, + 176, 187, 151, 155, 160, 162, 191, 149, + 158, 165, 188, 176, 190, 128, 132, 180, + 255, 133, 170, 180, 255, 128, 130, 161, + 173, 166, 179, 164, 183, 173, 144, 146, + 148, 168, 178, 180, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 128, 131, 157, 179, 181, 183, 144, + 176, 164, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 163, + 167, 128, 129, 180, 255, 134, 159, 178, + 255, 166, 173, 135, 147, 128, 131, 179, + 255, 129, 164, 166, 255, 169, 182, 131, + 188, 140, 141, 176, 178, 180, 183, 184, + 190, 191, 129, 171, 175, 181, 182, 163, + 170, 172, 173, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 128, 130, 184, + 255, 135, 190, 131, 175, 187, 255, 128, + 130, 167, 180, 179, 128, 130, 179, 255, + 129, 137, 141, 255, 190, 172, 183, 159, + 170, 188, 128, 131, 190, 191, 151, 128, + 132, 135, 136, 139, 141, 162, 163, 166, + 172, 176, 180, 181, 191, 128, 134, 176, + 255, 132, 255, 175, 181, 184, 255, 129, + 155, 158, 255, 129, 255, 171, 183, 157, + 171, 175, 182, 184, 191, 146, 167, 169, + 182, 171, 172, 189, 190, 176, 180, 176, + 182, 145, 190, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 128, 182, 187, 255, + 173, 180, 182, 255, 132, 155, 159, 161, + 175, 128, 163, 165, 128, 134, 136, 152, + 155, 161, 163, 164, 166, 170, 144, 150, + 132, 138, 145, 146, 151, 166, 169, 0, + 127, 176, 255, 131, 137, 191, 145, 189, + 135, 129, 130, 132, 133, 144, 154, 176, + 139, 159, 150, 156, 159, 164, 167, 168, + 170, 173, 145, 176, 255, 139, 255, 166, + 176, 171, 179, 160, 161, 163, 164, 165, + 166, 167, 169, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 168, 170, 150, 153, 155, 163, 165, 167, + 169, 173, 153, 155, 148, 161, 163, 255, + 131, 187, 189, 132, 185, 190, 255, 141, + 144, 129, 136, 145, 151, 152, 161, 162, + 163, 164, 255, 129, 188, 190, 130, 131, + 191, 255, 141, 151, 129, 132, 133, 134, + 137, 138, 142, 161, 162, 163, 164, 255, + 131, 188, 129, 130, 190, 255, 145, 181, + 129, 130, 131, 134, 135, 136, 137, 138, + 139, 141, 142, 175, 176, 177, 178, 255, + 134, 138, 141, 129, 136, 142, 161, 162, + 163, 164, 255, 129, 188, 130, 131, 190, + 191, 128, 141, 129, 132, 135, 136, 139, + 140, 150, 151, 162, 163, 130, 190, 191, + 128, 141, 151, 129, 130, 134, 136, 138, + 140, 128, 129, 131, 190, 255, 133, 137, + 129, 132, 142, 148, 151, 161, 164, 255, + 129, 188, 190, 191, 130, 131, 130, 134, + 128, 132, 135, 136, 138, 139, 140, 141, + 149, 150, 162, 163, 129, 190, 130, 131, + 191, 255, 133, 137, 141, 151, 129, 132, + 142, 161, 162, 163, 164, 255, 138, 143, + 150, 159, 144, 145, 146, 148, 152, 158, + 178, 179, 177, 179, 180, 186, 135, 142, + 177, 179, 180, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 191, + 177, 190, 128, 132, 134, 135, 141, 151, + 153, 188, 134, 128, 129, 130, 141, 156, + 157, 158, 159, 160, 162, 164, 168, 169, + 170, 172, 173, 174, 175, 176, 179, 183, + 177, 173, 183, 185, 186, 187, 188, 189, + 190, 150, 151, 152, 153, 158, 
160, 177, + 180, 130, 132, 141, 157, 133, 134, 157, + 159, 146, 148, 178, 180, 146, 147, 178, + 179, 182, 180, 189, 190, 255, 134, 157, + 137, 147, 148, 255, 139, 141, 169, 133, + 134, 178, 160, 162, 163, 166, 167, 168, + 169, 171, 176, 184, 185, 187, 155, 151, + 152, 153, 154, 150, 160, 162, 191, 149, + 151, 152, 158, 165, 172, 173, 178, 179, + 188, 176, 190, 132, 181, 187, 128, 131, + 180, 188, 189, 255, 130, 133, 170, 171, + 179, 180, 255, 130, 161, 170, 128, 129, + 162, 165, 166, 167, 168, 173, 167, 173, + 166, 169, 170, 174, 175, 177, 178, 179, + 164, 171, 172, 179, 180, 181, 182, 183, + 161, 173, 180, 144, 146, 148, 168, 178, + 179, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 144, 176, + 175, 177, 191, 160, 191, 128, 130, 170, + 175, 153, 154, 153, 154, 155, 160, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 175, 175, 178, 180, 189, 158, 159, + 176, 177, 130, 134, 139, 167, 163, 164, + 165, 166, 132, 133, 134, 159, 160, 177, + 178, 255, 166, 173, 135, 145, 146, 147, + 131, 179, 188, 128, 130, 180, 181, 182, + 185, 186, 255, 165, 129, 255, 169, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 131, 140, 141, 188, 176, 178, 180, 183, + 184, 190, 191, 129, 171, 181, 182, 172, + 173, 174, 175, 165, 168, 172, 173, 163, + 170, 172, 184, 190, 158, 128, 143, 160, + 175, 144, 145, 150, 155, 157, 158, 159, + 135, 139, 141, 168, 171, 189, 160, 182, + 186, 191, 129, 131, 133, 134, 140, 143, + 184, 186, 165, 166, 128, 129, 130, 132, + 133, 134, 135, 136, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 156, 176, 178, 129, 128, 130, 184, 255, + 135, 190, 130, 131, 175, 176, 178, 183, + 184, 187, 255, 172, 128, 130, 167, 180, + 179, 130, 128, 129, 179, 181, 182, 190, + 191, 255, 129, 137, 138, 140, 141, 255, + 180, 190, 172, 174, 175, 177, 178, 181, + 182, 183, 159, 160, 162, 163, 170, 188, + 190, 191, 128, 129, 130, 131, 128, 151, + 129, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 183, 184, 191, + 133, 128, 129, 130, 134, 176, 185, 189, + 177, 178, 179, 186, 187, 190, 191, 255, + 129, 132, 255, 175, 190, 176, 177, 178, + 181, 184, 187, 188, 255, 129, 155, 158, + 255, 189, 176, 178, 179, 186, 187, 190, + 191, 255, 129, 255, 172, 182, 171, 173, + 174, 175, 176, 183, 166, 157, 159, 160, + 161, 162, 171, 175, 190, 176, 182, 184, + 191, 169, 177, 180, 146, 167, 170, 182, + 171, 172, 189, 190, 176, 180, 176, 182, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 166, 173, 165, 169, 174, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 128, 182, 187, 255, 173, + 180, 182, 255, 132, 155, 159, 161, 175, + 128, 163, 165, 128, 134, 136, 152, 155, + 161, 163, 164, 166, 170, 144, 150, 132, + 138, 143, 187, 191, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 139, + 168, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 144, 145, 150, 155, + 157, 158, 128, 191, 173, 128, 159, 160, + 191, 156, 128, 133, 134, 191, 0, 127, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 
142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 
162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 
168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 128, + 191, 160, 172, 174, 191, 128, 133, 134, + 155, 157, 191, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 128, 255, 128, 129, 130, 132, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 160, 255, 128, 129, + 130, 133, 134, 135, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 160, 255, + 168, 255, 128, 129, 130, 134, 135, 141, + 156, 157, 158, 159, 160, 162, 164, 168, + 169, 170, 172, 173, 174, 175, 176, 179, + 183, 168, 255, 192, 255, 159, 139, 187, + 158, 159, 176, 255, 135, 138, 139, 187, + 188, 255, 168, 255, 153, 154, 155, 160, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 175, 177, 178, 179, 180, 181, + 182, 184, 185, 186, 187, 188, 189, 191, + 176, 190, 192, 255, 135, 147, 160, 188, + 128, 156, 184, 129, 255, 128, 129, 130, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 158, 159, 135, 255, + 148, 176, 140, 168, 132, 160, 188, 152, + 180, 144, 172, 136, 164, 192, 255, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 128, 156, 160, 255, 136, 164, + 175, 176, 255, 128, 141, 143, 191, 128, + 129, 152, 155, 156, 130, 191, 140, 141, + 128, 138, 144, 167, 175, 191, 128, 159, + 176, 191, 157, 128, 191, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 128, 156, 160, 255, 136, 164, 175, + 176, 255, 135, 138, 139, 187, 188, 191, + 192, 255, 187, 191, 128, 190, 128, 190, + 188, 128, 175, 190, 191, 145, 155, 157, + 159, 128, 191, 130, 135, 128, 191, 189, + 128, 191, 128, 129, 130, 131, 132, 191, + 178, 128, 191, 128, 159, 164, 191, 133, + 128, 191, 128, 178, 187, 191, 135, 142, + 143, 145, 146, 149, 150, 153, 154, 155, + 164, 128, 
191, 128, 165, 166, 191, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 129, 135, 132, + 134, 128, 175, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 0, 127, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 
255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 166, 167, 169, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 168, 170, 150, 153, 155, + 163, 165, 167, 169, 173, 153, 155, 148, + 161, 163, 255, 131, 187, 189, 132, 185, + 190, 255, 141, 144, 129, 136, 145, 151, + 152, 161, 162, 163, 164, 255, 129, 188, + 190, 130, 131, 191, 255, 141, 151, 129, + 132, 133, 134, 137, 138, 142, 161, 162, + 163, 164, 255, 131, 188, 129, 130, 190, + 255, 145, 181, 129, 130, 131, 134, 135, + 136, 137, 138, 139, 141, 142, 175, 176, + 177, 178, 255, 134, 138, 141, 129, 136, + 142, 161, 162, 163, 164, 255, 129, 188, + 130, 131, 190, 191, 128, 141, 129, 132, + 135, 136, 139, 140, 150, 151, 162, 163, + 130, 190, 191, 128, 141, 151, 129, 130, + 134, 136, 138, 140, 128, 129, 131, 190, + 255, 133, 137, 129, 132, 142, 148, 151, + 161, 164, 255, 129, 188, 190, 191, 130, + 131, 130, 134, 128, 132, 135, 136, 138, + 139, 140, 141, 149, 150, 162, 163, 129, + 190, 130, 131, 191, 255, 133, 137, 141, + 151, 129, 132, 142, 161, 162, 163, 164, + 255, 138, 143, 150, 159, 144, 145, 146, + 148, 152, 158, 178, 179, 177, 179, 180, + 186, 135, 142, 177, 179, 180, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 191, 177, 190, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 177, 173, 183, 185, 186, + 187, 188, 189, 190, 150, 151, 152, 153, + 158, 160, 177, 180, 130, 132, 141, 157, + 133, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 182, 180, 189, 190, + 255, 134, 157, 137, 147, 148, 255, 139, + 141, 169, 133, 134, 178, 160, 162, 163, + 166, 167, 168, 169, 171, 176, 184, 185, + 187, 155, 151, 152, 153, 154, 150, 160, + 162, 191, 149, 151, 152, 158, 165, 172, + 173, 178, 179, 188, 176, 190, 132, 181, + 187, 128, 131, 180, 188, 189, 255, 130, + 133, 170, 171, 179, 180, 255, 130, 161, + 170, 128, 129, 162, 165, 166, 167, 168, + 173, 167, 173, 166, 169, 170, 174, 175, + 177, 178, 179, 164, 171, 172, 179, 180, + 181, 182, 183, 161, 173, 180, 144, 146, + 148, 168, 178, 179, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 144, 176, 175, 177, 191, 160, 191, + 128, 130, 170, 175, 153, 154, 153, 154, + 155, 160, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 175, 175, 178, 180, + 189, 158, 159, 176, 177, 130, 134, 139, + 167, 163, 164, 165, 166, 132, 133, 134, + 159, 160, 177, 178, 255, 166, 173, 135, + 145, 146, 147, 131, 179, 188, 128, 130, + 180, 181, 182, 185, 186, 255, 165, 129, + 255, 169, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 131, 140, 141, 188, 176, + 178, 180, 183, 184, 190, 191, 129, 171, + 181, 182, 172, 173, 174, 175, 165, 168, + 172, 173, 163, 170, 172, 184, 190, 158, + 128, 143, 160, 175, 144, 145, 150, 155, + 157, 158, 159, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 
186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 129, 128, + 130, 184, 255, 135, 190, 130, 131, 175, + 176, 178, 183, 184, 187, 255, 172, 128, + 130, 167, 180, 179, 130, 128, 129, 179, + 181, 182, 190, 191, 255, 129, 137, 138, + 140, 141, 255, 180, 190, 172, 174, 175, + 177, 178, 181, 182, 183, 159, 160, 162, + 163, 170, 188, 190, 191, 128, 129, 130, + 131, 128, 151, 129, 132, 135, 136, 139, + 141, 162, 163, 166, 172, 176, 180, 181, + 183, 184, 191, 133, 128, 129, 130, 134, + 176, 185, 189, 177, 178, 179, 186, 187, + 190, 191, 255, 129, 132, 255, 175, 190, + 176, 177, 178, 181, 184, 187, 188, 255, + 129, 155, 158, 255, 189, 176, 178, 179, + 186, 187, 190, 191, 255, 129, 255, 172, + 182, 171, 173, 174, 175, 176, 183, 166, + 157, 159, 160, 161, 162, 171, 175, 190, + 176, 182, 184, 191, 169, 177, 180, 146, + 167, 170, 182, 171, 172, 189, 190, 176, + 180, 176, 182, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 166, 173, + 165, 169, 174, 178, 187, 255, 131, 132, + 140, 169, 174, 255, 130, 132, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 163, 165, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 144, 150, 132, 138, 143, 187, 191, 160, + 128, 129, 132, 135, 133, 134, 160, 255, + 192, 255, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 128, 129, 130, + 132, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 160, 255, 128, + 129, 130, 133, 134, 135, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 160, + 255, 168, 255, 128, 129, 130, 134, 135, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 168, 255, 192, 255, 159, 139, + 187, 158, 159, 176, 255, 135, 138, 139, + 187, 188, 255, 168, 255, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 177, 178, 179, 180, + 181, 182, 184, 185, 186, 187, 188, 189, + 191, 176, 190, 192, 255, 135, 147, 160, + 188, 128, 156, 184, 129, 255, 128, 129, + 130, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 158, 159, 135, + 255, 148, 176, 140, 168, 132, 160, 188, + 152, 180, 144, 172, 136, 164, 192, 255, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 128, 156, 160, 255, 136, + 164, 175, 176, 255, 142, 128, 191, 128, + 129, 152, 155, 156, 130, 191, 139, 141, + 128, 140, 142, 143, 144, 167, 168, 174, + 175, 191, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 167, 169, 171, 173, 174, + 175, 176, 177, 179, 180, 181, 182, 183, + 184, 
185, 186, 187, 188, 189, 190, 191, + 166, 170, 172, 178, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 189, 132, 185, 144, 152, 161, + 164, 255, 188, 129, 131, 190, 255, 133, + 134, 137, 138, 142, 150, 152, 161, 164, + 255, 131, 134, 137, 138, 142, 144, 146, + 175, 178, 180, 182, 255, 134, 138, 142, + 161, 164, 255, 188, 129, 131, 190, 191, + 128, 132, 135, 136, 139, 141, 150, 151, + 162, 163, 130, 190, 191, 151, 128, 130, + 134, 136, 138, 141, 128, 131, 190, 255, + 133, 137, 142, 148, 151, 161, 164, 255, + 128, 132, 134, 136, 138, 141, 149, 150, + 162, 163, 129, 131, 190, 255, 133, 137, + 142, 150, 152, 161, 164, 255, 130, 131, + 138, 150, 143, 148, 152, 159, 178, 179, + 177, 179, 186, 135, 142, 177, 179, 185, + 187, 188, 136, 141, 181, 183, 185, 152, + 153, 190, 191, 177, 191, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 173, 183, 185, 190, 150, + 153, 158, 160, 177, 180, 130, 141, 157, + 132, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 180, 255, 148, 156, + 158, 255, 139, 141, 169, 133, 134, 160, + 171, 176, 187, 151, 155, 160, 162, 191, + 149, 158, 165, 188, 176, 190, 128, 132, + 180, 255, 133, 170, 180, 255, 128, 130, + 161, 173, 166, 179, 164, 183, 173, 144, + 146, 148, 168, 178, 180, 184, 185, 128, + 181, 187, 191, 128, 131, 179, 181, 183, + 140, 141, 144, 176, 175, 177, 191, 160, + 191, 128, 130, 170, 175, 153, 154, 153, + 154, 155, 160, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 175, 175, 178, + 180, 189, 158, 159, 176, 177, 130, 134, + 139, 163, 167, 128, 129, 180, 255, 134, + 159, 178, 255, 166, 173, 135, 147, 128, + 131, 179, 255, 129, 164, 166, 255, 169, + 182, 131, 188, 140, 141, 176, 178, 180, + 183, 184, 190, 191, 129, 171, 175, 181, + 182, 163, 170, 172, 173, 172, 184, 190, + 158, 128, 143, 160, 175, 144, 145, 150, + 155, 157, 158, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 128, 130, + 184, 255, 135, 190, 131, 175, 187, 255, + 128, 130, 167, 180, 179, 128, 130, 179, + 255, 129, 137, 141, 255, 190, 172, 183, + 159, 170, 188, 128, 131, 190, 191, 151, + 128, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 191, 128, 134, + 176, 255, 132, 255, 175, 181, 184, 255, + 129, 155, 158, 255, 129, 255, 171, 183, + 157, 171, 175, 182, 184, 191, 146, 167, + 169, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 145, 190, 143, 146, 178, 157, + 158, 133, 134, 137, 168, 169, 170, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 160, 128, 129, 132, 135, + 133, 134, 160, 255, 192, 255, 128, 131, + 157, 179, 181, 183, 164, 144, 145, 150, + 155, 157, 158, 159, 145, 146, 151, 166, + 169, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 166, 167, 169, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 168, 170, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 131, 187, 189, 132, 185, 190, + 255, 141, 144, 129, 136, 145, 
151, 152, + 161, 162, 163, 164, 255, 129, 188, 190, + 130, 131, 191, 255, 141, 151, 129, 132, + 133, 134, 137, 138, 142, 161, 162, 163, + 164, 255, 131, 188, 129, 130, 190, 255, + 145, 181, 129, 130, 131, 134, 135, 136, + 137, 138, 139, 141, 142, 175, 176, 177, + 178, 255, 134, 138, 141, 129, 136, 142, + 161, 162, 163, 164, 255, 129, 188, 130, + 131, 190, 191, 128, 141, 129, 132, 135, + 136, 139, 140, 150, 151, 162, 163, 130, + 190, 191, 128, 141, 151, 129, 130, 134, + 136, 138, 140, 128, 129, 131, 190, 255, + 133, 137, 129, 132, 142, 148, 151, 161, + 164, 255, 129, 188, 190, 191, 130, 131, + 130, 134, 128, 132, 135, 136, 138, 139, + 140, 141, 149, 150, 162, 163, 129, 190, + 130, 131, 191, 255, 133, 137, 141, 151, + 129, 132, 142, 161, 162, 163, 164, 255, + 138, 143, 150, 159, 144, 145, 146, 148, + 152, 158, 178, 179, 177, 179, 180, 186, + 135, 142, 177, 179, 180, 185, 187, 188, + 136, 141, 181, 183, 185, 152, 153, 190, + 191, 191, 177, 190, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 177, 173, 183, 185, 186, 187, + 188, 189, 190, 150, 151, 152, 153, 158, + 160, 177, 180, 130, 132, 141, 157, 133, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 182, 180, 189, 190, 255, + 134, 157, 137, 147, 148, 255, 139, 141, + 169, 133, 134, 178, 160, 162, 163, 166, + 167, 168, 169, 171, 176, 184, 185, 187, + 155, 151, 152, 153, 154, 150, 160, 162, + 191, 149, 151, 152, 158, 165, 172, 173, + 178, 179, 188, 176, 190, 132, 181, 187, + 128, 131, 180, 188, 189, 255, 130, 133, + 170, 171, 179, 180, 255, 130, 161, 170, + 128, 129, 162, 165, 166, 167, 168, 173, + 167, 173, 166, 169, 170, 174, 175, 177, + 178, 179, 164, 171, 172, 179, 180, 181, + 182, 183, 161, 173, 180, 144, 146, 148, + 168, 178, 179, 184, 185, 128, 181, 187, + 191, 128, 131, 179, 181, 183, 140, 141, + 144, 176, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 167, + 163, 164, 165, 166, 132, 133, 134, 159, + 160, 177, 178, 255, 166, 173, 135, 145, + 146, 147, 131, 179, 188, 128, 130, 180, + 181, 182, 185, 186, 255, 165, 129, 255, + 169, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 131, 140, 141, 188, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 181, + 182, 172, 173, 174, 175, 165, 168, 172, + 173, 163, 170, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 129, 128, 130, + 184, 255, 135, 190, 130, 131, 175, 176, + 178, 183, 184, 187, 255, 172, 128, 130, + 167, 180, 179, 130, 128, 129, 179, 181, + 182, 190, 191, 255, 129, 137, 138, 140, + 141, 255, 180, 190, 172, 174, 175, 177, + 178, 181, 182, 183, 159, 160, 162, 163, + 170, 188, 190, 191, 128, 129, 130, 131, + 128, 151, 129, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 183, + 184, 191, 133, 128, 129, 130, 134, 176, + 185, 189, 177, 178, 179, 186, 187, 190, + 191, 255, 129, 132, 255, 175, 190, 176, + 177, 178, 181, 184, 187, 188, 255, 129, + 155, 158, 255, 189, 176, 178, 179, 186, + 187, 190, 191, 255, 129, 255, 172, 182, + 171, 173, 174, 175, 176, 183, 166, 157, + 159, 160, 161, 162, 171, 175, 190, 176, + 182, 184, 191, 169, 177, 180, 146, 167, + 170, 182, 171, 
172, 189, 190, 176, 180, + 176, 182, 143, 146, 178, 157, 158, 133, + 134, 137, 168, 169, 170, 166, 173, 165, + 169, 174, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 143, 187, 191, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 139, 168, 128, 159, 160, 175, 176, + 191, 157, 128, 191, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 
172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 141, 144, 129, + 136, 145, 151, 152, 161, 162, 163, 164, + 255, 129, 188, 190, 130, 131, 191, 255, + 141, 151, 129, 132, 133, 134, 137, 138, + 142, 161, 162, 163, 164, 255, 131, 188, + 129, 130, 190, 255, 145, 181, 129, 130, + 131, 134, 135, 136, 137, 138, 139, 141, + 142, 175, 176, 177, 178, 255, 134, 138, + 141, 129, 136, 142, 161, 162, 163, 164, + 255, 129, 188, 130, 131, 190, 191, 128, + 141, 129, 132, 135, 136, 139, 140, 150, + 151, 162, 163, 130, 190, 191, 128, 141, + 151, 129, 130, 134, 136, 138, 140, 128, + 129, 131, 190, 255, 133, 137, 129, 132, + 142, 148, 151, 161, 164, 255, 129, 188, + 190, 191, 130, 131, 130, 134, 128, 132, + 135, 136, 138, 139, 140, 141, 149, 150, + 162, 163, 129, 190, 130, 131, 191, 255, + 133, 137, 141, 151, 129, 132, 142, 161, + 162, 163, 164, 255, 138, 143, 150, 159, + 144, 145, 146, 148, 152, 158, 178, 179, + 177, 179, 180, 186, 135, 142, 177, 179, + 180, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 191, 177, 190, + 128, 132, 134, 135, 141, 151, 153, 188, + 134, 128, 129, 130, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 177, 173, + 183, 185, 186, 187, 188, 189, 190, 150, + 151, 152, 153, 158, 160, 177, 180, 130, + 132, 141, 157, 133, 134, 157, 159, 146, + 148, 178, 180, 146, 147, 178, 179, 182, + 180, 189, 190, 255, 134, 157, 137, 147, + 148, 255, 139, 141, 169, 133, 134, 178, + 160, 162, 163, 166, 167, 168, 169, 171, + 176, 184, 185, 187, 155, 151, 152, 153, + 154, 150, 160, 162, 191, 149, 151, 152, + 158, 165, 172, 173, 178, 179, 188, 176, + 190, 132, 181, 187, 128, 131, 180, 188, + 189, 255, 130, 133, 170, 171, 179, 180, + 255, 130, 161, 170, 128, 129, 162, 165, + 166, 167, 168, 173, 167, 173, 166, 169, + 170, 174, 175, 177, 178, 179, 164, 171, + 172, 179, 180, 181, 182, 183, 161, 173, + 180, 144, 146, 148, 168, 178, 179, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 144, 176, 175, 177, + 191, 160, 191, 128, 130, 170, 175, 153, + 154, 153, 154, 155, 160, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 175, + 175, 178, 180, 189, 158, 159, 176, 177, + 130, 134, 139, 167, 163, 164, 165, 166, + 132, 133, 134, 159, 160, 177, 178, 255, + 166, 173, 135, 145, 146, 147, 131, 179, + 188, 128, 130, 180, 181, 182, 185, 186, + 255, 165, 129, 255, 169, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 131, 140, + 141, 188, 176, 178, 180, 183, 184, 190, + 191, 129, 171, 181, 182, 172, 173, 174, + 175, 165, 168, 172, 173, 163, 170, 172, + 184, 190, 158, 128, 143, 160, 175, 144, + 145, 150, 155, 157, 158, 159, 135, 139, + 141, 168, 171, 189, 160, 182, 186, 191, + 129, 131, 133, 134, 140, 143, 184, 186, + 165, 166, 128, 129, 130, 132, 133, 134, + 135, 136, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 176, + 178, 129, 128, 130, 184, 
255, 135, 190, + 130, 131, 175, 176, 178, 183, 184, 187, + 255, 172, 128, 130, 167, 180, 179, 130, + 128, 129, 179, 181, 182, 190, 191, 255, + 129, 137, 138, 140, 141, 255, 180, 190, + 172, 174, 175, 177, 178, 181, 182, 183, + 159, 160, 162, 163, 170, 188, 190, 191, + 128, 129, 130, 131, 128, 151, 129, 132, + 135, 136, 139, 141, 162, 163, 166, 172, + 176, 180, 181, 183, 184, 191, 133, 128, + 129, 130, 134, 176, 185, 189, 177, 178, + 179, 186, 187, 190, 191, 255, 129, 132, + 255, 175, 190, 176, 177, 178, 181, 184, + 187, 188, 255, 129, 155, 158, 255, 189, + 176, 178, 179, 186, 187, 190, 191, 255, + 129, 255, 172, 182, 171, 173, 174, 175, + 176, 183, 166, 157, 159, 160, 161, 162, + 171, 175, 190, 176, 182, 184, 191, 169, + 177, 180, 146, 167, 170, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 166, 173, 165, 169, 174, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 143, + 187, 191, 160, 128, 129, 132, 135, 133, + 134, 160, 255, 192, 255, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 191, 128, 156, 161, 190, 192, + 255, 136, 164, 175, 176, 255, 135, 138, + 139, 187, 188, 191, 192, 255, 0, 127, + 192, 255, 187, 191, 128, 190, 191, 128, + 190, 188, 128, 175, 176, 189, 190, 191, + 145, 155, 157, 159, 128, 191, 130, 135, + 128, 191, 189, 128, 191, 128, 129, 130, + 131, 132, 191, 178, 128, 191, 128, 159, + 160, 163, 164, 191, 133, 128, 191, 128, + 178, 179, 186, 187, 191, 135, 142, 143, + 145, 146, 149, 150, 153, 154, 155, 164, + 128, 191, 128, 165, 166, 191, 128, 255, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 
149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 
162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 
132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 130, 131, 135, + 191, 129, 134, 136, 190, 128, 159, 160, + 191, 128, 175, 176, 255, 10, 13, 127, + 194, 216, 219, 220, 224, 225, 226, 234, + 235, 236, 237, 239, 240, 243, 0, 31, + 128, 191, 192, 223, 227, 238, 241, 247, + 248, 255, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 194, 216, + 219, 220, 224, 225, 226, 234, 235, 236, + 237, 239, 240, 243, 32, 126, 192, 223, + 227, 238, 241, 247, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 235, 236, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 237, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 235, 236, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 237, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 
217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, +} + +var _graphclust_single_lengths []byte = []byte{ + 0, 1, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 2, 3, 2, 2, 2, 3, + 2, 2, 3, 3, 1, 2, 4, 2, + 2, 4, 4, 2, 0, 2, 0, 3, + 1, 0, 1, 21, 1, 0, 4, 0, + 0, 0, 1, 2, 0, 1, 1, 1, + 4, 0, 3, 1, 3, 2, 0, 3, + 0, 5, 2, 0, 0, 1, 0, 2, + 0, 0, 15, 0, 0, 0, 4, 0, + 0, 0, 3, 1, 0, 4, 1, 4, + 4, 3, 1, 0, 7, 5, 1, 1, + 0, 1, 0, 23, 1, 0, 1, 1, + 1, 1, 0, 2, 1, 3, 2, 0, + 1, 3, 1, 2, 0, 1, 0, 2, + 1, 2, 3, 4, 0, 0, 0, 1, + 0, 6, 2, 0, 0, 0, 0, 1, + 3, 0, 0, 0, 1, 0, 1, 4, + 0, 0, 0, 1, 1, 1, 4, 0, + 0, 0, 6, 0, 1, 1, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 6, + 0, 1, 0, 1, 0, 2, 0, 0, + 15, 0, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 0, 2, 1, 1, 0, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 0, 0, 0, 0, 1, + 0, 0, 1, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 4, 0, 0, 0, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 3, 0, 0, 0, 0, + 1, 1, 0, 1, 0, 1, 0, 0, + 0, 29, 0, 0, 0, 3, 2, 3, + 2, 2, 2, 3, 2, 2, 3, 3, + 1, 2, 4, 2, 2, 4, 4, 2, + 0, 2, 0, 3, 1, 0, 1, 21, + 1, 0, 4, 0, 0, 0, 1, 2, + 0, 1, 1, 1, 4, 0, 3, 1, + 3, 2, 0, 3, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 4, 0, 0, 0, 3, 1, + 0, 4, 1, 4, 4, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 1, 0, 1, 1, 1, 1, 0, 2, + 1, 3, 2, 0, 1, 3, 1, 2, + 0, 1, 0, 2, 1, 2, 3, 4, + 0, 0, 0, 1, 0, 6, 2, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 1, 0, 1, 4, 0, 0, 0, 1, + 1, 1, 4, 0, 0, 0, 6, 0, + 0, 0, 1, 1, 2, 1, 1, 5, + 0, 24, 0, 24, 0, 0, 23, 0, + 0, 1, 0, 2, 0, 0, 0, 28, + 0, 3, 23, 2, 0, 2, 2, 3, + 2, 2, 2, 0, 54, 54, 27, 1, + 0, 5, 2, 0, 1, 1, 0, 0, + 14, 0, 3, 2, 2, 3, 2, 2, + 2, 54, 54, 27, 1, 0, 2, 0, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 7, 1, 0, 1, 0, + 2, 3, 2, 1, 0, 1, 1, 3, + 0, 1, 3, 0, 1, 1, 2, 1, + 1, 5, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 24, 0, 24, 0, + 0, 23, 0, 0, 1, 0, 2, 0, + 0, 
0, 28, 0, 3, 23, 2, 0, + 2, 2, 3, 2, 2, 2, 0, 54, + 54, 27, 1, 1, 5, 2, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 2, 1, 1, 0, 3, 1, + 0, 6, 5, 1, 1, 0, 1, 0, + 23, 0, 0, 0, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 4, + 0, 0, 0, 0, 1, 0, 6, 0, + 0, 0, 0, 0, 1, 3, 0, 0, + 0, 1, 4, 0, 0, 0, 6, 1, + 7, 3, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 29, + 0, 0, 0, 3, 2, 3, 2, 2, + 2, 3, 2, 2, 3, 3, 1, 2, + 4, 2, 2, 4, 4, 2, 0, 2, + 0, 3, 1, 0, 1, 21, 1, 0, + 4, 0, 0, 0, 1, 2, 0, 1, + 1, 1, 4, 0, 3, 1, 3, 2, + 0, 3, 0, 5, 2, 0, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 4, 0, 0, 0, 3, 1, 0, 4, + 1, 4, 4, 3, 1, 0, 7, 5, + 1, 1, 0, 1, 0, 23, 1, 0, + 1, 1, 1, 1, 0, 2, 1, 3, + 2, 0, 1, 3, 1, 2, 0, 1, + 0, 2, 1, 2, 3, 4, 0, 0, + 0, 1, 0, 6, 2, 0, 0, 0, + 0, 1, 3, 0, 0, 0, 1, 0, + 1, 4, 0, 0, 0, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 1, 1, 1, 4, 0, 0, 0, + 6, 2, 3, 2, 2, 2, 3, 2, + 2, 3, 3, 1, 2, 4, 2, 2, + 4, 4, 2, 0, 2, 0, 3, 1, + 0, 1, 21, 1, 0, 4, 0, 0, + 0, 1, 2, 0, 1, 1, 1, 4, + 0, 3, 1, 3, 2, 0, 3, 0, + 5, 2, 0, 0, 1, 0, 2, 0, + 0, 15, 0, 0, 0, 4, 0, 0, + 0, 3, 1, 0, 4, 1, 4, 4, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 1, 0, 1, 1, 1, + 1, 0, 2, 1, 3, 2, 0, 1, + 3, 1, 2, 0, 1, 0, 2, 1, + 2, 3, 4, 0, 0, 0, 1, 0, + 6, 2, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 1, 0, 1, 4, 0, + 0, 0, 1, 0, 0, 14, 0, 3, + 2, 2, 3, 2, 2, 2, 54, 54, + 29, 1, 0, 0, 0, 0, 2, 1, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 7, 1, 0, 1, + 0, 2, 3, 2, 1, 0, 1, 1, + 3, 0, 1, 5, 0, 0, 17, 20, + 20, 20, 14, 20, 20, 20, 23, 21, + 21, 21, 20, 23, 20, 20, 20, 21, + 21, 21, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, +} + +var _graphclust_range_lengths []byte = []byte{ + 0, 0, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 
4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 5, 2, 6, 2, 8, 4, + 2, 5, 0, 3, 2, 4, 1, 6, + 2, 4, 4, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 4, 4, 1, 1, + 2, 2, 2, 2, 1, 1, 6, 2, + 5, 1, 3, 3, 4, 4, 4, 4, + 2, 0, 0, 1, 1, 0, 1, 0, + 1, 1, 0, 2, 1, 1, 2, 4, + 1, 2, 4, 1, 5, 0, 3, 2, + 1, 0, 0, 2, 0, 0, 0, 0, + 1, 4, 1, 0, 2, 1, 4, 2, + 0, 4, 3, 4, 2, 2, 6, 2, + 2, 4, 1, 4, 2, 4, 1, 3, + 3, 2, 2, 0, 1, 1, 1, 0, + 1, 0, 3, 3, 1, 2, 2, 2, + 0, 5, 1, 1, 0, 1, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 1, 2, 2, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 0, + 1, 0, 1, 0, 1, 0, 1, 1, + 0, 2, 1, 1, 1, 2, 2, 1, + 1, 2, 2, 1, 1, 3, 2, 2, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 2, 2, 0, + 2, 2, 1, 1, 2, 6, 1, 1, + 1, 1, 2, 2, 1, 1, 1, 2, + 2, 0, 1, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 1, 1, 1, + 1, 2, 1, 1, 4, 1, 1, 1, + 1, 1, 4, 1, 2, 2, 5, 2, + 6, 2, 8, 4, 2, 5, 0, 3, + 2, 4, 1, 6, 2, 4, 4, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 4, 4, 1, 1, 2, 2, 2, 2, + 1, 1, 6, 2, 5, 1, 3, 3, + 4, 4, 4, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 2, 4, 1, 2, 4, 1, + 5, 0, 3, 2, 1, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 4, 2, 0, 4, 3, 4, + 2, 2, 6, 2, 2, 4, 1, 4, + 2, 4, 1, 3, 3, 2, 2, 0, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 1, + 2, 3, 1, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 1, 0, 1, + 1, 0, 1, 0, 1, 3, 1, 2, + 2, 1, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 1, 1, 2, 2, + 2, 1, 3, 2, 1, 1, 3, 1, + 3, 3, 1, 0, 0, 0, 0, 0, + 1, 1, 1, 2, 2, 4, 1, 1, + 2, 1, 1, 1, 3, 1, 2, 1, + 2, 1, 2, 0, 0, 1, 1, 5, + 9, 2, 1, 3, 5, 3, 1, 6, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 1, 0, 1, + 1, 0, 1, 1, 0, 1, 0, 1, + 3, 1, 2, 2, 1, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 1, + 1, 2, 2, 1, 1, 5, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 1, 2, 2, 1, 1, 2, + 2, 1, 1, 3, 2, 2, 0, 0, + 2, 0, 0, 0, 0, 1, 4, 1, + 0, 2, 1, 2, 2, 0, 2, 2, + 1, 1, 2, 6, 1, 1, 1, 1, + 2, 2, 1, 1, 1, 2, 2, 0, + 1, 1, 1, 1, 
0, 1, 0, 3, + 3, 1, 2, 2, 2, 0, 5, 1, + 1, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 1, + 4, 1, 2, 2, 5, 2, 6, 2, + 8, 4, 2, 5, 0, 3, 2, 4, + 1, 6, 2, 4, 4, 1, 1, 2, + 1, 2, 1, 4, 0, 0, 4, 4, + 1, 1, 2, 2, 2, 2, 1, 1, + 6, 2, 5, 1, 3, 3, 4, 4, + 4, 4, 2, 0, 0, 1, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 2, 4, 1, 2, 4, 1, 5, 0, + 3, 2, 1, 0, 0, 2, 0, 0, + 0, 0, 1, 4, 1, 0, 2, 1, + 4, 2, 0, 4, 3, 4, 2, 2, + 6, 2, 2, 4, 1, 4, 2, 4, + 1, 3, 3, 2, 2, 0, 1, 1, + 1, 0, 1, 0, 3, 3, 1, 2, + 2, 2, 0, 5, 1, 1, 0, 1, + 0, 1, 1, 1, 0, 0, 0, 3, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 1, 0, + 0, 5, 2, 6, 2, 8, 4, 2, + 5, 0, 3, 2, 4, 1, 6, 2, + 4, 4, 1, 1, 2, 1, 2, 1, + 4, 0, 0, 4, 4, 1, 1, 2, + 2, 2, 2, 1, 1, 6, 2, 5, + 1, 3, 3, 4, 4, 4, 4, 2, + 0, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 2, 1, 1, 2, 4, 1, + 2, 4, 1, 5, 0, 3, 2, 1, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 4, 2, 0, + 4, 3, 4, 2, 2, 6, 2, 2, + 4, 1, 4, 2, 4, 1, 3, 3, + 2, 2, 0, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 0, 1, 1, + 1, 0, 1, 3, 1, 3, 3, 1, + 0, 0, 0, 0, 0, 1, 1, 1, + 3, 2, 4, 1, 0, 1, 1, 1, + 3, 1, 1, 1, 3, 1, 3, 1, + 3, 1, 2, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 5, 9, 2, 1, 3, 5, 3, 1, + 6, 1, 1, 2, 2, 2, 6, 0, + 0, 0, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 6, 8, 11, 15, + 17, 20, 25, 28, 30, 32, 34, 63, + 68, 70, 73, 76, 80, 84, 90, 97, + 102, 106, 112, 115, 120, 123, 129, 135, + 138, 144, 146, 152, 155, 157, 161, 163, + 169, 171, 176, 178, 200, 203, 207, 212, + 214, 217, 220, 222, 225, 227, 230, 233, + 235, 241, 243, 246, 249, 252, 254, 256, + 262, 265, 271, 274, 281, 283, 285, 287, + 289, 291, 294, 296, 298, 314, 317, 319, + 321, 326, 329, 332, 334, 336, 339, 342, + 344, 348, 353, 357, 360, 364, 366, 369, + 377, 383, 385, 387, 389, 395, 397, 421, + 424, 426, 429, 432, 434, 437, 440, 443, + 445, 449, 457, 459, 461, 463, 465, 468, + 471, 473, 475, 477, 480, 483, 488, 490, + 492, 494, 496, 498, 500, 507, 511, 515, + 517, 520, 523, 527, 531, 537, 539, 541, + 545, 547, 549, 
551, 553, 556, 560, 562, + 565, 570, 573, 575, 577, 579, 610, 615, + 617, 620, 626, 634, 640, 649, 654, 665, + 673, 678, 686, 690, 697, 701, 708, 714, + 723, 728, 737, 746, 750, 752, 757, 759, + 765, 768, 773, 775, 797, 803, 808, 814, + 816, 819, 822, 826, 831, 833, 836, 844, + 848, 858, 860, 867, 872, 880, 887, 892, + 900, 903, 909, 912, 914, 916, 918, 920, + 923, 925, 927, 943, 946, 948, 950, 957, + 962, 964, 967, 975, 978, 984, 989, 994, + 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, + 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, + 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, + 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, + 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, + 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, + 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, + 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, + 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, + 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, + 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, + 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, + 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, + 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, + 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, + 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, + 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, + 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, + 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, + 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, + 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, + 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, + 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, + 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, + 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, + 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, + 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, + 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, + 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921, + 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, + 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, + 2037, 2039, 2044, 2046, 2052, 2055, 2060, 2062, + 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, + 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154, + 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, + 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, + 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, + 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, + 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, + 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, + 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, + 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, + 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, + 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, + 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, + 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, + 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, + 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, + 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, + 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, + 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, + 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, + 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, + 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, + 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, + 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, + 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, + 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, + 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, + 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, + 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, + 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, + 3460, 
3463, 3469, 3471, 3477, 3480, 3482, 3486, + 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, + 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, + 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, + 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610, + 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, + 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, + 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, + 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, + 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, + 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, + 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, + 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, + 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, + 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, + 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, + 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, + 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, + 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, + 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, + 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, + 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, + 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, + 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, + 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, + 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, + 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, + 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, + 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, + 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, + 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, + 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, + 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, + 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, + 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, + 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, + 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, + 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958, + 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, + 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, + 5034, 5038, 5044, 5051, 5056, 5060, 5066, 5069, + 5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, + 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132, + 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, + 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, + 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, + 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, + 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, + 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, + 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, + 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, + 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, + 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, + 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, + 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, + 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, + 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, + 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, + 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, + 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, + 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, + 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, + 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, + 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, + 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, + 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, + 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, + 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, + 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, + 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, + 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, + 6130, 
6132, 6138, 6143, 6148, 6154, 6159, 6161, + 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, + 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, + 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, + 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255, + 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, + 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, + 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, + 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, + 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, + 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, + 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, + 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, + 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, + 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, + 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, + 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, + 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, + 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, + 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, + 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, + 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, + 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, + 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, + 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, + 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, + 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, + 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, + 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, + 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, + 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, + 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, + 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, + 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, + 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, + 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, + 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, + 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426, + 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, + 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, + 7490, 7496, 7498, 7500, 7502, 7504, 7506, 7512, + 7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, + 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629, + 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, + 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, + 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, + 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, + 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, + 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, + 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, + 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, + 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, + 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, + 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, + 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, + 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, + 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, + 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, + 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, + 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, + 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, + 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, + 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, + 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, + 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, + 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, + 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, + 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, + 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, + 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, + 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, + 8681, 
8689, 8692, 8698, 8701, 8703, 8705, 8707, + 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, + 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, + 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, + 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863, + 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, + 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, + 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, + 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, + 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, + 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, + 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, + 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, + 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, + 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, + 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, + 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, + 9718, 9739, 9760, 9781, +} + +var _graphclust_indicies []int16 = []int16{ + 0, 1, 3, 2, 2, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 2, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 2, 2, 3, 3, 2, + 3, 2, 4, 5, 6, 7, 8, 10, + 11, 12, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 9, 13, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 2, 2, 3, 2, 2, 2, 3, + 3, 3, 3, 2, 2, 2, 2, 2, + 2, 3, 2, 2, 2, 2, 2, 2, + 3, 2, 2, 2, 2, 3, 3, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 3, 2, + 3, 3, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 2, 3, + 3, 2, 2, 2, 2, 2, 2, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 2, + 3, 2, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 3, 3, 2, 3, 2, 2, 2, + 3, 3, 2, 3, 3, 2, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 2, 2, 2, + 3, 3, 3, 2, 3, 2, 3, 2, + 3, 3, 3, 3, 3, 2, 3, 3, + 2, 53, 54, 55, 56, 57, 2, 3, + 58, 2, 53, 54, 59, 55, 56, 57, + 2, 3, 2, 3, 2, 3, 2, 3, + 2, 3, 2, 60, 61, 2, 3, 2, + 3, 2, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, + 76, 2, 3, 3, 2, 3, 2, 3, + 2, 3, 3, 3, 3, 2, 3, 3, + 2, 2, 2, 3, 3, 2, 3, 2, + 3, 3, 2, 2, 2, 3, 3, 2, + 3, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 3, 2, 3, 3, 2, + 77, 78, 63, 2, 3, 2, 3, 3, + 2, 79, 80, 81, 82, 83, 84, 85, + 2, 86, 87, 88, 89, 90, 2, 3, + 2, 3, 2, 3, 2, 3, 3, 3, + 3, 3, 2, 3, 2, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 104, 108, + 109, 110, 111, 112, 2, 3, 3, 2, + 2, 3, 2, 2, 3, 3, 3, 2, + 3, 2, 3, 3, 2, 2, 2, 3, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 3, 2, 2, + 3, 3, 3, 2, 2, 2, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 2, + 3, 3, 2, 113, 114, 115, 116, 2, + 3, 2, 3, 2, 3, 2, 3, 2, + 117, 2, 3, 2, 118, 119, 120, 121, + 122, 123, 2, 3, 3, 3, 2, 2, + 2, 2, 3, 3, 2, 3, 3, 2, + 2, 2, 3, 3, 3, 3, 2, 124, + 125, 126, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 127, 128, 129, + 2, 130, 2, 2, 130, 2, 130, 130, + 2, 130, 130, 2, 130, 130, 130, 2, + 130, 2, 130, 130, 2, 130, 130, 130, + 130, 2, 130, 130, 2, 2, 130, 130, + 2, 130, 2, 131, 132, 133, 134, 135, + 136, 137, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 22, 151, + 152, 153, 154, 155, 156, 157, 158, 159, + 138, 2, 130, 130, 130, 130, 2, 130, + 2, 130, 130, 2, 3, 3, 2, 2, + 3, 130, 130, 2, 130, 130, 2, 130, + 2, 3, 130, 130, 130, 3, 3, 2, + 130, 130, 130, 2, 2, 2, 130, 2, + 3, 3, 130, 130, 3, 2, 130, 130, + 130, 2, 130, 2, 130, 2, 130, 2, + 3, 2, 2, 130, 130, 2, 130, 2, + 3, 130, 130, 3, 130, 2, 3, 130, + 130, 3, 3, 130, 130, 2, 130, 130, + 3, 2, 130, 130, 130, 3, 3, 3, + 2, 130, 3, 130, 2, 2, 2, 3, + 
2, 2, 2, 130, 130, 130, 3, 130, + 3, 2, 130, 130, 3, 3, 3, 130, + 130, 130, 2, 130, 130, 3, 3, 2, + 2, 2, 130, 130, 130, 2, 130, 2, + 3, 130, 130, 130, 130, 3, 130, 3, + 3, 2, 130, 3, 130, 2, 130, 2, + 130, 3, 130, 130, 2, 130, 2, 130, + 130, 130, 130, 3, 2, 3, 130, 2, + 130, 130, 130, 130, 2, 130, 2, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 2, 3, 130, 130, + 3, 130, 2, 3, 130, 130, 130, 2, + 130, 3, 130, 130, 130, 2, 130, 2, + 130, 130, 2, 130, 130, 2, 3, 130, + 3, 2, 130, 130, 130, 2, 3, 130, + 2, 130, 130, 2, 130, 130, 3, 130, + 3, 3, 130, 2, 130, 130, 3, 2, + 130, 130, 130, 130, 3, 130, 130, 3, + 130, 2, 130, 2, 3, 3, 3, 130, + 130, 3, 2, 130, 2, 130, 2, 3, + 3, 3, 3, 130, 130, 3, 130, 2, + 3, 130, 130, 3, 130, 3, 2, 3, + 130, 3, 130, 2, 3, 130, 130, 130, + 130, 3, 130, 2, 130, 130, 2, 181, + 182, 183, 184, 185, 2, 130, 58, 2, + 130, 2, 130, 2, 130, 2, 130, 2, + 186, 187, 2, 130, 2, 130, 2, 188, + 189, 190, 191, 66, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 2, 130, + 130, 2, 130, 2, 130, 2, 130, 130, + 130, 3, 3, 130, 2, 130, 2, 130, + 2, 3, 130, 2, 130, 3, 2, 3, + 130, 130, 130, 3, 130, 3, 2, 130, + 2, 3, 130, 3, 130, 3, 130, 2, + 130, 130, 3, 130, 2, 130, 130, 130, + 130, 2, 130, 3, 3, 130, 130, 3, + 2, 130, 130, 3, 130, 3, 2, 202, + 203, 189, 2, 130, 2, 130, 130, 2, + 204, 205, 206, 207, 208, 209, 210, 2, + 211, 212, 213, 214, 215, 2, 130, 2, + 130, 2, 130, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 2, 130, 3, 130, 2, + 2, 130, 3, 2, 3, 3, 2, 130, + 3, 130, 130, 2, 130, 2, 3, 130, + 3, 130, 3, 2, 2, 130, 2, 3, + 130, 130, 3, 130, 3, 130, 2, 130, + 3, 130, 2, 130, 130, 3, 130, 3, + 2, 130, 130, 3, 3, 3, 3, 130, + 130, 2, 3, 130, 2, 3, 3, 130, + 2, 130, 3, 130, 3, 130, 3, 130, + 2, 3, 2, 130, 130, 3, 3, 130, + 3, 130, 2, 2, 2, 130, 130, 3, + 130, 3, 130, 2, 2, 130, 3, 3, + 130, 3, 130, 2, 3, 130, 3, 130, + 2, 3, 3, 130, 130, 2, 3, 3, + 3, 130, 130, 2, 239, 240, 115, 241, + 2, 130, 2, 130, 2, 130, 2, 242, + 2, 130, 2, 243, 244, 245, 246, 247, + 248, 2, 3, 3, 130, 130, 130, 2, + 2, 2, 2, 130, 130, 2, 130, 130, + 2, 2, 2, 130, 130, 130, 130, 2, + 249, 250, 251, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 130, 2, 252, 2, + 3, 2, 253, 2, 254, 255, 256, 258, + 257, 2, 130, 2, 2, 130, 130, 3, + 2, 3, 2, 259, 2, 260, 261, 262, + 264, 263, 2, 3, 2, 2, 3, 3, + 79, 80, 81, 82, 83, 84, 2, 3, + 1, 265, 265, 3, 1, 265, 266, 3, + 1, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 267, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 267, 267, 268, 268, 267, 268, + 267, 269, 270, 271, 272, 273, 275, 276, + 277, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, + 294, 295, 296, 274, 278, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 267, 267, 268, 267, 267, 267, 268, 268, + 268, 268, 267, 267, 267, 267, 267, 267, + 268, 267, 267, 267, 267, 267, 267, 268, + 267, 267, 267, 267, 268, 268, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 268, 267, 268, + 268, 267, 267, 267, 267, 267, 267, 268, + 268, 268, 268, 268, 268, 267, 268, 268, + 267, 267, 267, 267, 267, 267, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 267, 268, + 267, 297, 298, 299, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 
310, 311, + 312, 313, 314, 315, 316, 317, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 268, 268, 267, 268, 267, 267, 267, 268, + 268, 267, 268, 268, 267, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 267, 267, 267, 268, + 268, 268, 267, 268, 267, 268, 267, 268, + 268, 268, 268, 268, 267, 268, 268, 267, + 318, 319, 320, 321, 322, 267, 268, 323, + 267, 318, 319, 324, 320, 321, 322, 267, + 268, 267, 268, 267, 268, 267, 268, 267, + 268, 267, 325, 326, 267, 268, 267, 268, + 267, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 267, 268, 268, 267, 268, 267, 268, 267, + 268, 268, 268, 268, 267, 268, 268, 267, + 267, 267, 268, 268, 267, 268, 267, 268, + 268, 267, 267, 267, 268, 268, 267, 268, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 268, 267, 268, 268, 267, 342, + 343, 328, 267, 268, 267, 268, 268, 267, + 344, 345, 346, 347, 348, 349, 350, 267, + 351, 352, 353, 354, 355, 267, 268, 267, + 268, 267, 268, 267, 268, 268, 268, 268, + 268, 267, 268, 267, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 366, 367, + 368, 369, 370, 371, 372, 369, 373, 374, + 375, 376, 377, 267, 268, 268, 267, 267, + 268, 267, 267, 268, 268, 268, 267, 268, + 267, 268, 268, 267, 267, 267, 268, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 268, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 268, 267, 267, 268, + 268, 268, 267, 267, 267, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 378, 379, 380, 381, 267, 268, + 267, 268, 267, 268, 267, 268, 267, 382, + 267, 268, 267, 383, 384, 385, 386, 387, + 388, 267, 268, 268, 268, 267, 267, 267, + 267, 268, 268, 267, 268, 268, 267, 267, + 267, 268, 268, 268, 268, 267, 389, 390, + 391, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 392, 393, 394, 267, + 395, 267, 395, 267, 267, 395, 395, 267, + 395, 395, 267, 395, 395, 395, 267, 395, + 267, 395, 395, 267, 395, 395, 395, 395, + 267, 395, 395, 267, 267, 395, 395, 267, + 395, 267, 396, 397, 398, 399, 400, 401, + 402, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 287, 416, 417, + 418, 419, 420, 421, 422, 423, 424, 403, + 267, 395, 395, 395, 395, 267, 395, 267, + 395, 395, 267, 268, 268, 267, 267, 268, + 395, 395, 267, 395, 395, 267, 395, 267, + 268, 395, 395, 395, 268, 268, 267, 395, + 395, 395, 267, 267, 267, 395, 267, 268, + 268, 395, 395, 268, 267, 395, 395, 395, + 267, 395, 267, 395, 267, 395, 267, 268, + 267, 267, 395, 395, 267, 395, 267, 268, + 395, 395, 268, 395, 267, 268, 395, 395, + 268, 268, 395, 395, 267, 395, 395, 268, + 267, 395, 395, 395, 268, 268, 268, 267, + 395, 268, 395, 267, 267, 267, 268, 267, + 267, 267, 395, 395, 395, 268, 395, 268, + 267, 395, 395, 268, 268, 268, 395, 395, + 395, 267, 395, 395, 268, 268, 267, 267, + 267, 395, 395, 395, 267, 395, 267, 268, + 395, 395, 395, 395, 268, 395, 268, 268, + 267, 395, 268, 395, 267, 395, 267, 395, + 268, 395, 395, 267, 395, 267, 395, 395, + 395, 395, 268, 267, 268, 395, 267, 395, + 395, 395, 395, 267, 395, 267, 425, 426, + 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 445, 267, 268, 395, 395, 268, + 395, 267, 268, 395, 395, 395, 267, 395, + 268, 395, 395, 395, 267, 395, 267, 395, + 395, 267, 395, 395, 267, 268, 395, 268, + 267, 395, 395, 395, 267, 268, 395, 267, + 395, 395, 267, 395, 395, 268, 395, 268, + 268, 395, 267, 395, 395, 268, 267, 395, + 395, 395, 395, 268, 395, 395, 268, 395, + 267, 395, 267, 268, 268, 268, 395, 395, + 268, 267, 395, 
267, 395, 267, 268, 268, + 268, 268, 395, 395, 268, 395, 267, 268, + 395, 395, 268, 395, 268, 267, 268, 395, + 268, 395, 267, 268, 395, 395, 395, 395, + 268, 395, 267, 395, 395, 267, 446, 447, + 448, 449, 450, 267, 395, 323, 267, 395, + 267, 395, 267, 395, 267, 395, 267, 451, + 452, 267, 395, 267, 395, 267, 453, 454, + 455, 456, 331, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 267, 395, 395, + 267, 395, 267, 395, 267, 395, 395, 395, + 268, 268, 395, 267, 395, 267, 395, 267, + 268, 395, 267, 395, 268, 267, 268, 395, + 395, 395, 268, 395, 268, 267, 395, 267, + 268, 395, 268, 395, 268, 395, 267, 395, + 395, 268, 395, 267, 395, 395, 395, 395, + 267, 395, 268, 268, 395, 395, 268, 267, + 395, 395, 268, 395, 268, 267, 467, 468, + 454, 267, 395, 267, 395, 395, 267, 469, + 470, 471, 472, 473, 474, 475, 267, 476, + 477, 478, 479, 480, 267, 395, 267, 395, + 267, 395, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 481, 482, 483, 484, 485, + 486, 487, 488, 489, 490, 491, 492, 493, + 494, 495, 496, 497, 498, 499, 500, 501, + 502, 503, 267, 395, 268, 395, 267, 267, + 395, 268, 267, 268, 268, 267, 395, 268, + 395, 395, 267, 395, 267, 268, 395, 268, + 395, 268, 267, 267, 395, 267, 268, 395, + 395, 268, 395, 268, 395, 267, 395, 268, + 395, 267, 395, 395, 268, 395, 268, 267, + 395, 395, 268, 268, 268, 268, 395, 395, + 267, 268, 395, 267, 268, 268, 395, 267, + 395, 268, 395, 268, 395, 268, 395, 267, + 268, 267, 395, 395, 268, 268, 395, 268, + 395, 267, 267, 267, 395, 395, 268, 395, + 268, 395, 267, 267, 395, 268, 268, 395, + 268, 395, 267, 268, 395, 268, 395, 267, + 268, 268, 395, 395, 267, 268, 268, 268, + 395, 395, 267, 504, 505, 380, 506, 267, + 395, 267, 395, 267, 395, 267, 507, 267, + 395, 267, 508, 509, 510, 511, 512, 513, + 267, 268, 268, 395, 395, 395, 267, 267, + 267, 267, 395, 395, 267, 395, 395, 267, + 267, 267, 395, 395, 395, 395, 267, 514, + 515, 516, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 395, 267, 517, 267, 268, + 267, 518, 267, 519, 520, 521, 523, 522, + 267, 395, 267, 267, 395, 395, 268, 267, + 268, 267, 524, 267, 525, 526, 527, 529, + 528, 267, 268, 267, 267, 268, 268, 344, + 345, 346, 347, 348, 349, 267, 268, 267, + 268, 268, 267, 266, 268, 268, 267, 266, + 268, 267, 266, 268, 267, 531, 532, 530, + 267, 266, 268, 267, 266, 268, 267, 533, + 534, 535, 536, 537, 530, 267, 538, 267, + 297, 298, 299, 533, 534, 539, 300, 301, + 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311, 312, 313, 314, 315, 316, 317, + 267, 540, 538, 297, 298, 299, 541, 535, + 536, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 267, 540, 267, 542, 540, + 297, 298, 299, 543, 536, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 267, + 542, 267, 267, 542, 544, 267, 542, 267, + 545, 546, 267, 540, 267, 267, 542, 267, + 540, 267, 540, 327, 328, 329, 330, 331, + 332, 333, 547, 335, 336, 337, 338, 339, + 340, 341, 549, 550, 551, 552, 553, 554, + 549, 550, 551, 552, 553, 554, 549, 548, + 555, 267, 268, 538, 267, 556, 556, 556, + 542, 267, 297, 298, 299, 541, 539, 300, + 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, + 317, 267, 545, 557, 267, 267, 540, 556, + 556, 542, 556, 556, 542, 556, 556, 556, + 542, 556, 556, 542, 556, 556, 542, 556, + 556, 267, 542, 542, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 
553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 550, 555, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 551, 555, 553, 554, 548, 549, + 550, 551, 553, 554, 548, 549, 550, 551, + 553, 554, 548, 549, 550, 551, 553, 554, + 548, 549, 550, 551, 553, 558, 557, 552, + 267, 555, 556, 267, 540, 542, 268, 268, + 267, 559, 560, 561, 562, 563, 530, 267, + 268, 323, 268, 268, 268, 267, 268, 268, + 267, 395, 268, 267, 395, 268, 267, 268, + 395, 268, 267, 530, 267, 564, 566, 567, + 568, 569, 570, 571, 566, 567, 568, 569, + 570, 571, 566, 530, 565, 555, 267, 268, + 538, 268, 267, 540, 540, 540, 542, 267, + 540, 540, 542, 540, 540, 542, 540, 540, + 540, 542, 540, 540, 542, 540, 540, 542, + 540, 540, 267, 542, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 567, 555, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 568, 555, 570, 571, 565, 566, + 567, 568, 570, 571, 565, 566, 567, 568, + 570, 571, 565, 566, 567, 568, 570, 571, + 565, 566, 567, 568, 570, 572, 573, 569, + 267, 555, 540, 268, 540, 542, 268, 542, + 268, 267, 540, 574, 575, 530, 267, 268, + 267, 268, 268, 268, 267, 577, 578, 579, + 580, 576, 267, 581, 582, 530, 267, 266, + 268, 267, 268, 266, 268, 267, 583, 530, + 267, 268, 268, 267, 584, 530, 267, 268, + 268, 267, 585, 586, 587, 588, 589, 590, + 591, 592, 593, 594, 595, 530, 267, 268, + 596, 267, 344, 345, 346, 347, 348, 349, + 597, 267, 598, 267, 268, 267, 395, 268, + 267, 268, 395, 268, 395, 268, 267, 395, + 395, 268, 395, 268, 395, 268, 395, 268, + 395, 268, 267, 268, 268, 395, 395, 268, + 267, 395, 395, 268, 267, 395, 268, 395, + 268, 267, 268, 395, 268, 395, 268, 267, + 395, 268, 395, 268, 267, 395, 268, 267, + 395, 395, 268, 268, 395, 268, 395, 268, + 395, 267, 576, 267, 599, 576, 267, 322, + 530, 600, 530, 267, 268, 267, 266, 3, + 1, 266, 3, 1, 602, 603, 601, 1, + 266, 3, 1, 266, 3, 1, 604, 605, + 606, 607, 608, 601, 1, 609, 610, 612, + 611, 611, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 611, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 611, 611, 612, 612, 611, 612, 611, 613, + 614, 615, 616, 617, 619, 620, 621, 623, + 624, 625, 626, 627, 628, 629, 630, 631, + 632, 633, 634, 635, 636, 637, 638, 639, + 640, 618, 622, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 611, 611, + 612, 611, 611, 611, 612, 612, 612, 612, + 611, 611, 611, 611, 611, 611, 612, 611, + 611, 611, 611, 611, 611, 612, 611, 611, + 611, 611, 612, 612, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 612, 611, 612, 612, 611, + 611, 611, 611, 611, 611, 612, 612, 612, + 612, 612, 612, 611, 612, 612, 611, 611, + 611, 611, 611, 611, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 611, 612, 611, 641, + 642, 643, 644, 645, 646, 647, 648, 649, + 650, 
651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 612, 612, + 611, 612, 611, 611, 611, 612, 612, 611, + 612, 612, 611, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 611, 611, 611, 612, 612, 612, + 611, 612, 611, 612, 611, 612, 612, 612, + 612, 612, 611, 612, 612, 611, 662, 663, + 664, 665, 666, 611, 612, 667, 611, 662, + 663, 668, 664, 665, 666, 611, 612, 611, + 612, 611, 612, 611, 612, 611, 612, 611, + 669, 670, 611, 612, 611, 612, 611, 671, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 683, 684, 685, 611, 612, + 612, 611, 612, 611, 612, 611, 612, 612, + 612, 612, 611, 612, 612, 611, 611, 611, + 612, 612, 611, 612, 611, 612, 612, 611, + 611, 611, 612, 612, 611, 612, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 612, 611, 612, 612, 611, 686, 687, 672, + 611, 612, 611, 612, 612, 611, 688, 689, + 690, 691, 692, 693, 694, 611, 695, 696, + 697, 698, 699, 611, 612, 611, 612, 611, + 612, 611, 612, 612, 612, 612, 612, 611, + 612, 611, 700, 701, 702, 703, 704, 705, + 706, 707, 708, 709, 710, 711, 712, 713, + 714, 715, 716, 713, 717, 718, 719, 720, + 721, 611, 612, 612, 611, 611, 612, 611, + 611, 612, 612, 612, 611, 612, 611, 612, + 612, 611, 611, 611, 612, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 612, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 612, 611, 611, 612, 612, 612, + 611, 611, 611, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 611, 612, 612, 611, + 722, 723, 724, 725, 611, 612, 611, 612, + 611, 612, 611, 612, 611, 726, 611, 612, + 611, 727, 728, 729, 730, 731, 732, 611, + 612, 612, 612, 611, 611, 611, 611, 612, + 612, 611, 612, 612, 611, 611, 611, 612, + 612, 612, 612, 611, 733, 734, 735, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 736, 737, 738, 611, 739, 611, + 739, 611, 611, 739, 739, 611, 739, 739, + 611, 739, 739, 739, 611, 739, 611, 739, + 739, 611, 739, 739, 739, 739, 611, 739, + 739, 611, 611, 739, 739, 611, 739, 611, + 740, 741, 742, 743, 744, 745, 746, 748, + 749, 750, 751, 752, 753, 754, 755, 756, + 757, 758, 759, 631, 760, 761, 762, 763, + 764, 765, 766, 767, 768, 747, 611, 739, + 739, 739, 739, 611, 739, 611, 739, 739, + 611, 612, 612, 611, 611, 612, 739, 739, + 611, 739, 739, 611, 739, 611, 612, 739, + 739, 739, 612, 612, 611, 739, 739, 739, + 611, 611, 611, 739, 611, 612, 612, 739, + 739, 612, 611, 739, 739, 739, 611, 739, + 611, 739, 611, 739, 611, 612, 611, 611, + 739, 739, 611, 739, 611, 612, 739, 739, + 612, 739, 611, 612, 739, 739, 612, 612, + 739, 739, 611, 739, 739, 612, 611, 739, + 739, 739, 612, 612, 612, 611, 739, 612, + 739, 611, 611, 611, 612, 611, 611, 611, + 739, 739, 739, 612, 739, 612, 611, 739, + 739, 612, 612, 612, 739, 739, 739, 611, + 739, 739, 612, 612, 611, 611, 611, 739, + 739, 739, 611, 739, 611, 612, 739, 739, + 739, 739, 612, 739, 612, 612, 611, 739, + 612, 739, 611, 739, 611, 739, 612, 739, + 739, 611, 739, 611, 739, 739, 739, 739, + 612, 611, 612, 739, 611, 739, 739, 739, + 739, 611, 739, 611, 769, 770, 771, 772, + 773, 774, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 787, 788, + 789, 611, 612, 739, 739, 612, 739, 611, + 612, 739, 739, 739, 611, 739, 612, 739, + 739, 739, 611, 739, 611, 739, 739, 611, + 739, 739, 611, 612, 739, 612, 611, 739, + 739, 739, 611, 612, 739, 611, 739, 739, + 611, 739, 739, 612, 739, 612, 612, 739, + 611, 739, 739, 612, 611, 739, 739, 739, + 739, 612, 739, 739, 612, 739, 611, 739, + 611, 612, 612, 612, 739, 739, 
612, 611, + 739, 611, 739, 611, 612, 612, 612, 612, + 739, 739, 612, 739, 611, 612, 739, 739, + 612, 739, 612, 611, 612, 739, 612, 739, + 611, 612, 739, 739, 739, 739, 612, 739, + 611, 739, 739, 611, 790, 791, 792, 793, + 794, 611, 739, 667, 611, 739, 611, 739, + 611, 739, 611, 739, 611, 795, 796, 611, + 739, 611, 739, 611, 797, 798, 799, 800, + 675, 801, 802, 803, 804, 805, 806, 807, + 808, 809, 810, 611, 739, 739, 611, 739, + 611, 739, 611, 739, 739, 739, 612, 612, + 739, 611, 739, 611, 739, 611, 612, 739, + 611, 739, 612, 611, 612, 739, 739, 739, + 612, 739, 612, 611, 739, 611, 612, 739, + 612, 739, 612, 739, 611, 739, 739, 612, + 739, 611, 739, 739, 739, 739, 611, 739, + 612, 612, 739, 739, 612, 611, 739, 739, + 612, 739, 612, 611, 811, 812, 798, 611, + 739, 611, 739, 739, 611, 813, 814, 815, + 816, 817, 818, 819, 611, 820, 821, 822, + 823, 824, 611, 739, 611, 739, 611, 739, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 825, 826, 827, 828, 829, 830, 831, + 832, 833, 834, 835, 836, 837, 838, 839, + 840, 841, 842, 843, 844, 845, 846, 847, + 611, 739, 612, 739, 611, 611, 739, 612, + 611, 612, 612, 611, 739, 612, 739, 739, + 611, 739, 611, 612, 739, 612, 739, 612, + 611, 611, 739, 611, 612, 739, 739, 612, + 739, 612, 739, 611, 739, 612, 739, 611, + 739, 739, 612, 739, 612, 611, 739, 739, + 612, 612, 612, 612, 739, 739, 611, 612, + 739, 611, 612, 612, 739, 611, 739, 612, + 739, 612, 739, 612, 739, 611, 612, 611, + 739, 739, 612, 612, 739, 612, 739, 611, + 611, 611, 739, 739, 612, 739, 612, 739, + 611, 611, 739, 612, 612, 739, 612, 739, + 611, 612, 739, 612, 739, 611, 612, 612, + 739, 739, 611, 612, 612, 612, 739, 739, + 611, 848, 849, 724, 850, 611, 739, 611, + 739, 611, 739, 611, 851, 611, 739, 611, + 852, 853, 854, 855, 856, 857, 611, 612, + 612, 739, 739, 739, 611, 611, 611, 611, + 739, 739, 611, 739, 739, 611, 611, 611, + 739, 739, 739, 739, 611, 858, 859, 860, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 739, 611, 861, 611, 612, 611, 862, + 611, 863, 864, 865, 867, 866, 611, 739, + 611, 611, 739, 739, 612, 611, 612, 611, + 868, 611, 869, 870, 871, 873, 872, 611, + 612, 611, 611, 612, 612, 688, 689, 690, + 691, 692, 693, 611, 641, 642, 643, 604, + 605, 874, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 875, 610, 641, + 642, 643, 876, 606, 607, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 611, + 875, 611, 877, 875, 641, 642, 643, 878, + 607, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 611, 877, 611, 609, 877, + 879, 611, 877, 611, 880, 881, 611, 875, + 611, 611, 877, 611, 875, 611, 875, 671, + 672, 673, 674, 675, 676, 677, 882, 679, + 680, 681, 682, 683, 684, 685, 884, 885, + 886, 887, 888, 889, 884, 885, 886, 887, + 888, 889, 884, 883, 890, 611, 612, 610, + 611, 891, 891, 891, 877, 611, 641, 642, + 643, 876, 874, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 611, 880, 892, + 611, 611, 875, 891, 891, 877, 891, 891, + 877, 891, 891, 891, 877, 891, 891, 877, + 891, 891, 877, 891, 891, 611, 877, 877, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 885, 890, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 
888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 886, 890, + 888, 889, 883, 884, 885, 886, 888, 889, + 883, 884, 885, 886, 888, 889, 883, 884, + 885, 886, 888, 889, 883, 884, 885, 886, + 888, 893, 892, 887, 611, 890, 891, 611, + 875, 877, 265, 3, 1, 894, 895, 896, + 897, 898, 601, 1, 265, 899, 3, 265, + 3, 265, 3, 1, 901, 900, 900, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 900, 901, 901, 900, 901, 901, + 901, 901, 900, 901, 901, 900, 900, 901, + 901, 900, 901, 900, 902, 903, 904, 905, + 906, 908, 909, 910, 912, 913, 914, 915, + 916, 917, 918, 919, 920, 921, 922, 923, + 924, 925, 926, 927, 928, 929, 907, 911, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 900, 900, 901, 900, 900, + 900, 901, 901, 901, 901, 900, 900, 900, + 900, 900, 900, 901, 900, 900, 900, 900, + 900, 900, 901, 900, 900, 900, 900, 901, + 901, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 900, 900, 900, 900, + 900, 900, 901, 901, 901, 901, 901, 901, + 900, 901, 901, 900, 900, 900, 900, 900, + 900, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 900, 901, 900, 930, 931, 932, 933, + 934, 935, 936, 937, 938, 939, 940, 941, + 942, 943, 944, 945, 946, 947, 948, 949, + 950, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 901, 901, 900, 901, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 901, 900, + 901, 901, 900, 951, 952, 953, 954, 955, + 900, 901, 899, 900, 901, 900, 901, 900, + 901, 900, 901, 900, 956, 957, 900, 901, + 900, 901, 900, 958, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 900, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 900, 901, + 901, 900, 900, 900, 901, 901, 900, 901, + 900, 901, 901, 900, 900, 900, 901, 901, + 900, 901, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 901, 900, 901, 901, + 900, 973, 974, 959, 900, 901, 900, 901, + 901, 900, 975, 976, 977, 978, 979, 980, + 900, 981, 982, 983, 984, 985, 900, 901, + 900, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 999, 1003, + 1004, 1005, 1006, 1007, 900, 901, 901, 900, + 900, 901, 900, 900, 901, 901, 901, 900, + 901, 900, 901, 901, 900, 900, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 901, 900, 900, + 901, 901, 901, 900, 900, 900, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 900, + 901, 901, 900, 1008, 1009, 1010, 1011, 900, + 901, 900, 901, 900, 901, 900, 901, 900, + 1012, 900, 901, 900, 1013, 1014, 1015, 1016, + 1017, 1018, 900, 901, 901, 901, 900, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 901, 900, 1019, + 1020, 1021, 900, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 1022, 900, 1023, + 1024, 1025, 1027, 1026, 900, 901, 900, 900, + 901, 901, 951, 952, 1028, 953, 954, 955, + 900, 901, 900, 975, 976, 977, 978, 979, + 980, 1029, 900, 1030, 1031, 1032, 900, 1033, + 900, 1033, 900, 900, 1033, 1033, 900, 1033, + 1033, 900, 1033, 1033, 1033, 900, 1033, 900, + 1033, 1033, 
900, 1033, 1033, 1033, 1033, 900, + 1033, 1033, 900, 900, 1033, 1033, 900, 1033, + 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, + 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, + 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, + 1033, 900, 901, 901, 900, 900, 901, 1033, + 1033, 900, 1033, 1033, 900, 1033, 900, 901, + 1033, 1033, 1033, 901, 901, 900, 1033, 1033, + 1033, 900, 900, 900, 1033, 900, 901, 901, + 1033, 1033, 901, 900, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 901, 900, + 900, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 901, 1033, 900, 901, 1033, 1033, 901, + 901, 1033, 1033, 900, 1033, 1033, 901, 900, + 1033, 1033, 1033, 901, 901, 901, 900, 1033, + 901, 1033, 900, 900, 900, 901, 900, 900, + 900, 1033, 1033, 1033, 901, 1033, 901, 900, + 1033, 1033, 901, 901, 901, 1033, 1033, 1033, + 900, 1033, 1033, 901, 901, 900, 900, 900, + 1033, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 1033, 1033, 901, 1033, 901, 901, 900, + 1033, 901, 1033, 900, 1033, 900, 1033, 901, + 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, + 1033, 901, 900, 901, 1033, 900, 1033, 1033, + 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, + 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, + 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, + 1082, 1083, 900, 901, 1033, 1033, 901, 1033, + 900, 901, 1033, 1033, 1033, 900, 1033, 901, + 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, + 900, 1033, 1033, 900, 901, 1033, 901, 900, + 1033, 1033, 1033, 900, 901, 1033, 900, 1033, + 1033, 900, 1033, 1033, 901, 1033, 901, 901, + 1033, 900, 1033, 1033, 901, 900, 1033, 1033, + 1033, 1033, 901, 1033, 1033, 901, 1033, 900, + 1033, 900, 901, 901, 901, 1033, 1033, 901, + 900, 1033, 900, 1033, 900, 901, 901, 901, + 901, 1033, 1033, 901, 1033, 900, 901, 1033, + 1033, 901, 1033, 901, 900, 901, 1033, 901, + 1033, 900, 901, 1033, 1033, 1033, 1033, 901, + 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, + 1087, 1088, 900, 1033, 899, 900, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 1089, 1090, + 900, 1033, 900, 1033, 900, 1091, 1092, 1093, + 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, + 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 1033, 1033, 901, + 901, 1033, 900, 1033, 900, 1033, 900, 901, + 1033, 900, 1033, 901, 900, 901, 1033, 1033, + 1033, 901, 1033, 901, 900, 1033, 900, 901, + 1033, 901, 1033, 901, 1033, 900, 1033, 1033, + 901, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 901, 901, 1033, 1033, 901, 900, 1033, + 1033, 901, 1033, 901, 900, 1105, 1106, 1092, + 900, 1033, 900, 1033, 1033, 900, 1107, 1108, + 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, + 1116, 1117, 1118, 900, 1033, 900, 1033, 900, + 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, + 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, + 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, + 1141, 900, 1033, 901, 1033, 900, 900, 1033, + 901, 900, 901, 901, 900, 1033, 901, 1033, + 1033, 900, 1033, 900, 901, 1033, 901, 1033, + 901, 900, 900, 1033, 900, 901, 1033, 1033, + 901, 1033, 901, 1033, 900, 1033, 901, 1033, + 900, 1033, 1033, 901, 1033, 901, 900, 1033, + 1033, 901, 901, 901, 901, 1033, 1033, 900, + 901, 1033, 900, 901, 901, 1033, 900, 1033, + 901, 1033, 901, 1033, 901, 1033, 900, 901, + 900, 1033, 1033, 901, 901, 1033, 901, 1033, + 900, 900, 900, 1033, 1033, 901, 1033, 901, + 1033, 900, 900, 1033, 901, 901, 1033, 901, + 1033, 900, 901, 1033, 901, 1033, 900, 901, + 901, 1033, 1033, 900, 
901, 901, 901, 1033, + 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, + 900, 1033, 900, 1033, 900, 1145, 900, 1033, + 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, + 901, 901, 1033, 1033, 1033, 900, 900, 900, + 900, 1033, 1033, 900, 1033, 1033, 900, 900, + 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, + 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1155, 900, 901, 900, + 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, + 1033, 900, 900, 1033, 1033, 901, 900, 901, + 900, 3, 265, 3, 1, 1162, 3, 1, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, + 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, + 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, + 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, + 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, + 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, + 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, + 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, + 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, + 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, + 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, + 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, + 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, + 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, + 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, + 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, + 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, + 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 
1292, 1293, 1294, + 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, + 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, + 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, + 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, + 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, + 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, + 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, + 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, + 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, + 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, + 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, + 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, + 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, + 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, + 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, + 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, + 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, + 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, + 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, + 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, + 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, + 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, + 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, + 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, + 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, + 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, + 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, + 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, + 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, + 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, + 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, + 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, + 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, + 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, + 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, + 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, + 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, + 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, + 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, + 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, + 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, + 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, + 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, + 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, + 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, + 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, + 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, + 1379, 1380, 1381, 1163, 1162, 
1163, 1162, 1163, + 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, + 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, + 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, + 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, + 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, + 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, + 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, + 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, + 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, + 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, + 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, + 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, + 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, + 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, + 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, + 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, + 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, + 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, + 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, + 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, + 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, + 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, + 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, + 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, + 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, + 3, 1162, 3, 1, 601, 1, 1425, 1427, + 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, + 1430, 1431, 1432, 1427, 601, 1426, 890, 1, + 3, 610, 3, 1, 875, 875, 875, 877, + 1, 875, 875, 877, 875, 875, 877, 875, + 875, 875, 877, 875, 875, 877, 875, 875, + 877, 875, 875, 1, 877, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1428, 890, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, + 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, + 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, + 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, + 1435, 1437, 1430, 1436, 1, 890, 875, 3, + 875, 877, 3, 877, 3, 1, 875, 1, + 265, 265, 1, 265, 1438, 1439, 601, 1, + 265, 3, 1, 3, 3, 265, 3, 1, + 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, + 601, 1, 266, 3, 1, 3, 266, 3, + 1, 1447, 601, 1, 3, 265, 3, 1, + 1448, 601, 1, 3, 265, 3, 1, 1449, + 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, + 1458, 1459, 601, 1, 3, 1460, 1, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, + 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, + 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, + 1461, 1461, 
1462, 1462, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, + 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, + 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, + 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, + 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, + 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, + 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, + 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, + 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, + 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, + 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, + 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, + 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, + 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, + 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, + 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, + 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, + 1461, 1462, 1462, 1462, 1461, 1462, 1461, 1462, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, + 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, + 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, + 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, + 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, + 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, + 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, + 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, + 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, + 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, + 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, + 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, + 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, + 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, + 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, + 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, + 1589, 1589, 
1461, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, + 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, + 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, + 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, + 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, + 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, + 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, + 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, + 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, + 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, + 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, + 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, + 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, + 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, + 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, + 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, + 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, + 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, + 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, + 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, + 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, + 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, + 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, + 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, + 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, + 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, + 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, + 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, + 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, + 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, + 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, + 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1462, 1462, 1589, 1589, 1462, 1461, 1589, 1589, + 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, + 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, + 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, + 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, + 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, + 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, + 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, + 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, + 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, + 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, + 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, + 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, + 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, + 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, + 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, + 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, + 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, + 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, + 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, + 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, + 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, + 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, + 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, + 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 
1461, 1711, 1461, 1462, 1461, 1712, + 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, + 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, + 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, + 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, + 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, + 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, + 1162, 3, 1, 3, 1162, 3, 1162, 3, + 1, 1162, 1162, 3, 1162, 3, 1162, 3, + 1162, 3, 1162, 3, 1, 3, 3, 1162, + 1162, 3, 1, 1162, 1162, 3, 1, 1162, + 3, 1162, 3, 1, 3, 1162, 3, 1162, + 3, 1, 1162, 3, 1162, 3, 1, 1162, + 3, 1, 1162, 1162, 3, 3, 1162, 3, + 1162, 3, 1162, 1, 1440, 1, 1726, 1440, + 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, + 1, 265, 3, 1, 3, 265, 1, 1, + 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, + 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, + 1729, 1, 1732, 1740, 1747, 1, 1731, 262, + 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, + 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, + 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, + 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, + 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, + 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, + 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, + 1799, 1800, 1801, 1803, 268, 530, 576, 1802, + 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, + 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, + 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, + 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, + 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, + 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, + 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, + 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, + 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, + 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, + 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, + 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, + 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, + 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, + 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, + 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, + 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, + 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, + 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, + 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, + 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 
1913, + 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, + 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, + 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, + 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, + 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, + 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, + 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, + 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, + 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, + 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, + 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, + 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, + 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, + 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, + 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, + 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, + 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, + 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, + 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, + 2021, 1982, +} + +var _graphclust_trans_targs []int16 = []int16{ + 1974, 0, 1974, 1975, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 68, 70, + 71, 72, 1976, 69, 74, 75, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 93, 94, 96, + 102, 125, 130, 132, 139, 143, 97, 98, + 99, 100, 101, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, + 124, 126, 127, 128, 129, 131, 133, 134, + 135, 136, 137, 138, 140, 141, 142, 144, + 291, 292, 1977, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 210, 211, 212, + 213, 214, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 234, 235, 237, 243, 267, 271, + 273, 280, 284, 238, 239, 240, 241, 242, + 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 268, + 269, 270, 272, 274, 275, 276, 277, 278, + 279, 281, 282, 283, 285, 287, 288, 289, + 145, 290, 146, 294, 295, 296, 2, 297, + 3, 1974, 1978, 1974, 1979, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 344, 345, 346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, 361, 362, 363, 364, 366, 368, + 370, 371, 372, 1980, 369, 374, 375, 377, + 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 393, 394, + 396, 402, 425, 430, 432, 439, 443, 397, + 398, 399, 400, 401, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 426, 427, 428, 429, 431, 433, + 434, 435, 436, 437, 438, 440, 441, 442, + 444, 591, 592, 1981, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, + 486, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 504, 505, 506, 507, 508, 510, 511, + 512, 513, 514, 516, 517, 519, 520, 521, + 522, 523, 524, 525, 526, 527, 528, 529, + 530, 531, 532, 
534, 535, 537, 543, 567, + 571, 573, 580, 584, 538, 539, 540, 541, + 542, 544, 545, 546, 547, 548, 549, 550, + 551, 552, 553, 554, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 566, + 568, 569, 570, 572, 574, 575, 576, 577, + 578, 579, 581, 582, 583, 585, 587, 588, + 589, 445, 590, 446, 594, 595, 596, 302, + 597, 303, 599, 605, 606, 608, 610, 613, + 616, 640, 1982, 622, 1983, 612, 1984, 615, + 618, 620, 621, 624, 625, 629, 630, 631, + 632, 633, 634, 635, 1985, 628, 639, 642, + 643, 644, 645, 646, 649, 650, 651, 652, + 653, 654, 655, 656, 660, 661, 663, 664, + 647, 666, 669, 671, 673, 667, 668, 670, + 672, 674, 678, 679, 680, 681, 682, 683, + 684, 685, 686, 687, 1986, 676, 677, 690, + 691, 299, 695, 696, 698, 997, 1000, 1003, + 1027, 1974, 1987, 1974, 1988, 712, 713, 714, + 715, 716, 717, 718, 719, 720, 721, 722, + 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 733, 734, 735, 736, 737, 738, + 739, 741, 742, 743, 744, 745, 746, 747, + 748, 749, 750, 751, 752, 753, 754, 755, + 756, 757, 758, 759, 760, 761, 763, 765, + 767, 768, 769, 1989, 766, 771, 772, 774, + 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 790, 791, + 793, 799, 822, 827, 829, 836, 840, 794, + 795, 796, 797, 798, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 823, 824, 825, 826, 828, 830, + 831, 832, 833, 834, 835, 837, 838, 839, + 841, 988, 989, 1990, 855, 856, 857, 858, + 859, 860, 861, 862, 863, 864, 865, 866, + 867, 868, 869, 870, 871, 872, 873, 874, + 875, 876, 877, 878, 879, 880, 881, 882, + 883, 885, 886, 887, 888, 889, 890, 891, + 892, 893, 894, 895, 896, 897, 898, 899, + 900, 901, 902, 903, 904, 905, 907, 908, + 909, 910, 911, 913, 914, 916, 917, 918, + 919, 920, 921, 922, 923, 924, 925, 926, + 927, 928, 929, 931, 932, 934, 940, 964, + 968, 970, 977, 981, 935, 936, 937, 938, + 939, 941, 942, 943, 944, 945, 946, 947, + 948, 949, 950, 951, 952, 953, 954, 955, + 956, 957, 958, 959, 960, 961, 962, 963, + 965, 966, 967, 969, 971, 972, 973, 974, + 975, 976, 978, 979, 980, 982, 984, 985, + 986, 842, 987, 843, 991, 992, 993, 699, + 994, 700, 1009, 1991, 999, 1992, 1002, 1005, + 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, + 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, + 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, + 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, + 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, + 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, + 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, + 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, + 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, + 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, + 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, + 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, + 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, + 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, + 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, + 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, + 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, + 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 
1260, 1261, 1262, 1263, 1264, 1265, + 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, + 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, + 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, + 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, + 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, + 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, + 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, + 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, + 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, + 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, + 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, + 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, + 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, + 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, + 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, + 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, + 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, + 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, + 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, + 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, + 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, + 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, + 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, + 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, + 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, + 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, + 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, + 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, + 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, + 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, + 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, + 1330, 1630, 1631, 1632, 1633, 1634, 1635, 1636, + 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, + 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, + 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, + 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, + 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, + 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, + 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, + 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, + 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, + 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, + 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, + 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, + 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, + 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, + 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, + 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, + 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, + 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, + 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, + 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, + 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, + 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, + 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, + 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, + 1900, 1924, 
1928, 1930, 1937, 1941, 1895, 1896, + 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, + 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, + 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, + 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, + 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, + 1973, 1974, 1, 1975, 299, 300, 301, 692, + 693, 694, 697, 1028, 1628, 1629, 1638, 1639, + 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, + 14, 43, 65, 73, 76, 92, 298, 293, + 67, 95, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 187, 209, 215, + 218, 233, 236, 286, 1974, 600, 601, 602, + 603, 604, 607, 641, 648, 657, 658, 659, + 662, 665, 688, 689, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 343, + 365, 373, 376, 392, 598, 593, 367, 395, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 487, 509, 515, 518, 533, + 536, 586, 609, 623, 636, 637, 638, 611, + 619, 614, 617, 626, 627, 675, 1974, 701, + 702, 703, 704, 705, 706, 707, 708, 709, + 710, 711, 996, 762, 770, 1010, 1023, 1024, + 1025, 789, 995, 990, 740, 773, 764, 792, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 884, 906, 912, 915, 930, + 933, 983, 998, 1006, 1001, 1004, 1013, 1014, + 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, + 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, + 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, + 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, + 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, + 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, + 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, + 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, + 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, + 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, + 1866, 1872, 1875, 1890, 1893, 1943, +} + +var _graphclust_trans_actions []byte = []byte{ + 31, 0, 27, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 34, 40, 25, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 40, 0, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 29, 51, 17, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 51, 0, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 40, 21, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 40, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 19, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 40, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 23, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 43, 1, 47, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 15, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 13, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _graphclust_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 37, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_eof_trans []int16 = []int16{ + 0, 0, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 0, 0, 0, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 
268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 0, 0, 0, 0, + 0, 0, 610, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 610, 612, 612, + 610, 612, 612, 610, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 610, 612, + 612, 612, 612, 0, 0, 0, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 0, + 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 
1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 
1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1750, + 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, + 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, + 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, + 1983, 1983, 1983, 1983, +} + +const graphclust_start int = 1974 +const graphclust_first_final int = 1974 +const graphclust_error int = 0 + +const graphclust_en_main int = 1974 + + +// line 14 "grapheme_clusters.rl" + + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. +func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + +// line 4976 "grapheme_clusters.go" + { + cs = graphclust_start + ts = 0 + te = 0 + act = 0 + } + +// line 4984 "grapheme_clusters.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } +_resume: + _acts = int(_graphclust_from_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts - 1] { + case 4: +// line 1 "NONE" + +ts = p + +// line 5008 "grapheme_clusters.go" + } + } + + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) + + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid + 1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + +_match: + _trans = int(_graphclust_indicies[_trans]) +_eof_trans: + cs = int(_graphclust_trans_targs[_trans]) + + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 0: +// line 46 "grapheme_clusters.rl" + + + startPos = p + + case 1: +// line 50 "grapheme_clusters.rl" + + + endPos = p + + case 5: +// line 1 "NONE" + +te = p+1 + + case 6: +// line 54 "grapheme_clusters.rl" + +act = 3; + case 7: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 8: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 9: +// 
line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 10: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 11: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 12: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 13: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 14: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 15: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 16: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 17: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 18: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 19: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 20: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 21: +// line 1 "NONE" + + switch act { + case 0: + {cs = 0 +goto _again +} + case 3: + {p = (te) - 1 + + return endPos+1, data[startPos:endPos+1], nil + } + } + +// line 5218 "grapheme_clusters.go" + } + } + +_again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 2: +// line 1 "NONE" + +ts = 0 + + case 3: +// line 1 "NONE" + +act = 0 + +// line 5238 "grapheme_clusters.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: {} + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: {} + } + +// line 116 "grapheme_clusters.rl" + + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl new file mode 100644 index 0000000..003ffbf --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl @@ -0,0 +1,132 @@ +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT +%%{ + # (except you are actually in grapheme_clusters.rl here, so edit away!) + + machine graphclust; + write data; +}%% + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. 
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + %%{ + include GraphemeCluster "grapheme_clusters_table.rl"; + + action start { + startPos = p + } + + action end { + endPos = p + } + + action emit { + return endPos+1, data[startPos:endPos+1], nil + } + + ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?; + AnyExtender = Extend | ZWJGlue | SpacingMark; + Extension = AnyExtender*; + ReplacementChar = (0xEF 0xBF 0xBD); + + CRLFSeq = CR LF; + ControlSeq = Control | ReplacementChar; + HangulSeq = ( + L+ (((LV? V+ | LVT) T*)?|LV?) | + LV V* T* | + V+ T* | + LVT T* | + T+ + ) Extension; + EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension; + ZWJSeq = ZWJGlue Extension; + EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + + # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension + OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension; + + # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break + PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; + + CRLFTok = CRLFSeq >start @end; + ControlTok = ControlSeq >start @end; + HangulTok = HangulSeq >start @end; + EmojiTok = EmojiSeq >start @end; + ZWJTok = ZWJSeq >start @end; + EmojiFlagTok = EmojiFlagSeq >start @end; + OtherTok = OtherSeq >start @end; + PrependTok = PrependSeq >start @end; + + main := |* + CRLFTok => emit; + ControlTok => emit; + HangulTok => emit; + EmojiTok => emit; + ZWJTok => emit; + EmojiFlagTok => emit; + PrependTok => emit; + OtherTok => emit; + + # any single valid UTF-8 character would also be valid per spec, + # but we'll handle that separately after the loop so we can deal + # with requesting more bytes if we're not at EOF. + *|; + + write init; + write exec; + }%% + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. 
+ _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl new file mode 100644 index 0000000..fb45118 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl @@ -0,0 +1,1583 @@ +# The following Ragel file was autogenerated with unicode2ragel.rb +# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +# +# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"]. +# +# To use this, make sure that your alphtype is set to byte, +# and that your input is in utf8. + +%%{ + machine GraphemeCluster; + + Prepend = + 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... + | 0xDB 0x9D #Cf ARABIC END OF AYAH + | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK + | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH + | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH + | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN + | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... + ; + + CR = + 0x0D #Cc + ; + + LF = + 0x0A #Cc + ; + + Control = + 0x00..0x09 #Cc [10] .. + | 0x0B..0x0C #Cc [2] .. + | 0x0E..0x1F #Cc [18] .. + | 0x7F #Cc [33] .. + | 0xC2 0x80..0x9F # + | 0xC2 0xAD #Cf SOFT HYPHEN + | 0xD8 0x9C #Cf ARABIC LETTER MARK + | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR + | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE + | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... + | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR + | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR + | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... + | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS + | 0xE2 0x81 0xA5 #Cn + | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... + | 0xED 0xA0 0x80..0xFF #Cs [2048] .... + | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... + | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... + | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... + | 0xF3 0xA0 0x80 0x80 #Cn + | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG + | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. + | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. + | 0xF3 0xA0 0x83 0x00..0xBF # + | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. 
0 { + line = line[0:commentStart] + } + pieces := strings.Split(line, sep) + if len(pieces) != 2 { + log.Printf("unexpected %d pieces in %s", len(pieces), line) + return + } + + propertyName := strings.TrimSpace(pieces[1]) + + rangeTable, ok := propertyRanges[propertyName] + if !ok { + rangeTable = &unicode.RangeTable{ + LatinOffset: 0, + } + propertyRanges[propertyName] = rangeTable + } + + codepointRange := strings.TrimSpace(pieces[0]) + rngeIndex := strings.Index(codepointRange, rnge) + + if rngeIndex < 0 { + // single codepoint, not range + codepointInt, err := strconv.ParseUint(codepointRange, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + if codepointInt < 0x10000 { + r16 := unicode.Range16{ + Lo: uint16(codepointInt), + Hi: uint16(codepointInt), + Stride: 1, + } + addR16ToTable(rangeTable, r16) + } else { + r32 := unicode.Range32{ + Lo: uint32(codepointInt), + Hi: uint32(codepointInt), + Stride: 1, + } + addR32ToTable(rangeTable, r32) + } + } else { + rngeStart := codepointRange[0:rngeIndex] + rngeEnd := codepointRange[rngeIndex+2:] + rngeStartInt, err := strconv.ParseUint(rngeStart, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + rngeEndInt, err := strconv.ParseUint(rngeEnd, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + if rngeStartInt < 0x10000 && rngeEndInt < 0x10000 { + r16 := unicode.Range16{ + Lo: uint16(rngeStartInt), + Hi: uint16(rngeEndInt), + Stride: 1, + } + addR16ToTable(rangeTable, r16) + } else if rngeStartInt >= 0x10000 && rngeEndInt >= 0x10000 { + r32 := unicode.Range32{ + Lo: uint32(rngeStartInt), + Hi: uint32(rngeEndInt), + Stride: 1, + } + addR32ToTable(rangeTable, r32) + } else { + log.Printf("unexpected range") + } + } +} + +func addR16ToTable(r *unicode.RangeTable, r16 unicode.Range16) { + if r.R16 == nil { + r.R16 = make([]unicode.Range16, 0, 1) + } + r.R16 = append(r.R16, r16) + if r16.Hi <= unicode.MaxLatin1 { + r.LatinOffset++ + } +} + +func addR32ToTable(r *unicode.RangeTable, r32 unicode.Range32) { + if r.R32 == nil { + r.R32 = make([]unicode.Range32, 0, 1) + } + r.R32 = append(r.R32, r32) +} + +func generateTables(prefix string, propertyRanges map[string]*unicode.RangeTable) { + prNames := make([]string, 0, len(propertyRanges)) + for k := range propertyRanges { + prNames = append(prNames, k) + } + sort.Strings(prNames) + for _, key := range prNames { + rt := propertyRanges[key] + fmt.Fprintf(output, "var _%s%s = %s\n", prefix, key, generateRangeTable(rt)) + } + fmt.Fprintf(output, "type _%sRuneRange unicode.RangeTable\n", prefix) + + fmt.Fprintf(output, "func _%sRuneType(r rune) *_%sRuneRange {\n", prefix, prefix) + fmt.Fprintf(output, "\tswitch {\n") + for _, key := range prNames { + fmt.Fprintf(output, "\tcase unicode.Is(_%s%s, r):\n\t\treturn (*_%sRuneRange)(_%s%s)\n", prefix, key, prefix, prefix, key) + } + fmt.Fprintf(output, "\tdefault:\n\t\treturn nil\n") + fmt.Fprintf(output, "\t}\n") + fmt.Fprintf(output, "}\n") + + fmt.Fprintf(output, "func (rng *_%sRuneRange) String() string {\n", prefix) + fmt.Fprintf(output, "\tswitch (*unicode.RangeTable)(rng) {\n") + for _, key := range prNames { + fmt.Fprintf(output, "\tcase _%s%s:\n\t\treturn %q\n", prefix, key, key) + } + fmt.Fprintf(output, "\tdefault:\n\t\treturn \"Other\"\n") + fmt.Fprintf(output, "\t}\n") + fmt.Fprintf(output, "}\n") +} + +func generateRangeTable(rt *unicode.RangeTable) string { + rv := "&unicode.RangeTable{\n" + if rt.R16 != nil { + rv += "\tR16: []unicode.Range16{\n" + 
for _, r16 := range rt.R16 { + rv += fmt.Sprintf("\t\t%#v,\n", r16) + } + rv += "\t},\n" + } + if rt.R32 != nil { + rv += "\tR32: []unicode.Range32{\n" + for _, r32 := range rt.R32 { + rv += fmt.Sprintf("\t\t%#v,\n", r32) + } + rv += "\t},\n" + } + rv += fmt.Sprintf("\t\tLatinOffset: %d,\n", rt.LatinOffset) + rv += "}\n" + return rv +} + +const fileHeader = `// Generated by running +// maketables --url=%s +// DO NOT EDIT + +package textseg + +import( + "unicode" +) +` + +func setupOutput() { + output = bufio.NewWriter(startGofmt()) +} + +// startGofmt connects output to a gofmt process if -output is set. +func startGofmt() io.Writer { + if *outputFile == "" { + return os.Stdout + } + stdout, err := os.Create(*outputFile) + if err != nil { + log.Fatal(err) + } + // Pipe output to gofmt. + gofmt := exec.Command("gofmt") + fd, err := gofmt.StdinPipe() + if err != nil { + log.Fatal(err) + } + gofmt.Stdout = stdout + gofmt.Stderr = os.Stderr + err = gofmt.Start() + if err != nil { + log.Fatal(err) + } + return fd +} + +func flushOutput() { + err := output.Flush() + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go b/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go new file mode 100644 index 0000000..ac42002 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go @@ -0,0 +1,212 @@ +// Copyright (c) 2014 Couchbase, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +// except in compliance with the License. You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed under the +// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +// either express or implied. See the License for the specific language governing permissions +// and limitations under the License. 
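A usage note, inferred from the `// +build ignore` tag and the -url/-output flags defined just below rather than stated anywhere in this patch: these generator programs are meant to be run directly rather than compiled into the package, along the lines of `go run make_test_tables.go -url http://www.unicode.org/Public/9.0.0/ucd/auxiliary/ -output <file>.go`, and when -output is set they pipe their output through gofmt before writing it to the named file.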
+ +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "strconv" + "strings" + "unicode" +) + +var url = flag.String("url", + "http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/", + "URL of Unicode database directory") +var verbose = flag.Bool("verbose", + false, + "write data to stdout as it is parsed") +var localFiles = flag.Bool("local", + false, + "data files have been copied to the current directory; for debugging only") + +var outputFile = flag.String("output", + "", + "output file for generated tables; default stdout") + +var output *bufio.Writer + +func main() { + flag.Parse() + setupOutput() + + graphemeTests := make([]test, 0) + graphemeTests = loadUnicodeData("GraphemeBreakTest.txt", graphemeTests) + wordTests := make([]test, 0) + wordTests = loadUnicodeData("WordBreakTest.txt", wordTests) + sentenceTests := make([]test, 0) + sentenceTests = loadUnicodeData("SentenceBreakTest.txt", sentenceTests) + + fmt.Fprintf(output, fileHeader, *url) + generateTestTables("Grapheme", graphemeTests) + generateTestTables("Word", wordTests) + generateTestTables("Sentence", sentenceTests) + + flushOutput() +} + +// WordBreakProperty.txt has the form: +// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD +// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ +func openReader(file string) (input io.ReadCloser) { + if *localFiles { + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + input = f + } else { + path := *url + file + resp, err := http.Get(path) + if err != nil { + log.Fatal(err) + } + if resp.StatusCode != 200 { + log.Fatal("bad GET status for "+file, resp.Status) + } + input = resp.Body + } + return +} + +func loadUnicodeData(filename string, tests []test) []test { + f := openReader(filename) + defer f.Close() + bufioReader := bufio.NewReader(f) + line, err := bufioReader.ReadString('\n') + for err == nil { + tests = parseLine(line, tests) + line, err = bufioReader.ReadString('\n') + } + // if the err was EOF still need to process last value + if err == io.EOF { + tests = parseLine(line, tests) + } + return tests +} + +const comment = "#" +const brk = "÷" +const nbrk = "×" + +type test [][]byte + +func parseLine(line string, tests []test) []test { + if strings.HasPrefix(line, comment) { + return tests + } + line = strings.TrimSpace(line) + if len(line) == 0 { + return tests + } + commentStart := strings.Index(line, comment) + if commentStart > 0 { + line = line[0:commentStart] + } + pieces := strings.Split(line, brk) + t := make(test, 0) + for _, piece := range pieces { + piece = strings.TrimSpace(piece) + if len(piece) > 0 { + codePoints := strings.Split(piece, nbrk) + word := "" + for _, codePoint := range codePoints { + codePoint = strings.TrimSpace(codePoint) + r, err := strconv.ParseInt(codePoint, 16, 64) + if err != nil { + log.Printf("err: %v for '%s'", err, string(r)) + return tests + } + + word += string(r) + } + t = append(t, []byte(word)) + } + } + tests = append(tests, t) + return tests +} + +func generateTestTables(prefix string, tests []test) { + fmt.Fprintf(output, testHeader, prefix) + for _, t := range tests { + fmt.Fprintf(output, "\t\t{\n") + fmt.Fprintf(output, "\t\t\tinput: %#v,\n", bytes.Join(t, []byte{})) + fmt.Fprintf(output, "\t\t\toutput: %s,\n", generateTest(t)) + fmt.Fprintf(output, "\t\t},\n") + } + fmt.Fprintf(output, "}\n") +} + +func generateTest(t test) string { + rv := "[][]byte{" + for _, 
te := range t { + rv += fmt.Sprintf("%#v,", te) + } + rv += "}" + return rv +} + +const fileHeader = `// Generated by running +// maketesttables --url=%s +// DO NOT EDIT + +package textseg +` + +const testHeader = `var unicode%sTests = []struct { + input []byte + output [][]byte + }{ +` + +func setupOutput() { + output = bufio.NewWriter(startGofmt()) +} + +// startGofmt connects output to a gofmt process if -output is set. +func startGofmt() io.Writer { + if *outputFile == "" { + return os.Stdout + } + stdout, err := os.Create(*outputFile) + if err != nil { + log.Fatal(err) + } + // Pipe output to gofmt. + gofmt := exec.Command("gofmt") + fd, err := gofmt.StdinPipe() + if err != nil { + log.Fatal(err) + } + gofmt.Stdout = stdout + gofmt.Stderr = os.Stderr + err = gofmt.Start() + if err != nil { + log.Fatal(err) + } + return fd +} + +func flushOutput() { + err := output.Flush() + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go b/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go new file mode 100644 index 0000000..fab7e84 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go @@ -0,0 +1,5700 @@ +// Generated by running +// maketables --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/ +// DO NOT EDIT + +package textseg + +import ( + "unicode" +) + +var _GraphemeCR = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _GraphemeControl = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x0, Hi: 0x9, Stride: 0x1}, + unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1}, + unicode.Range16{Lo: 0xe, Hi: 0x1f, Stride: 0x1}, + unicode.Range16{Lo: 0x7f, Hi: 0x9f, Stride: 0x1}, + unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1}, + unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1}, + unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1}, + unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1}, + unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1}, + unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1}, + unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1}, + unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1}, + unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1}, + unicode.Range16{Lo: 0x2065, Hi: 0x2065, Stride: 0x1}, + unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1}, + unicode.Range16{Lo: 0xd800, Hi: 0xdfff, Stride: 0x1}, + unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1}, + unicode.Range16{Lo: 0xfff0, Hi: 0xfff8, Stride: 0x1}, + unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0000, Hi: 0xe0000, Stride: 0x1}, + unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1}, + unicode.Range32{Lo: 0xe0002, Hi: 0xe001f, Stride: 0x1}, + unicode.Range32{Lo: 0xe0080, Hi: 0xe00ff, Stride: 0x1}, + unicode.Range32{Lo: 0xe01f0, Hi: 0xe0fff, Stride: 0x1}, + }, + LatinOffset: 5, +} + +var _GraphemeE_Base = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x261d, Hi: 0x261d, Stride: 0x1}, + unicode.Range16{Lo: 0x26f9, Hi: 0x26f9, Stride: 0x1}, + unicode.Range16{Lo: 0x270a, Hi: 0x270d, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f385, Hi: 0x1f385, Stride: 0x1}, + unicode.Range32{Lo: 0x1f3c3, Hi: 0x1f3c4, Stride: 0x1}, + unicode.Range32{Lo: 0x1f3ca, Hi: 0x1f3cb, Stride: 0x1}, + 
unicode.Range32{Lo: 0x1f442, Hi: 0x1f443, Stride: 0x1}, + unicode.Range32{Lo: 0x1f446, Hi: 0x1f450, Stride: 0x1}, + unicode.Range32{Lo: 0x1f46e, Hi: 0x1f46e, Stride: 0x1}, + unicode.Range32{Lo: 0x1f470, Hi: 0x1f478, Stride: 0x1}, + unicode.Range32{Lo: 0x1f47c, Hi: 0x1f47c, Stride: 0x1}, + unicode.Range32{Lo: 0x1f481, Hi: 0x1f483, Stride: 0x1}, + unicode.Range32{Lo: 0x1f485, Hi: 0x1f487, Stride: 0x1}, + unicode.Range32{Lo: 0x1f4aa, Hi: 0x1f4aa, Stride: 0x1}, + unicode.Range32{Lo: 0x1f575, Hi: 0x1f575, Stride: 0x1}, + unicode.Range32{Lo: 0x1f57a, Hi: 0x1f57a, Stride: 0x1}, + unicode.Range32{Lo: 0x1f590, Hi: 0x1f590, Stride: 0x1}, + unicode.Range32{Lo: 0x1f595, Hi: 0x1f596, Stride: 0x1}, + unicode.Range32{Lo: 0x1f645, Hi: 0x1f647, Stride: 0x1}, + unicode.Range32{Lo: 0x1f64b, Hi: 0x1f64f, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6a3, Hi: 0x1f6a3, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6b4, Hi: 0x1f6b6, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6c0, Hi: 0x1f6c0, Stride: 0x1}, + unicode.Range32{Lo: 0x1f918, Hi: 0x1f91e, Stride: 0x1}, + unicode.Range32{Lo: 0x1f926, Hi: 0x1f926, Stride: 0x1}, + unicode.Range32{Lo: 0x1f930, Hi: 0x1f930, Stride: 0x1}, + unicode.Range32{Lo: 0x1f933, Hi: 0x1f939, Stride: 0x1}, + unicode.Range32{Lo: 0x1f93c, Hi: 0x1f93e, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeE_Base_GAZ = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f466, Hi: 0x1f469, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeE_Modifier = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeExtend = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1}, + unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1}, + unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1}, + unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1}, + unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1}, + unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1}, + unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1}, + unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1}, + unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1}, + unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1}, + unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1}, + unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1}, + unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1}, + unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1}, + unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1}, + unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1}, + unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1}, + unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1}, + unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1}, + unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1}, + unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1}, + unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1}, + unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1}, + unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1}, + unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1}, + unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1}, + unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1}, + unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1}, + unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1}, + unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1}, + unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1}, + unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 
0x9be, Hi: 0x9be, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbe, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc2, Hi: 0xcc2, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd3e, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdcf, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xddf, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1}, + unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1}, + 
unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 
0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133e, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b0, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bd, Hi: 0x114bd, Stride: 0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115af, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1}, + unicode.Range32{Lo: 0x16af0, Hi: 
0x16af4, Stride: 0x1}, + unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1}, + unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d165, Hi: 0x1d165, Stride: 0x1}, + unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1}, + unicode.Range32{Lo: 0x1d16e, Hi: 0x1d172, Stride: 0x1}, + unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1}, + unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1}, + unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1}, + unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1}, + unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1}, + unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1}, + unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1}, + unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1}, + unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1}, + unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1}, + unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1}, + unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1}, + unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1}, + unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1}, + unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1}, + unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeGlue_After_Zwj = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2764, Hi: 0x2764, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f48b, Hi: 0x1f48b, Stride: 0x1}, + unicode.Range32{Lo: 0x1f5e8, Hi: 0x1f5e8, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeL = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x1100, Hi: 0x115f, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeLF = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _GraphemeLV = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xac00, Hi: 0xac00, Stride: 0x1}, + unicode.Range16{Lo: 0xac1c, Hi: 0xac1c, Stride: 0x1}, + unicode.Range16{Lo: 0xac38, Hi: 0xac38, Stride: 0x1}, + unicode.Range16{Lo: 0xac54, Hi: 0xac54, Stride: 0x1}, + unicode.Range16{Lo: 0xac70, Hi: 0xac70, Stride: 0x1}, + unicode.Range16{Lo: 0xac8c, Hi: 0xac8c, Stride: 0x1}, + unicode.Range16{Lo: 0xaca8, Hi: 0xaca8, Stride: 0x1}, + unicode.Range16{Lo: 0xacc4, Hi: 0xacc4, Stride: 0x1}, + unicode.Range16{Lo: 0xace0, Hi: 0xace0, Stride: 0x1}, + unicode.Range16{Lo: 0xacfc, Hi: 0xacfc, Stride: 0x1}, + unicode.Range16{Lo: 0xad18, Hi: 0xad18, Stride: 0x1}, + unicode.Range16{Lo: 0xad34, Hi: 0xad34, Stride: 0x1}, + unicode.Range16{Lo: 0xad50, Hi: 0xad50, Stride: 0x1}, + unicode.Range16{Lo: 0xad6c, Hi: 0xad6c, Stride: 0x1}, + unicode.Range16{Lo: 0xad88, Hi: 0xad88, Stride: 0x1}, + unicode.Range16{Lo: 0xada4, Hi: 0xada4, Stride: 0x1}, + unicode.Range16{Lo: 0xadc0, Hi: 0xadc0, Stride: 0x1}, + unicode.Range16{Lo: 0xaddc, Hi: 0xaddc, Stride: 0x1}, + unicode.Range16{Lo: 0xadf8, Hi: 0xadf8, Stride: 0x1}, + unicode.Range16{Lo: 0xae14, Hi: 0xae14, Stride: 0x1}, + unicode.Range16{Lo: 0xae30, Hi: 0xae30, Stride: 0x1}, + unicode.Range16{Lo: 0xae4c, Hi: 0xae4c, Stride: 0x1}, + unicode.Range16{Lo: 0xae68, Hi: 0xae68, Stride: 0x1}, + unicode.Range16{Lo: 0xae84, Hi: 0xae84, 
Stride: 0x1}, + unicode.Range16{Lo: 0xaea0, Hi: 0xaea0, Stride: 0x1}, + unicode.Range16{Lo: 0xaebc, Hi: 0xaebc, Stride: 0x1}, + unicode.Range16{Lo: 0xaed8, Hi: 0xaed8, Stride: 0x1}, + unicode.Range16{Lo: 0xaef4, Hi: 0xaef4, Stride: 0x1}, + unicode.Range16{Lo: 0xaf10, Hi: 0xaf10, Stride: 0x1}, + unicode.Range16{Lo: 0xaf2c, Hi: 0xaf2c, Stride: 0x1}, + unicode.Range16{Lo: 0xaf48, Hi: 0xaf48, Stride: 0x1}, + unicode.Range16{Lo: 0xaf64, Hi: 0xaf64, Stride: 0x1}, + unicode.Range16{Lo: 0xaf80, Hi: 0xaf80, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9c, Hi: 0xaf9c, Stride: 0x1}, + unicode.Range16{Lo: 0xafb8, Hi: 0xafb8, Stride: 0x1}, + unicode.Range16{Lo: 0xafd4, Hi: 0xafd4, Stride: 0x1}, + unicode.Range16{Lo: 0xaff0, Hi: 0xaff0, Stride: 0x1}, + unicode.Range16{Lo: 0xb00c, Hi: 0xb00c, Stride: 0x1}, + unicode.Range16{Lo: 0xb028, Hi: 0xb028, Stride: 0x1}, + unicode.Range16{Lo: 0xb044, Hi: 0xb044, Stride: 0x1}, + unicode.Range16{Lo: 0xb060, Hi: 0xb060, Stride: 0x1}, + unicode.Range16{Lo: 0xb07c, Hi: 0xb07c, Stride: 0x1}, + unicode.Range16{Lo: 0xb098, Hi: 0xb098, Stride: 0x1}, + unicode.Range16{Lo: 0xb0b4, Hi: 0xb0b4, Stride: 0x1}, + unicode.Range16{Lo: 0xb0d0, Hi: 0xb0d0, Stride: 0x1}, + unicode.Range16{Lo: 0xb0ec, Hi: 0xb0ec, Stride: 0x1}, + unicode.Range16{Lo: 0xb108, Hi: 0xb108, Stride: 0x1}, + unicode.Range16{Lo: 0xb124, Hi: 0xb124, Stride: 0x1}, + unicode.Range16{Lo: 0xb140, Hi: 0xb140, Stride: 0x1}, + unicode.Range16{Lo: 0xb15c, Hi: 0xb15c, Stride: 0x1}, + unicode.Range16{Lo: 0xb178, Hi: 0xb178, Stride: 0x1}, + unicode.Range16{Lo: 0xb194, Hi: 0xb194, Stride: 0x1}, + unicode.Range16{Lo: 0xb1b0, Hi: 0xb1b0, Stride: 0x1}, + unicode.Range16{Lo: 0xb1cc, Hi: 0xb1cc, Stride: 0x1}, + unicode.Range16{Lo: 0xb1e8, Hi: 0xb1e8, Stride: 0x1}, + unicode.Range16{Lo: 0xb204, Hi: 0xb204, Stride: 0x1}, + unicode.Range16{Lo: 0xb220, Hi: 0xb220, Stride: 0x1}, + unicode.Range16{Lo: 0xb23c, Hi: 0xb23c, Stride: 0x1}, + unicode.Range16{Lo: 0xb258, Hi: 0xb258, Stride: 0x1}, + unicode.Range16{Lo: 0xb274, Hi: 0xb274, Stride: 0x1}, + unicode.Range16{Lo: 0xb290, Hi: 0xb290, Stride: 0x1}, + unicode.Range16{Lo: 0xb2ac, Hi: 0xb2ac, Stride: 0x1}, + unicode.Range16{Lo: 0xb2c8, Hi: 0xb2c8, Stride: 0x1}, + unicode.Range16{Lo: 0xb2e4, Hi: 0xb2e4, Stride: 0x1}, + unicode.Range16{Lo: 0xb300, Hi: 0xb300, Stride: 0x1}, + unicode.Range16{Lo: 0xb31c, Hi: 0xb31c, Stride: 0x1}, + unicode.Range16{Lo: 0xb338, Hi: 0xb338, Stride: 0x1}, + unicode.Range16{Lo: 0xb354, Hi: 0xb354, Stride: 0x1}, + unicode.Range16{Lo: 0xb370, Hi: 0xb370, Stride: 0x1}, + unicode.Range16{Lo: 0xb38c, Hi: 0xb38c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3a8, Hi: 0xb3a8, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c4, Hi: 0xb3c4, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e0, Hi: 0xb3e0, Stride: 0x1}, + unicode.Range16{Lo: 0xb3fc, Hi: 0xb3fc, Stride: 0x1}, + unicode.Range16{Lo: 0xb418, Hi: 0xb418, Stride: 0x1}, + unicode.Range16{Lo: 0xb434, Hi: 0xb434, Stride: 0x1}, + unicode.Range16{Lo: 0xb450, Hi: 0xb450, Stride: 0x1}, + unicode.Range16{Lo: 0xb46c, Hi: 0xb46c, Stride: 0x1}, + unicode.Range16{Lo: 0xb488, Hi: 0xb488, Stride: 0x1}, + unicode.Range16{Lo: 0xb4a4, Hi: 0xb4a4, Stride: 0x1}, + unicode.Range16{Lo: 0xb4c0, Hi: 0xb4c0, Stride: 0x1}, + unicode.Range16{Lo: 0xb4dc, Hi: 0xb4dc, Stride: 0x1}, + unicode.Range16{Lo: 0xb4f8, Hi: 0xb4f8, Stride: 0x1}, + unicode.Range16{Lo: 0xb514, Hi: 0xb514, Stride: 0x1}, + unicode.Range16{Lo: 0xb530, Hi: 0xb530, Stride: 0x1}, + unicode.Range16{Lo: 0xb54c, Hi: 0xb54c, Stride: 0x1}, + unicode.Range16{Lo: 0xb568, Hi: 0xb568, Stride: 0x1}, + 
unicode.Range16{Lo: 0xb584, Hi: 0xb584, Stride: 0x1}, + unicode.Range16{Lo: 0xb5a0, Hi: 0xb5a0, Stride: 0x1}, + unicode.Range16{Lo: 0xb5bc, Hi: 0xb5bc, Stride: 0x1}, + unicode.Range16{Lo: 0xb5d8, Hi: 0xb5d8, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f4, Hi: 0xb5f4, Stride: 0x1}, + unicode.Range16{Lo: 0xb610, Hi: 0xb610, Stride: 0x1}, + unicode.Range16{Lo: 0xb62c, Hi: 0xb62c, Stride: 0x1}, + unicode.Range16{Lo: 0xb648, Hi: 0xb648, Stride: 0x1}, + unicode.Range16{Lo: 0xb664, Hi: 0xb664, Stride: 0x1}, + unicode.Range16{Lo: 0xb680, Hi: 0xb680, Stride: 0x1}, + unicode.Range16{Lo: 0xb69c, Hi: 0xb69c, Stride: 0x1}, + unicode.Range16{Lo: 0xb6b8, Hi: 0xb6b8, Stride: 0x1}, + unicode.Range16{Lo: 0xb6d4, Hi: 0xb6d4, Stride: 0x1}, + unicode.Range16{Lo: 0xb6f0, Hi: 0xb6f0, Stride: 0x1}, + unicode.Range16{Lo: 0xb70c, Hi: 0xb70c, Stride: 0x1}, + unicode.Range16{Lo: 0xb728, Hi: 0xb728, Stride: 0x1}, + unicode.Range16{Lo: 0xb744, Hi: 0xb744, Stride: 0x1}, + unicode.Range16{Lo: 0xb760, Hi: 0xb760, Stride: 0x1}, + unicode.Range16{Lo: 0xb77c, Hi: 0xb77c, Stride: 0x1}, + unicode.Range16{Lo: 0xb798, Hi: 0xb798, Stride: 0x1}, + unicode.Range16{Lo: 0xb7b4, Hi: 0xb7b4, Stride: 0x1}, + unicode.Range16{Lo: 0xb7d0, Hi: 0xb7d0, Stride: 0x1}, + unicode.Range16{Lo: 0xb7ec, Hi: 0xb7ec, Stride: 0x1}, + unicode.Range16{Lo: 0xb808, Hi: 0xb808, Stride: 0x1}, + unicode.Range16{Lo: 0xb824, Hi: 0xb824, Stride: 0x1}, + unicode.Range16{Lo: 0xb840, Hi: 0xb840, Stride: 0x1}, + unicode.Range16{Lo: 0xb85c, Hi: 0xb85c, Stride: 0x1}, + unicode.Range16{Lo: 0xb878, Hi: 0xb878, Stride: 0x1}, + unicode.Range16{Lo: 0xb894, Hi: 0xb894, Stride: 0x1}, + unicode.Range16{Lo: 0xb8b0, Hi: 0xb8b0, Stride: 0x1}, + unicode.Range16{Lo: 0xb8cc, Hi: 0xb8cc, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e8, Hi: 0xb8e8, Stride: 0x1}, + unicode.Range16{Lo: 0xb904, Hi: 0xb904, Stride: 0x1}, + unicode.Range16{Lo: 0xb920, Hi: 0xb920, Stride: 0x1}, + unicode.Range16{Lo: 0xb93c, Hi: 0xb93c, Stride: 0x1}, + unicode.Range16{Lo: 0xb958, Hi: 0xb958, Stride: 0x1}, + unicode.Range16{Lo: 0xb974, Hi: 0xb974, Stride: 0x1}, + unicode.Range16{Lo: 0xb990, Hi: 0xb990, Stride: 0x1}, + unicode.Range16{Lo: 0xb9ac, Hi: 0xb9ac, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c8, Hi: 0xb9c8, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e4, Hi: 0xb9e4, Stride: 0x1}, + unicode.Range16{Lo: 0xba00, Hi: 0xba00, Stride: 0x1}, + unicode.Range16{Lo: 0xba1c, Hi: 0xba1c, Stride: 0x1}, + unicode.Range16{Lo: 0xba38, Hi: 0xba38, Stride: 0x1}, + unicode.Range16{Lo: 0xba54, Hi: 0xba54, Stride: 0x1}, + unicode.Range16{Lo: 0xba70, Hi: 0xba70, Stride: 0x1}, + unicode.Range16{Lo: 0xba8c, Hi: 0xba8c, Stride: 0x1}, + unicode.Range16{Lo: 0xbaa8, Hi: 0xbaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xbac4, Hi: 0xbac4, Stride: 0x1}, + unicode.Range16{Lo: 0xbae0, Hi: 0xbae0, Stride: 0x1}, + unicode.Range16{Lo: 0xbafc, Hi: 0xbafc, Stride: 0x1}, + unicode.Range16{Lo: 0xbb18, Hi: 0xbb18, Stride: 0x1}, + unicode.Range16{Lo: 0xbb34, Hi: 0xbb34, Stride: 0x1}, + unicode.Range16{Lo: 0xbb50, Hi: 0xbb50, Stride: 0x1}, + unicode.Range16{Lo: 0xbb6c, Hi: 0xbb6c, Stride: 0x1}, + unicode.Range16{Lo: 0xbb88, Hi: 0xbb88, Stride: 0x1}, + unicode.Range16{Lo: 0xbba4, Hi: 0xbba4, Stride: 0x1}, + unicode.Range16{Lo: 0xbbc0, Hi: 0xbbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbbdc, Hi: 0xbbdc, Stride: 0x1}, + unicode.Range16{Lo: 0xbbf8, Hi: 0xbbf8, Stride: 0x1}, + unicode.Range16{Lo: 0xbc14, Hi: 0xbc14, Stride: 0x1}, + unicode.Range16{Lo: 0xbc30, Hi: 0xbc30, Stride: 0x1}, + unicode.Range16{Lo: 0xbc4c, Hi: 0xbc4c, Stride: 0x1}, + unicode.Range16{Lo: 
0xbc68, Hi: 0xbc68, Stride: 0x1}, + unicode.Range16{Lo: 0xbc84, Hi: 0xbc84, Stride: 0x1}, + unicode.Range16{Lo: 0xbca0, Hi: 0xbca0, Stride: 0x1}, + unicode.Range16{Lo: 0xbcbc, Hi: 0xbcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd8, Hi: 0xbcd8, Stride: 0x1}, + unicode.Range16{Lo: 0xbcf4, Hi: 0xbcf4, Stride: 0x1}, + unicode.Range16{Lo: 0xbd10, Hi: 0xbd10, Stride: 0x1}, + unicode.Range16{Lo: 0xbd2c, Hi: 0xbd2c, Stride: 0x1}, + unicode.Range16{Lo: 0xbd48, Hi: 0xbd48, Stride: 0x1}, + unicode.Range16{Lo: 0xbd64, Hi: 0xbd64, Stride: 0x1}, + unicode.Range16{Lo: 0xbd80, Hi: 0xbd80, Stride: 0x1}, + unicode.Range16{Lo: 0xbd9c, Hi: 0xbd9c, Stride: 0x1}, + unicode.Range16{Lo: 0xbdb8, Hi: 0xbdb8, Stride: 0x1}, + unicode.Range16{Lo: 0xbdd4, Hi: 0xbdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xbdf0, Hi: 0xbdf0, Stride: 0x1}, + unicode.Range16{Lo: 0xbe0c, Hi: 0xbe0c, Stride: 0x1}, + unicode.Range16{Lo: 0xbe28, Hi: 0xbe28, Stride: 0x1}, + unicode.Range16{Lo: 0xbe44, Hi: 0xbe44, Stride: 0x1}, + unicode.Range16{Lo: 0xbe60, Hi: 0xbe60, Stride: 0x1}, + unicode.Range16{Lo: 0xbe7c, Hi: 0xbe7c, Stride: 0x1}, + unicode.Range16{Lo: 0xbe98, Hi: 0xbe98, Stride: 0x1}, + unicode.Range16{Lo: 0xbeb4, Hi: 0xbeb4, Stride: 0x1}, + unicode.Range16{Lo: 0xbed0, Hi: 0xbed0, Stride: 0x1}, + unicode.Range16{Lo: 0xbeec, Hi: 0xbeec, Stride: 0x1}, + unicode.Range16{Lo: 0xbf08, Hi: 0xbf08, Stride: 0x1}, + unicode.Range16{Lo: 0xbf24, Hi: 0xbf24, Stride: 0x1}, + unicode.Range16{Lo: 0xbf40, Hi: 0xbf40, Stride: 0x1}, + unicode.Range16{Lo: 0xbf5c, Hi: 0xbf5c, Stride: 0x1}, + unicode.Range16{Lo: 0xbf78, Hi: 0xbf78, Stride: 0x1}, + unicode.Range16{Lo: 0xbf94, Hi: 0xbf94, Stride: 0x1}, + unicode.Range16{Lo: 0xbfb0, Hi: 0xbfb0, Stride: 0x1}, + unicode.Range16{Lo: 0xbfcc, Hi: 0xbfcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbfe8, Hi: 0xbfe8, Stride: 0x1}, + unicode.Range16{Lo: 0xc004, Hi: 0xc004, Stride: 0x1}, + unicode.Range16{Lo: 0xc020, Hi: 0xc020, Stride: 0x1}, + unicode.Range16{Lo: 0xc03c, Hi: 0xc03c, Stride: 0x1}, + unicode.Range16{Lo: 0xc058, Hi: 0xc058, Stride: 0x1}, + unicode.Range16{Lo: 0xc074, Hi: 0xc074, Stride: 0x1}, + unicode.Range16{Lo: 0xc090, Hi: 0xc090, Stride: 0x1}, + unicode.Range16{Lo: 0xc0ac, Hi: 0xc0ac, Stride: 0x1}, + unicode.Range16{Lo: 0xc0c8, Hi: 0xc0c8, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e4, Hi: 0xc0e4, Stride: 0x1}, + unicode.Range16{Lo: 0xc100, Hi: 0xc100, Stride: 0x1}, + unicode.Range16{Lo: 0xc11c, Hi: 0xc11c, Stride: 0x1}, + unicode.Range16{Lo: 0xc138, Hi: 0xc138, Stride: 0x1}, + unicode.Range16{Lo: 0xc154, Hi: 0xc154, Stride: 0x1}, + unicode.Range16{Lo: 0xc170, Hi: 0xc170, Stride: 0x1}, + unicode.Range16{Lo: 0xc18c, Hi: 0xc18c, Stride: 0x1}, + unicode.Range16{Lo: 0xc1a8, Hi: 0xc1a8, Stride: 0x1}, + unicode.Range16{Lo: 0xc1c4, Hi: 0xc1c4, Stride: 0x1}, + unicode.Range16{Lo: 0xc1e0, Hi: 0xc1e0, Stride: 0x1}, + unicode.Range16{Lo: 0xc1fc, Hi: 0xc1fc, Stride: 0x1}, + unicode.Range16{Lo: 0xc218, Hi: 0xc218, Stride: 0x1}, + unicode.Range16{Lo: 0xc234, Hi: 0xc234, Stride: 0x1}, + unicode.Range16{Lo: 0xc250, Hi: 0xc250, Stride: 0x1}, + unicode.Range16{Lo: 0xc26c, Hi: 0xc26c, Stride: 0x1}, + unicode.Range16{Lo: 0xc288, Hi: 0xc288, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a4, Hi: 0xc2a4, Stride: 0x1}, + unicode.Range16{Lo: 0xc2c0, Hi: 0xc2c0, Stride: 0x1}, + unicode.Range16{Lo: 0xc2dc, Hi: 0xc2dc, Stride: 0x1}, + unicode.Range16{Lo: 0xc2f8, Hi: 0xc2f8, Stride: 0x1}, + unicode.Range16{Lo: 0xc314, Hi: 0xc314, Stride: 0x1}, + unicode.Range16{Lo: 0xc330, Hi: 0xc330, Stride: 0x1}, + unicode.Range16{Lo: 0xc34c, Hi: 0xc34c, 
Stride: 0x1}, + unicode.Range16{Lo: 0xc368, Hi: 0xc368, Stride: 0x1}, + unicode.Range16{Lo: 0xc384, Hi: 0xc384, Stride: 0x1}, + unicode.Range16{Lo: 0xc3a0, Hi: 0xc3a0, Stride: 0x1}, + unicode.Range16{Lo: 0xc3bc, Hi: 0xc3bc, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d8, Hi: 0xc3d8, Stride: 0x1}, + unicode.Range16{Lo: 0xc3f4, Hi: 0xc3f4, Stride: 0x1}, + unicode.Range16{Lo: 0xc410, Hi: 0xc410, Stride: 0x1}, + unicode.Range16{Lo: 0xc42c, Hi: 0xc42c, Stride: 0x1}, + unicode.Range16{Lo: 0xc448, Hi: 0xc448, Stride: 0x1}, + unicode.Range16{Lo: 0xc464, Hi: 0xc464, Stride: 0x1}, + unicode.Range16{Lo: 0xc480, Hi: 0xc480, Stride: 0x1}, + unicode.Range16{Lo: 0xc49c, Hi: 0xc49c, Stride: 0x1}, + unicode.Range16{Lo: 0xc4b8, Hi: 0xc4b8, Stride: 0x1}, + unicode.Range16{Lo: 0xc4d4, Hi: 0xc4d4, Stride: 0x1}, + unicode.Range16{Lo: 0xc4f0, Hi: 0xc4f0, Stride: 0x1}, + unicode.Range16{Lo: 0xc50c, Hi: 0xc50c, Stride: 0x1}, + unicode.Range16{Lo: 0xc528, Hi: 0xc528, Stride: 0x1}, + unicode.Range16{Lo: 0xc544, Hi: 0xc544, Stride: 0x1}, + unicode.Range16{Lo: 0xc560, Hi: 0xc560, Stride: 0x1}, + unicode.Range16{Lo: 0xc57c, Hi: 0xc57c, Stride: 0x1}, + unicode.Range16{Lo: 0xc598, Hi: 0xc598, Stride: 0x1}, + unicode.Range16{Lo: 0xc5b4, Hi: 0xc5b4, Stride: 0x1}, + unicode.Range16{Lo: 0xc5d0, Hi: 0xc5d0, Stride: 0x1}, + unicode.Range16{Lo: 0xc5ec, Hi: 0xc5ec, Stride: 0x1}, + unicode.Range16{Lo: 0xc608, Hi: 0xc608, Stride: 0x1}, + unicode.Range16{Lo: 0xc624, Hi: 0xc624, Stride: 0x1}, + unicode.Range16{Lo: 0xc640, Hi: 0xc640, Stride: 0x1}, + unicode.Range16{Lo: 0xc65c, Hi: 0xc65c, Stride: 0x1}, + unicode.Range16{Lo: 0xc678, Hi: 0xc678, Stride: 0x1}, + unicode.Range16{Lo: 0xc694, Hi: 0xc694, Stride: 0x1}, + unicode.Range16{Lo: 0xc6b0, Hi: 0xc6b0, Stride: 0x1}, + unicode.Range16{Lo: 0xc6cc, Hi: 0xc6cc, Stride: 0x1}, + unicode.Range16{Lo: 0xc6e8, Hi: 0xc6e8, Stride: 0x1}, + unicode.Range16{Lo: 0xc704, Hi: 0xc704, Stride: 0x1}, + unicode.Range16{Lo: 0xc720, Hi: 0xc720, Stride: 0x1}, + unicode.Range16{Lo: 0xc73c, Hi: 0xc73c, Stride: 0x1}, + unicode.Range16{Lo: 0xc758, Hi: 0xc758, Stride: 0x1}, + unicode.Range16{Lo: 0xc774, Hi: 0xc774, Stride: 0x1}, + unicode.Range16{Lo: 0xc790, Hi: 0xc790, Stride: 0x1}, + unicode.Range16{Lo: 0xc7ac, Hi: 0xc7ac, Stride: 0x1}, + unicode.Range16{Lo: 0xc7c8, Hi: 0xc7c8, Stride: 0x1}, + unicode.Range16{Lo: 0xc7e4, Hi: 0xc7e4, Stride: 0x1}, + unicode.Range16{Lo: 0xc800, Hi: 0xc800, Stride: 0x1}, + unicode.Range16{Lo: 0xc81c, Hi: 0xc81c, Stride: 0x1}, + unicode.Range16{Lo: 0xc838, Hi: 0xc838, Stride: 0x1}, + unicode.Range16{Lo: 0xc854, Hi: 0xc854, Stride: 0x1}, + unicode.Range16{Lo: 0xc870, Hi: 0xc870, Stride: 0x1}, + unicode.Range16{Lo: 0xc88c, Hi: 0xc88c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8a8, Hi: 0xc8a8, Stride: 0x1}, + unicode.Range16{Lo: 0xc8c4, Hi: 0xc8c4, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e0, Hi: 0xc8e0, Stride: 0x1}, + unicode.Range16{Lo: 0xc8fc, Hi: 0xc8fc, Stride: 0x1}, + unicode.Range16{Lo: 0xc918, Hi: 0xc918, Stride: 0x1}, + unicode.Range16{Lo: 0xc934, Hi: 0xc934, Stride: 0x1}, + unicode.Range16{Lo: 0xc950, Hi: 0xc950, Stride: 0x1}, + unicode.Range16{Lo: 0xc96c, Hi: 0xc96c, Stride: 0x1}, + unicode.Range16{Lo: 0xc988, Hi: 0xc988, Stride: 0x1}, + unicode.Range16{Lo: 0xc9a4, Hi: 0xc9a4, Stride: 0x1}, + unicode.Range16{Lo: 0xc9c0, Hi: 0xc9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xc9dc, Hi: 0xc9dc, Stride: 0x1}, + unicode.Range16{Lo: 0xc9f8, Hi: 0xc9f8, Stride: 0x1}, + unicode.Range16{Lo: 0xca14, Hi: 0xca14, Stride: 0x1}, + unicode.Range16{Lo: 0xca30, Hi: 0xca30, Stride: 0x1}, + 
unicode.Range16{Lo: 0xca4c, Hi: 0xca4c, Stride: 0x1}, + unicode.Range16{Lo: 0xca68, Hi: 0xca68, Stride: 0x1}, + unicode.Range16{Lo: 0xca84, Hi: 0xca84, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa0, Hi: 0xcaa0, Stride: 0x1}, + unicode.Range16{Lo: 0xcabc, Hi: 0xcabc, Stride: 0x1}, + unicode.Range16{Lo: 0xcad8, Hi: 0xcad8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaf4, Hi: 0xcaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xcb10, Hi: 0xcb10, Stride: 0x1}, + unicode.Range16{Lo: 0xcb2c, Hi: 0xcb2c, Stride: 0x1}, + unicode.Range16{Lo: 0xcb48, Hi: 0xcb48, Stride: 0x1}, + unicode.Range16{Lo: 0xcb64, Hi: 0xcb64, Stride: 0x1}, + unicode.Range16{Lo: 0xcb80, Hi: 0xcb80, Stride: 0x1}, + unicode.Range16{Lo: 0xcb9c, Hi: 0xcb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xcbb8, Hi: 0xcbb8, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd4, Hi: 0xcbd4, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf0, Hi: 0xcbf0, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0c, Hi: 0xcc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xcc28, Hi: 0xcc28, Stride: 0x1}, + unicode.Range16{Lo: 0xcc44, Hi: 0xcc44, Stride: 0x1}, + unicode.Range16{Lo: 0xcc60, Hi: 0xcc60, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7c, Hi: 0xcc7c, Stride: 0x1}, + unicode.Range16{Lo: 0xcc98, Hi: 0xcc98, Stride: 0x1}, + unicode.Range16{Lo: 0xccb4, Hi: 0xccb4, Stride: 0x1}, + unicode.Range16{Lo: 0xccd0, Hi: 0xccd0, Stride: 0x1}, + unicode.Range16{Lo: 0xccec, Hi: 0xccec, Stride: 0x1}, + unicode.Range16{Lo: 0xcd08, Hi: 0xcd08, Stride: 0x1}, + unicode.Range16{Lo: 0xcd24, Hi: 0xcd24, Stride: 0x1}, + unicode.Range16{Lo: 0xcd40, Hi: 0xcd40, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5c, Hi: 0xcd5c, Stride: 0x1}, + unicode.Range16{Lo: 0xcd78, Hi: 0xcd78, Stride: 0x1}, + unicode.Range16{Lo: 0xcd94, Hi: 0xcd94, Stride: 0x1}, + unicode.Range16{Lo: 0xcdb0, Hi: 0xcdb0, Stride: 0x1}, + unicode.Range16{Lo: 0xcdcc, Hi: 0xcdcc, Stride: 0x1}, + unicode.Range16{Lo: 0xcde8, Hi: 0xcde8, Stride: 0x1}, + unicode.Range16{Lo: 0xce04, Hi: 0xce04, Stride: 0x1}, + unicode.Range16{Lo: 0xce20, Hi: 0xce20, Stride: 0x1}, + unicode.Range16{Lo: 0xce3c, Hi: 0xce3c, Stride: 0x1}, + unicode.Range16{Lo: 0xce58, Hi: 0xce58, Stride: 0x1}, + unicode.Range16{Lo: 0xce74, Hi: 0xce74, Stride: 0x1}, + unicode.Range16{Lo: 0xce90, Hi: 0xce90, Stride: 0x1}, + unicode.Range16{Lo: 0xceac, Hi: 0xceac, Stride: 0x1}, + unicode.Range16{Lo: 0xcec8, Hi: 0xcec8, Stride: 0x1}, + unicode.Range16{Lo: 0xcee4, Hi: 0xcee4, Stride: 0x1}, + unicode.Range16{Lo: 0xcf00, Hi: 0xcf00, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1c, Hi: 0xcf1c, Stride: 0x1}, + unicode.Range16{Lo: 0xcf38, Hi: 0xcf38, Stride: 0x1}, + unicode.Range16{Lo: 0xcf54, Hi: 0xcf54, Stride: 0x1}, + unicode.Range16{Lo: 0xcf70, Hi: 0xcf70, Stride: 0x1}, + unicode.Range16{Lo: 0xcf8c, Hi: 0xcf8c, Stride: 0x1}, + unicode.Range16{Lo: 0xcfa8, Hi: 0xcfa8, Stride: 0x1}, + unicode.Range16{Lo: 0xcfc4, Hi: 0xcfc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcfe0, Hi: 0xcfe0, Stride: 0x1}, + unicode.Range16{Lo: 0xcffc, Hi: 0xcffc, Stride: 0x1}, + unicode.Range16{Lo: 0xd018, Hi: 0xd018, Stride: 0x1}, + unicode.Range16{Lo: 0xd034, Hi: 0xd034, Stride: 0x1}, + unicode.Range16{Lo: 0xd050, Hi: 0xd050, Stride: 0x1}, + unicode.Range16{Lo: 0xd06c, Hi: 0xd06c, Stride: 0x1}, + unicode.Range16{Lo: 0xd088, Hi: 0xd088, Stride: 0x1}, + unicode.Range16{Lo: 0xd0a4, Hi: 0xd0a4, Stride: 0x1}, + unicode.Range16{Lo: 0xd0c0, Hi: 0xd0c0, Stride: 0x1}, + unicode.Range16{Lo: 0xd0dc, Hi: 0xd0dc, Stride: 0x1}, + unicode.Range16{Lo: 0xd0f8, Hi: 0xd0f8, Stride: 0x1}, + unicode.Range16{Lo: 0xd114, Hi: 0xd114, Stride: 0x1}, + unicode.Range16{Lo: 
0xd130, Hi: 0xd130, Stride: 0x1}, + unicode.Range16{Lo: 0xd14c, Hi: 0xd14c, Stride: 0x1}, + unicode.Range16{Lo: 0xd168, Hi: 0xd168, Stride: 0x1}, + unicode.Range16{Lo: 0xd184, Hi: 0xd184, Stride: 0x1}, + unicode.Range16{Lo: 0xd1a0, Hi: 0xd1a0, Stride: 0x1}, + unicode.Range16{Lo: 0xd1bc, Hi: 0xd1bc, Stride: 0x1}, + unicode.Range16{Lo: 0xd1d8, Hi: 0xd1d8, Stride: 0x1}, + unicode.Range16{Lo: 0xd1f4, Hi: 0xd1f4, Stride: 0x1}, + unicode.Range16{Lo: 0xd210, Hi: 0xd210, Stride: 0x1}, + unicode.Range16{Lo: 0xd22c, Hi: 0xd22c, Stride: 0x1}, + unicode.Range16{Lo: 0xd248, Hi: 0xd248, Stride: 0x1}, + unicode.Range16{Lo: 0xd264, Hi: 0xd264, Stride: 0x1}, + unicode.Range16{Lo: 0xd280, Hi: 0xd280, Stride: 0x1}, + unicode.Range16{Lo: 0xd29c, Hi: 0xd29c, Stride: 0x1}, + unicode.Range16{Lo: 0xd2b8, Hi: 0xd2b8, Stride: 0x1}, + unicode.Range16{Lo: 0xd2d4, Hi: 0xd2d4, Stride: 0x1}, + unicode.Range16{Lo: 0xd2f0, Hi: 0xd2f0, Stride: 0x1}, + unicode.Range16{Lo: 0xd30c, Hi: 0xd30c, Stride: 0x1}, + unicode.Range16{Lo: 0xd328, Hi: 0xd328, Stride: 0x1}, + unicode.Range16{Lo: 0xd344, Hi: 0xd344, Stride: 0x1}, + unicode.Range16{Lo: 0xd360, Hi: 0xd360, Stride: 0x1}, + unicode.Range16{Lo: 0xd37c, Hi: 0xd37c, Stride: 0x1}, + unicode.Range16{Lo: 0xd398, Hi: 0xd398, Stride: 0x1}, + unicode.Range16{Lo: 0xd3b4, Hi: 0xd3b4, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d0, Hi: 0xd3d0, Stride: 0x1}, + unicode.Range16{Lo: 0xd3ec, Hi: 0xd3ec, Stride: 0x1}, + unicode.Range16{Lo: 0xd408, Hi: 0xd408, Stride: 0x1}, + unicode.Range16{Lo: 0xd424, Hi: 0xd424, Stride: 0x1}, + unicode.Range16{Lo: 0xd440, Hi: 0xd440, Stride: 0x1}, + unicode.Range16{Lo: 0xd45c, Hi: 0xd45c, Stride: 0x1}, + unicode.Range16{Lo: 0xd478, Hi: 0xd478, Stride: 0x1}, + unicode.Range16{Lo: 0xd494, Hi: 0xd494, Stride: 0x1}, + unicode.Range16{Lo: 0xd4b0, Hi: 0xd4b0, Stride: 0x1}, + unicode.Range16{Lo: 0xd4cc, Hi: 0xd4cc, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e8, Hi: 0xd4e8, Stride: 0x1}, + unicode.Range16{Lo: 0xd504, Hi: 0xd504, Stride: 0x1}, + unicode.Range16{Lo: 0xd520, Hi: 0xd520, Stride: 0x1}, + unicode.Range16{Lo: 0xd53c, Hi: 0xd53c, Stride: 0x1}, + unicode.Range16{Lo: 0xd558, Hi: 0xd558, Stride: 0x1}, + unicode.Range16{Lo: 0xd574, Hi: 0xd574, Stride: 0x1}, + unicode.Range16{Lo: 0xd590, Hi: 0xd590, Stride: 0x1}, + unicode.Range16{Lo: 0xd5ac, Hi: 0xd5ac, Stride: 0x1}, + unicode.Range16{Lo: 0xd5c8, Hi: 0xd5c8, Stride: 0x1}, + unicode.Range16{Lo: 0xd5e4, Hi: 0xd5e4, Stride: 0x1}, + unicode.Range16{Lo: 0xd600, Hi: 0xd600, Stride: 0x1}, + unicode.Range16{Lo: 0xd61c, Hi: 0xd61c, Stride: 0x1}, + unicode.Range16{Lo: 0xd638, Hi: 0xd638, Stride: 0x1}, + unicode.Range16{Lo: 0xd654, Hi: 0xd654, Stride: 0x1}, + unicode.Range16{Lo: 0xd670, Hi: 0xd670, Stride: 0x1}, + unicode.Range16{Lo: 0xd68c, Hi: 0xd68c, Stride: 0x1}, + unicode.Range16{Lo: 0xd6a8, Hi: 0xd6a8, Stride: 0x1}, + unicode.Range16{Lo: 0xd6c4, Hi: 0xd6c4, Stride: 0x1}, + unicode.Range16{Lo: 0xd6e0, Hi: 0xd6e0, Stride: 0x1}, + unicode.Range16{Lo: 0xd6fc, Hi: 0xd6fc, Stride: 0x1}, + unicode.Range16{Lo: 0xd718, Hi: 0xd718, Stride: 0x1}, + unicode.Range16{Lo: 0xd734, Hi: 0xd734, Stride: 0x1}, + unicode.Range16{Lo: 0xd750, Hi: 0xd750, Stride: 0x1}, + unicode.Range16{Lo: 0xd76c, Hi: 0xd76c, Stride: 0x1}, + unicode.Range16{Lo: 0xd788, Hi: 0xd788, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeLVT = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xac01, Hi: 0xac1b, Stride: 0x1}, + unicode.Range16{Lo: 0xac1d, Hi: 0xac37, Stride: 0x1}, + unicode.Range16{Lo: 0xac39, Hi: 0xac53, Stride: 0x1}, + 
unicode.Range16{Lo: 0xac55, Hi: 0xac6f, Stride: 0x1}, + unicode.Range16{Lo: 0xac71, Hi: 0xac8b, Stride: 0x1}, + unicode.Range16{Lo: 0xac8d, Hi: 0xaca7, Stride: 0x1}, + unicode.Range16{Lo: 0xaca9, Hi: 0xacc3, Stride: 0x1}, + unicode.Range16{Lo: 0xacc5, Hi: 0xacdf, Stride: 0x1}, + unicode.Range16{Lo: 0xace1, Hi: 0xacfb, Stride: 0x1}, + unicode.Range16{Lo: 0xacfd, Hi: 0xad17, Stride: 0x1}, + unicode.Range16{Lo: 0xad19, Hi: 0xad33, Stride: 0x1}, + unicode.Range16{Lo: 0xad35, Hi: 0xad4f, Stride: 0x1}, + unicode.Range16{Lo: 0xad51, Hi: 0xad6b, Stride: 0x1}, + unicode.Range16{Lo: 0xad6d, Hi: 0xad87, Stride: 0x1}, + unicode.Range16{Lo: 0xad89, Hi: 0xada3, Stride: 0x1}, + unicode.Range16{Lo: 0xada5, Hi: 0xadbf, Stride: 0x1}, + unicode.Range16{Lo: 0xadc1, Hi: 0xaddb, Stride: 0x1}, + unicode.Range16{Lo: 0xaddd, Hi: 0xadf7, Stride: 0x1}, + unicode.Range16{Lo: 0xadf9, Hi: 0xae13, Stride: 0x1}, + unicode.Range16{Lo: 0xae15, Hi: 0xae2f, Stride: 0x1}, + unicode.Range16{Lo: 0xae31, Hi: 0xae4b, Stride: 0x1}, + unicode.Range16{Lo: 0xae4d, Hi: 0xae67, Stride: 0x1}, + unicode.Range16{Lo: 0xae69, Hi: 0xae83, Stride: 0x1}, + unicode.Range16{Lo: 0xae85, Hi: 0xae9f, Stride: 0x1}, + unicode.Range16{Lo: 0xaea1, Hi: 0xaebb, Stride: 0x1}, + unicode.Range16{Lo: 0xaebd, Hi: 0xaed7, Stride: 0x1}, + unicode.Range16{Lo: 0xaed9, Hi: 0xaef3, Stride: 0x1}, + unicode.Range16{Lo: 0xaef5, Hi: 0xaf0f, Stride: 0x1}, + unicode.Range16{Lo: 0xaf11, Hi: 0xaf2b, Stride: 0x1}, + unicode.Range16{Lo: 0xaf2d, Hi: 0xaf47, Stride: 0x1}, + unicode.Range16{Lo: 0xaf49, Hi: 0xaf63, Stride: 0x1}, + unicode.Range16{Lo: 0xaf65, Hi: 0xaf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xaf81, Hi: 0xaf9b, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9d, Hi: 0xafb7, Stride: 0x1}, + unicode.Range16{Lo: 0xafb9, Hi: 0xafd3, Stride: 0x1}, + unicode.Range16{Lo: 0xafd5, Hi: 0xafef, Stride: 0x1}, + unicode.Range16{Lo: 0xaff1, Hi: 0xb00b, Stride: 0x1}, + unicode.Range16{Lo: 0xb00d, Hi: 0xb027, Stride: 0x1}, + unicode.Range16{Lo: 0xb029, Hi: 0xb043, Stride: 0x1}, + unicode.Range16{Lo: 0xb045, Hi: 0xb05f, Stride: 0x1}, + unicode.Range16{Lo: 0xb061, Hi: 0xb07b, Stride: 0x1}, + unicode.Range16{Lo: 0xb07d, Hi: 0xb097, Stride: 0x1}, + unicode.Range16{Lo: 0xb099, Hi: 0xb0b3, Stride: 0x1}, + unicode.Range16{Lo: 0xb0b5, Hi: 0xb0cf, Stride: 0x1}, + unicode.Range16{Lo: 0xb0d1, Hi: 0xb0eb, Stride: 0x1}, + unicode.Range16{Lo: 0xb0ed, Hi: 0xb107, Stride: 0x1}, + unicode.Range16{Lo: 0xb109, Hi: 0xb123, Stride: 0x1}, + unicode.Range16{Lo: 0xb125, Hi: 0xb13f, Stride: 0x1}, + unicode.Range16{Lo: 0xb141, Hi: 0xb15b, Stride: 0x1}, + unicode.Range16{Lo: 0xb15d, Hi: 0xb177, Stride: 0x1}, + unicode.Range16{Lo: 0xb179, Hi: 0xb193, Stride: 0x1}, + unicode.Range16{Lo: 0xb195, Hi: 0xb1af, Stride: 0x1}, + unicode.Range16{Lo: 0xb1b1, Hi: 0xb1cb, Stride: 0x1}, + unicode.Range16{Lo: 0xb1cd, Hi: 0xb1e7, Stride: 0x1}, + unicode.Range16{Lo: 0xb1e9, Hi: 0xb203, Stride: 0x1}, + unicode.Range16{Lo: 0xb205, Hi: 0xb21f, Stride: 0x1}, + unicode.Range16{Lo: 0xb221, Hi: 0xb23b, Stride: 0x1}, + unicode.Range16{Lo: 0xb23d, Hi: 0xb257, Stride: 0x1}, + unicode.Range16{Lo: 0xb259, Hi: 0xb273, Stride: 0x1}, + unicode.Range16{Lo: 0xb275, Hi: 0xb28f, Stride: 0x1}, + unicode.Range16{Lo: 0xb291, Hi: 0xb2ab, Stride: 0x1}, + unicode.Range16{Lo: 0xb2ad, Hi: 0xb2c7, Stride: 0x1}, + unicode.Range16{Lo: 0xb2c9, Hi: 0xb2e3, Stride: 0x1}, + unicode.Range16{Lo: 0xb2e5, Hi: 0xb2ff, Stride: 0x1}, + unicode.Range16{Lo: 0xb301, Hi: 0xb31b, Stride: 0x1}, + unicode.Range16{Lo: 0xb31d, Hi: 0xb337, Stride: 0x1}, + unicode.Range16{Lo: 
0xb339, Hi: 0xb353, Stride: 0x1}, + unicode.Range16{Lo: 0xb355, Hi: 0xb36f, Stride: 0x1}, + unicode.Range16{Lo: 0xb371, Hi: 0xb38b, Stride: 0x1}, + unicode.Range16{Lo: 0xb38d, Hi: 0xb3a7, Stride: 0x1}, + unicode.Range16{Lo: 0xb3a9, Hi: 0xb3c3, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c5, Hi: 0xb3df, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e1, Hi: 0xb3fb, Stride: 0x1}, + unicode.Range16{Lo: 0xb3fd, Hi: 0xb417, Stride: 0x1}, + unicode.Range16{Lo: 0xb419, Hi: 0xb433, Stride: 0x1}, + unicode.Range16{Lo: 0xb435, Hi: 0xb44f, Stride: 0x1}, + unicode.Range16{Lo: 0xb451, Hi: 0xb46b, Stride: 0x1}, + unicode.Range16{Lo: 0xb46d, Hi: 0xb487, Stride: 0x1}, + unicode.Range16{Lo: 0xb489, Hi: 0xb4a3, Stride: 0x1}, + unicode.Range16{Lo: 0xb4a5, Hi: 0xb4bf, Stride: 0x1}, + unicode.Range16{Lo: 0xb4c1, Hi: 0xb4db, Stride: 0x1}, + unicode.Range16{Lo: 0xb4dd, Hi: 0xb4f7, Stride: 0x1}, + unicode.Range16{Lo: 0xb4f9, Hi: 0xb513, Stride: 0x1}, + unicode.Range16{Lo: 0xb515, Hi: 0xb52f, Stride: 0x1}, + unicode.Range16{Lo: 0xb531, Hi: 0xb54b, Stride: 0x1}, + unicode.Range16{Lo: 0xb54d, Hi: 0xb567, Stride: 0x1}, + unicode.Range16{Lo: 0xb569, Hi: 0xb583, Stride: 0x1}, + unicode.Range16{Lo: 0xb585, Hi: 0xb59f, Stride: 0x1}, + unicode.Range16{Lo: 0xb5a1, Hi: 0xb5bb, Stride: 0x1}, + unicode.Range16{Lo: 0xb5bd, Hi: 0xb5d7, Stride: 0x1}, + unicode.Range16{Lo: 0xb5d9, Hi: 0xb5f3, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f5, Hi: 0xb60f, Stride: 0x1}, + unicode.Range16{Lo: 0xb611, Hi: 0xb62b, Stride: 0x1}, + unicode.Range16{Lo: 0xb62d, Hi: 0xb647, Stride: 0x1}, + unicode.Range16{Lo: 0xb649, Hi: 0xb663, Stride: 0x1}, + unicode.Range16{Lo: 0xb665, Hi: 0xb67f, Stride: 0x1}, + unicode.Range16{Lo: 0xb681, Hi: 0xb69b, Stride: 0x1}, + unicode.Range16{Lo: 0xb69d, Hi: 0xb6b7, Stride: 0x1}, + unicode.Range16{Lo: 0xb6b9, Hi: 0xb6d3, Stride: 0x1}, + unicode.Range16{Lo: 0xb6d5, Hi: 0xb6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xb6f1, Hi: 0xb70b, Stride: 0x1}, + unicode.Range16{Lo: 0xb70d, Hi: 0xb727, Stride: 0x1}, + unicode.Range16{Lo: 0xb729, Hi: 0xb743, Stride: 0x1}, + unicode.Range16{Lo: 0xb745, Hi: 0xb75f, Stride: 0x1}, + unicode.Range16{Lo: 0xb761, Hi: 0xb77b, Stride: 0x1}, + unicode.Range16{Lo: 0xb77d, Hi: 0xb797, Stride: 0x1}, + unicode.Range16{Lo: 0xb799, Hi: 0xb7b3, Stride: 0x1}, + unicode.Range16{Lo: 0xb7b5, Hi: 0xb7cf, Stride: 0x1}, + unicode.Range16{Lo: 0xb7d1, Hi: 0xb7eb, Stride: 0x1}, + unicode.Range16{Lo: 0xb7ed, Hi: 0xb807, Stride: 0x1}, + unicode.Range16{Lo: 0xb809, Hi: 0xb823, Stride: 0x1}, + unicode.Range16{Lo: 0xb825, Hi: 0xb83f, Stride: 0x1}, + unicode.Range16{Lo: 0xb841, Hi: 0xb85b, Stride: 0x1}, + unicode.Range16{Lo: 0xb85d, Hi: 0xb877, Stride: 0x1}, + unicode.Range16{Lo: 0xb879, Hi: 0xb893, Stride: 0x1}, + unicode.Range16{Lo: 0xb895, Hi: 0xb8af, Stride: 0x1}, + unicode.Range16{Lo: 0xb8b1, Hi: 0xb8cb, Stride: 0x1}, + unicode.Range16{Lo: 0xb8cd, Hi: 0xb8e7, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e9, Hi: 0xb903, Stride: 0x1}, + unicode.Range16{Lo: 0xb905, Hi: 0xb91f, Stride: 0x1}, + unicode.Range16{Lo: 0xb921, Hi: 0xb93b, Stride: 0x1}, + unicode.Range16{Lo: 0xb93d, Hi: 0xb957, Stride: 0x1}, + unicode.Range16{Lo: 0xb959, Hi: 0xb973, Stride: 0x1}, + unicode.Range16{Lo: 0xb975, Hi: 0xb98f, Stride: 0x1}, + unicode.Range16{Lo: 0xb991, Hi: 0xb9ab, Stride: 0x1}, + unicode.Range16{Lo: 0xb9ad, Hi: 0xb9c7, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c9, Hi: 0xb9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e5, Hi: 0xb9ff, Stride: 0x1}, + unicode.Range16{Lo: 0xba01, Hi: 0xba1b, Stride: 0x1}, + unicode.Range16{Lo: 0xba1d, Hi: 0xba37, 
Stride: 0x1}, + unicode.Range16{Lo: 0xba39, Hi: 0xba53, Stride: 0x1}, + unicode.Range16{Lo: 0xba55, Hi: 0xba6f, Stride: 0x1}, + unicode.Range16{Lo: 0xba71, Hi: 0xba8b, Stride: 0x1}, + unicode.Range16{Lo: 0xba8d, Hi: 0xbaa7, Stride: 0x1}, + unicode.Range16{Lo: 0xbaa9, Hi: 0xbac3, Stride: 0x1}, + unicode.Range16{Lo: 0xbac5, Hi: 0xbadf, Stride: 0x1}, + unicode.Range16{Lo: 0xbae1, Hi: 0xbafb, Stride: 0x1}, + unicode.Range16{Lo: 0xbafd, Hi: 0xbb17, Stride: 0x1}, + unicode.Range16{Lo: 0xbb19, Hi: 0xbb33, Stride: 0x1}, + unicode.Range16{Lo: 0xbb35, Hi: 0xbb4f, Stride: 0x1}, + unicode.Range16{Lo: 0xbb51, Hi: 0xbb6b, Stride: 0x1}, + unicode.Range16{Lo: 0xbb6d, Hi: 0xbb87, Stride: 0x1}, + unicode.Range16{Lo: 0xbb89, Hi: 0xbba3, Stride: 0x1}, + unicode.Range16{Lo: 0xbba5, Hi: 0xbbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbbc1, Hi: 0xbbdb, Stride: 0x1}, + unicode.Range16{Lo: 0xbbdd, Hi: 0xbbf7, Stride: 0x1}, + unicode.Range16{Lo: 0xbbf9, Hi: 0xbc13, Stride: 0x1}, + unicode.Range16{Lo: 0xbc15, Hi: 0xbc2f, Stride: 0x1}, + unicode.Range16{Lo: 0xbc31, Hi: 0xbc4b, Stride: 0x1}, + unicode.Range16{Lo: 0xbc4d, Hi: 0xbc67, Stride: 0x1}, + unicode.Range16{Lo: 0xbc69, Hi: 0xbc83, Stride: 0x1}, + unicode.Range16{Lo: 0xbc85, Hi: 0xbc9f, Stride: 0x1}, + unicode.Range16{Lo: 0xbca1, Hi: 0xbcbb, Stride: 0x1}, + unicode.Range16{Lo: 0xbcbd, Hi: 0xbcd7, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd9, Hi: 0xbcf3, Stride: 0x1}, + unicode.Range16{Lo: 0xbcf5, Hi: 0xbd0f, Stride: 0x1}, + unicode.Range16{Lo: 0xbd11, Hi: 0xbd2b, Stride: 0x1}, + unicode.Range16{Lo: 0xbd2d, Hi: 0xbd47, Stride: 0x1}, + unicode.Range16{Lo: 0xbd49, Hi: 0xbd63, Stride: 0x1}, + unicode.Range16{Lo: 0xbd65, Hi: 0xbd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xbd81, Hi: 0xbd9b, Stride: 0x1}, + unicode.Range16{Lo: 0xbd9d, Hi: 0xbdb7, Stride: 0x1}, + unicode.Range16{Lo: 0xbdb9, Hi: 0xbdd3, Stride: 0x1}, + unicode.Range16{Lo: 0xbdd5, Hi: 0xbdef, Stride: 0x1}, + unicode.Range16{Lo: 0xbdf1, Hi: 0xbe0b, Stride: 0x1}, + unicode.Range16{Lo: 0xbe0d, Hi: 0xbe27, Stride: 0x1}, + unicode.Range16{Lo: 0xbe29, Hi: 0xbe43, Stride: 0x1}, + unicode.Range16{Lo: 0xbe45, Hi: 0xbe5f, Stride: 0x1}, + unicode.Range16{Lo: 0xbe61, Hi: 0xbe7b, Stride: 0x1}, + unicode.Range16{Lo: 0xbe7d, Hi: 0xbe97, Stride: 0x1}, + unicode.Range16{Lo: 0xbe99, Hi: 0xbeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xbeb5, Hi: 0xbecf, Stride: 0x1}, + unicode.Range16{Lo: 0xbed1, Hi: 0xbeeb, Stride: 0x1}, + unicode.Range16{Lo: 0xbeed, Hi: 0xbf07, Stride: 0x1}, + unicode.Range16{Lo: 0xbf09, Hi: 0xbf23, Stride: 0x1}, + unicode.Range16{Lo: 0xbf25, Hi: 0xbf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xbf41, Hi: 0xbf5b, Stride: 0x1}, + unicode.Range16{Lo: 0xbf5d, Hi: 0xbf77, Stride: 0x1}, + unicode.Range16{Lo: 0xbf79, Hi: 0xbf93, Stride: 0x1}, + unicode.Range16{Lo: 0xbf95, Hi: 0xbfaf, Stride: 0x1}, + unicode.Range16{Lo: 0xbfb1, Hi: 0xbfcb, Stride: 0x1}, + unicode.Range16{Lo: 0xbfcd, Hi: 0xbfe7, Stride: 0x1}, + unicode.Range16{Lo: 0xbfe9, Hi: 0xc003, Stride: 0x1}, + unicode.Range16{Lo: 0xc005, Hi: 0xc01f, Stride: 0x1}, + unicode.Range16{Lo: 0xc021, Hi: 0xc03b, Stride: 0x1}, + unicode.Range16{Lo: 0xc03d, Hi: 0xc057, Stride: 0x1}, + unicode.Range16{Lo: 0xc059, Hi: 0xc073, Stride: 0x1}, + unicode.Range16{Lo: 0xc075, Hi: 0xc08f, Stride: 0x1}, + unicode.Range16{Lo: 0xc091, Hi: 0xc0ab, Stride: 0x1}, + unicode.Range16{Lo: 0xc0ad, Hi: 0xc0c7, Stride: 0x1}, + unicode.Range16{Lo: 0xc0c9, Hi: 0xc0e3, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e5, Hi: 0xc0ff, Stride: 0x1}, + unicode.Range16{Lo: 0xc101, Hi: 0xc11b, Stride: 0x1}, + 
unicode.Range16{Lo: 0xc11d, Hi: 0xc137, Stride: 0x1}, + unicode.Range16{Lo: 0xc139, Hi: 0xc153, Stride: 0x1}, + unicode.Range16{Lo: 0xc155, Hi: 0xc16f, Stride: 0x1}, + unicode.Range16{Lo: 0xc171, Hi: 0xc18b, Stride: 0x1}, + unicode.Range16{Lo: 0xc18d, Hi: 0xc1a7, Stride: 0x1}, + unicode.Range16{Lo: 0xc1a9, Hi: 0xc1c3, Stride: 0x1}, + unicode.Range16{Lo: 0xc1c5, Hi: 0xc1df, Stride: 0x1}, + unicode.Range16{Lo: 0xc1e1, Hi: 0xc1fb, Stride: 0x1}, + unicode.Range16{Lo: 0xc1fd, Hi: 0xc217, Stride: 0x1}, + unicode.Range16{Lo: 0xc219, Hi: 0xc233, Stride: 0x1}, + unicode.Range16{Lo: 0xc235, Hi: 0xc24f, Stride: 0x1}, + unicode.Range16{Lo: 0xc251, Hi: 0xc26b, Stride: 0x1}, + unicode.Range16{Lo: 0xc26d, Hi: 0xc287, Stride: 0x1}, + unicode.Range16{Lo: 0xc289, Hi: 0xc2a3, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a5, Hi: 0xc2bf, Stride: 0x1}, + unicode.Range16{Lo: 0xc2c1, Hi: 0xc2db, Stride: 0x1}, + unicode.Range16{Lo: 0xc2dd, Hi: 0xc2f7, Stride: 0x1}, + unicode.Range16{Lo: 0xc2f9, Hi: 0xc313, Stride: 0x1}, + unicode.Range16{Lo: 0xc315, Hi: 0xc32f, Stride: 0x1}, + unicode.Range16{Lo: 0xc331, Hi: 0xc34b, Stride: 0x1}, + unicode.Range16{Lo: 0xc34d, Hi: 0xc367, Stride: 0x1}, + unicode.Range16{Lo: 0xc369, Hi: 0xc383, Stride: 0x1}, + unicode.Range16{Lo: 0xc385, Hi: 0xc39f, Stride: 0x1}, + unicode.Range16{Lo: 0xc3a1, Hi: 0xc3bb, Stride: 0x1}, + unicode.Range16{Lo: 0xc3bd, Hi: 0xc3d7, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d9, Hi: 0xc3f3, Stride: 0x1}, + unicode.Range16{Lo: 0xc3f5, Hi: 0xc40f, Stride: 0x1}, + unicode.Range16{Lo: 0xc411, Hi: 0xc42b, Stride: 0x1}, + unicode.Range16{Lo: 0xc42d, Hi: 0xc447, Stride: 0x1}, + unicode.Range16{Lo: 0xc449, Hi: 0xc463, Stride: 0x1}, + unicode.Range16{Lo: 0xc465, Hi: 0xc47f, Stride: 0x1}, + unicode.Range16{Lo: 0xc481, Hi: 0xc49b, Stride: 0x1}, + unicode.Range16{Lo: 0xc49d, Hi: 0xc4b7, Stride: 0x1}, + unicode.Range16{Lo: 0xc4b9, Hi: 0xc4d3, Stride: 0x1}, + unicode.Range16{Lo: 0xc4d5, Hi: 0xc4ef, Stride: 0x1}, + unicode.Range16{Lo: 0xc4f1, Hi: 0xc50b, Stride: 0x1}, + unicode.Range16{Lo: 0xc50d, Hi: 0xc527, Stride: 0x1}, + unicode.Range16{Lo: 0xc529, Hi: 0xc543, Stride: 0x1}, + unicode.Range16{Lo: 0xc545, Hi: 0xc55f, Stride: 0x1}, + unicode.Range16{Lo: 0xc561, Hi: 0xc57b, Stride: 0x1}, + unicode.Range16{Lo: 0xc57d, Hi: 0xc597, Stride: 0x1}, + unicode.Range16{Lo: 0xc599, Hi: 0xc5b3, Stride: 0x1}, + unicode.Range16{Lo: 0xc5b5, Hi: 0xc5cf, Stride: 0x1}, + unicode.Range16{Lo: 0xc5d1, Hi: 0xc5eb, Stride: 0x1}, + unicode.Range16{Lo: 0xc5ed, Hi: 0xc607, Stride: 0x1}, + unicode.Range16{Lo: 0xc609, Hi: 0xc623, Stride: 0x1}, + unicode.Range16{Lo: 0xc625, Hi: 0xc63f, Stride: 0x1}, + unicode.Range16{Lo: 0xc641, Hi: 0xc65b, Stride: 0x1}, + unicode.Range16{Lo: 0xc65d, Hi: 0xc677, Stride: 0x1}, + unicode.Range16{Lo: 0xc679, Hi: 0xc693, Stride: 0x1}, + unicode.Range16{Lo: 0xc695, Hi: 0xc6af, Stride: 0x1}, + unicode.Range16{Lo: 0xc6b1, Hi: 0xc6cb, Stride: 0x1}, + unicode.Range16{Lo: 0xc6cd, Hi: 0xc6e7, Stride: 0x1}, + unicode.Range16{Lo: 0xc6e9, Hi: 0xc703, Stride: 0x1}, + unicode.Range16{Lo: 0xc705, Hi: 0xc71f, Stride: 0x1}, + unicode.Range16{Lo: 0xc721, Hi: 0xc73b, Stride: 0x1}, + unicode.Range16{Lo: 0xc73d, Hi: 0xc757, Stride: 0x1}, + unicode.Range16{Lo: 0xc759, Hi: 0xc773, Stride: 0x1}, + unicode.Range16{Lo: 0xc775, Hi: 0xc78f, Stride: 0x1}, + unicode.Range16{Lo: 0xc791, Hi: 0xc7ab, Stride: 0x1}, + unicode.Range16{Lo: 0xc7ad, Hi: 0xc7c7, Stride: 0x1}, + unicode.Range16{Lo: 0xc7c9, Hi: 0xc7e3, Stride: 0x1}, + unicode.Range16{Lo: 0xc7e5, Hi: 0xc7ff, Stride: 0x1}, + unicode.Range16{Lo: 
0xc801, Hi: 0xc81b, Stride: 0x1}, + unicode.Range16{Lo: 0xc81d, Hi: 0xc837, Stride: 0x1}, + unicode.Range16{Lo: 0xc839, Hi: 0xc853, Stride: 0x1}, + unicode.Range16{Lo: 0xc855, Hi: 0xc86f, Stride: 0x1}, + unicode.Range16{Lo: 0xc871, Hi: 0xc88b, Stride: 0x1}, + unicode.Range16{Lo: 0xc88d, Hi: 0xc8a7, Stride: 0x1}, + unicode.Range16{Lo: 0xc8a9, Hi: 0xc8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xc8c5, Hi: 0xc8df, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e1, Hi: 0xc8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xc8fd, Hi: 0xc917, Stride: 0x1}, + unicode.Range16{Lo: 0xc919, Hi: 0xc933, Stride: 0x1}, + unicode.Range16{Lo: 0xc935, Hi: 0xc94f, Stride: 0x1}, + unicode.Range16{Lo: 0xc951, Hi: 0xc96b, Stride: 0x1}, + unicode.Range16{Lo: 0xc96d, Hi: 0xc987, Stride: 0x1}, + unicode.Range16{Lo: 0xc989, Hi: 0xc9a3, Stride: 0x1}, + unicode.Range16{Lo: 0xc9a5, Hi: 0xc9bf, Stride: 0x1}, + unicode.Range16{Lo: 0xc9c1, Hi: 0xc9db, Stride: 0x1}, + unicode.Range16{Lo: 0xc9dd, Hi: 0xc9f7, Stride: 0x1}, + unicode.Range16{Lo: 0xc9f9, Hi: 0xca13, Stride: 0x1}, + unicode.Range16{Lo: 0xca15, Hi: 0xca2f, Stride: 0x1}, + unicode.Range16{Lo: 0xca31, Hi: 0xca4b, Stride: 0x1}, + unicode.Range16{Lo: 0xca4d, Hi: 0xca67, Stride: 0x1}, + unicode.Range16{Lo: 0xca69, Hi: 0xca83, Stride: 0x1}, + unicode.Range16{Lo: 0xca85, Hi: 0xca9f, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa1, Hi: 0xcabb, Stride: 0x1}, + unicode.Range16{Lo: 0xcabd, Hi: 0xcad7, Stride: 0x1}, + unicode.Range16{Lo: 0xcad9, Hi: 0xcaf3, Stride: 0x1}, + unicode.Range16{Lo: 0xcaf5, Hi: 0xcb0f, Stride: 0x1}, + unicode.Range16{Lo: 0xcb11, Hi: 0xcb2b, Stride: 0x1}, + unicode.Range16{Lo: 0xcb2d, Hi: 0xcb47, Stride: 0x1}, + unicode.Range16{Lo: 0xcb49, Hi: 0xcb63, Stride: 0x1}, + unicode.Range16{Lo: 0xcb65, Hi: 0xcb7f, Stride: 0x1}, + unicode.Range16{Lo: 0xcb81, Hi: 0xcb9b, Stride: 0x1}, + unicode.Range16{Lo: 0xcb9d, Hi: 0xcbb7, Stride: 0x1}, + unicode.Range16{Lo: 0xcbb9, Hi: 0xcbd3, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd5, Hi: 0xcbef, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf1, Hi: 0xcc0b, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0d, Hi: 0xcc27, Stride: 0x1}, + unicode.Range16{Lo: 0xcc29, Hi: 0xcc43, Stride: 0x1}, + unicode.Range16{Lo: 0xcc45, Hi: 0xcc5f, Stride: 0x1}, + unicode.Range16{Lo: 0xcc61, Hi: 0xcc7b, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7d, Hi: 0xcc97, Stride: 0x1}, + unicode.Range16{Lo: 0xcc99, Hi: 0xccb3, Stride: 0x1}, + unicode.Range16{Lo: 0xccb5, Hi: 0xcccf, Stride: 0x1}, + unicode.Range16{Lo: 0xccd1, Hi: 0xcceb, Stride: 0x1}, + unicode.Range16{Lo: 0xcced, Hi: 0xcd07, Stride: 0x1}, + unicode.Range16{Lo: 0xcd09, Hi: 0xcd23, Stride: 0x1}, + unicode.Range16{Lo: 0xcd25, Hi: 0xcd3f, Stride: 0x1}, + unicode.Range16{Lo: 0xcd41, Hi: 0xcd5b, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5d, Hi: 0xcd77, Stride: 0x1}, + unicode.Range16{Lo: 0xcd79, Hi: 0xcd93, Stride: 0x1}, + unicode.Range16{Lo: 0xcd95, Hi: 0xcdaf, Stride: 0x1}, + unicode.Range16{Lo: 0xcdb1, Hi: 0xcdcb, Stride: 0x1}, + unicode.Range16{Lo: 0xcdcd, Hi: 0xcde7, Stride: 0x1}, + unicode.Range16{Lo: 0xcde9, Hi: 0xce03, Stride: 0x1}, + unicode.Range16{Lo: 0xce05, Hi: 0xce1f, Stride: 0x1}, + unicode.Range16{Lo: 0xce21, Hi: 0xce3b, Stride: 0x1}, + unicode.Range16{Lo: 0xce3d, Hi: 0xce57, Stride: 0x1}, + unicode.Range16{Lo: 0xce59, Hi: 0xce73, Stride: 0x1}, + unicode.Range16{Lo: 0xce75, Hi: 0xce8f, Stride: 0x1}, + unicode.Range16{Lo: 0xce91, Hi: 0xceab, Stride: 0x1}, + unicode.Range16{Lo: 0xcead, Hi: 0xcec7, Stride: 0x1}, + unicode.Range16{Lo: 0xcec9, Hi: 0xcee3, Stride: 0x1}, + unicode.Range16{Lo: 0xcee5, Hi: 0xceff, 
Stride: 0x1}, + unicode.Range16{Lo: 0xcf01, Hi: 0xcf1b, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1d, Hi: 0xcf37, Stride: 0x1}, + unicode.Range16{Lo: 0xcf39, Hi: 0xcf53, Stride: 0x1}, + unicode.Range16{Lo: 0xcf55, Hi: 0xcf6f, Stride: 0x1}, + unicode.Range16{Lo: 0xcf71, Hi: 0xcf8b, Stride: 0x1}, + unicode.Range16{Lo: 0xcf8d, Hi: 0xcfa7, Stride: 0x1}, + unicode.Range16{Lo: 0xcfa9, Hi: 0xcfc3, Stride: 0x1}, + unicode.Range16{Lo: 0xcfc5, Hi: 0xcfdf, Stride: 0x1}, + unicode.Range16{Lo: 0xcfe1, Hi: 0xcffb, Stride: 0x1}, + unicode.Range16{Lo: 0xcffd, Hi: 0xd017, Stride: 0x1}, + unicode.Range16{Lo: 0xd019, Hi: 0xd033, Stride: 0x1}, + unicode.Range16{Lo: 0xd035, Hi: 0xd04f, Stride: 0x1}, + unicode.Range16{Lo: 0xd051, Hi: 0xd06b, Stride: 0x1}, + unicode.Range16{Lo: 0xd06d, Hi: 0xd087, Stride: 0x1}, + unicode.Range16{Lo: 0xd089, Hi: 0xd0a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd0a5, Hi: 0xd0bf, Stride: 0x1}, + unicode.Range16{Lo: 0xd0c1, Hi: 0xd0db, Stride: 0x1}, + unicode.Range16{Lo: 0xd0dd, Hi: 0xd0f7, Stride: 0x1}, + unicode.Range16{Lo: 0xd0f9, Hi: 0xd113, Stride: 0x1}, + unicode.Range16{Lo: 0xd115, Hi: 0xd12f, Stride: 0x1}, + unicode.Range16{Lo: 0xd131, Hi: 0xd14b, Stride: 0x1}, + unicode.Range16{Lo: 0xd14d, Hi: 0xd167, Stride: 0x1}, + unicode.Range16{Lo: 0xd169, Hi: 0xd183, Stride: 0x1}, + unicode.Range16{Lo: 0xd185, Hi: 0xd19f, Stride: 0x1}, + unicode.Range16{Lo: 0xd1a1, Hi: 0xd1bb, Stride: 0x1}, + unicode.Range16{Lo: 0xd1bd, Hi: 0xd1d7, Stride: 0x1}, + unicode.Range16{Lo: 0xd1d9, Hi: 0xd1f3, Stride: 0x1}, + unicode.Range16{Lo: 0xd1f5, Hi: 0xd20f, Stride: 0x1}, + unicode.Range16{Lo: 0xd211, Hi: 0xd22b, Stride: 0x1}, + unicode.Range16{Lo: 0xd22d, Hi: 0xd247, Stride: 0x1}, + unicode.Range16{Lo: 0xd249, Hi: 0xd263, Stride: 0x1}, + unicode.Range16{Lo: 0xd265, Hi: 0xd27f, Stride: 0x1}, + unicode.Range16{Lo: 0xd281, Hi: 0xd29b, Stride: 0x1}, + unicode.Range16{Lo: 0xd29d, Hi: 0xd2b7, Stride: 0x1}, + unicode.Range16{Lo: 0xd2b9, Hi: 0xd2d3, Stride: 0x1}, + unicode.Range16{Lo: 0xd2d5, Hi: 0xd2ef, Stride: 0x1}, + unicode.Range16{Lo: 0xd2f1, Hi: 0xd30b, Stride: 0x1}, + unicode.Range16{Lo: 0xd30d, Hi: 0xd327, Stride: 0x1}, + unicode.Range16{Lo: 0xd329, Hi: 0xd343, Stride: 0x1}, + unicode.Range16{Lo: 0xd345, Hi: 0xd35f, Stride: 0x1}, + unicode.Range16{Lo: 0xd361, Hi: 0xd37b, Stride: 0x1}, + unicode.Range16{Lo: 0xd37d, Hi: 0xd397, Stride: 0x1}, + unicode.Range16{Lo: 0xd399, Hi: 0xd3b3, Stride: 0x1}, + unicode.Range16{Lo: 0xd3b5, Hi: 0xd3cf, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d1, Hi: 0xd3eb, Stride: 0x1}, + unicode.Range16{Lo: 0xd3ed, Hi: 0xd407, Stride: 0x1}, + unicode.Range16{Lo: 0xd409, Hi: 0xd423, Stride: 0x1}, + unicode.Range16{Lo: 0xd425, Hi: 0xd43f, Stride: 0x1}, + unicode.Range16{Lo: 0xd441, Hi: 0xd45b, Stride: 0x1}, + unicode.Range16{Lo: 0xd45d, Hi: 0xd477, Stride: 0x1}, + unicode.Range16{Lo: 0xd479, Hi: 0xd493, Stride: 0x1}, + unicode.Range16{Lo: 0xd495, Hi: 0xd4af, Stride: 0x1}, + unicode.Range16{Lo: 0xd4b1, Hi: 0xd4cb, Stride: 0x1}, + unicode.Range16{Lo: 0xd4cd, Hi: 0xd4e7, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e9, Hi: 0xd503, Stride: 0x1}, + unicode.Range16{Lo: 0xd505, Hi: 0xd51f, Stride: 0x1}, + unicode.Range16{Lo: 0xd521, Hi: 0xd53b, Stride: 0x1}, + unicode.Range16{Lo: 0xd53d, Hi: 0xd557, Stride: 0x1}, + unicode.Range16{Lo: 0xd559, Hi: 0xd573, Stride: 0x1}, + unicode.Range16{Lo: 0xd575, Hi: 0xd58f, Stride: 0x1}, + unicode.Range16{Lo: 0xd591, Hi: 0xd5ab, Stride: 0x1}, + unicode.Range16{Lo: 0xd5ad, Hi: 0xd5c7, Stride: 0x1}, + unicode.Range16{Lo: 0xd5c9, Hi: 0xd5e3, Stride: 0x1}, + 
unicode.Range16{Lo: 0xd5e5, Hi: 0xd5ff, Stride: 0x1}, + unicode.Range16{Lo: 0xd601, Hi: 0xd61b, Stride: 0x1}, + unicode.Range16{Lo: 0xd61d, Hi: 0xd637, Stride: 0x1}, + unicode.Range16{Lo: 0xd639, Hi: 0xd653, Stride: 0x1}, + unicode.Range16{Lo: 0xd655, Hi: 0xd66f, Stride: 0x1}, + unicode.Range16{Lo: 0xd671, Hi: 0xd68b, Stride: 0x1}, + unicode.Range16{Lo: 0xd68d, Hi: 0xd6a7, Stride: 0x1}, + unicode.Range16{Lo: 0xd6a9, Hi: 0xd6c3, Stride: 0x1}, + unicode.Range16{Lo: 0xd6c5, Hi: 0xd6df, Stride: 0x1}, + unicode.Range16{Lo: 0xd6e1, Hi: 0xd6fb, Stride: 0x1}, + unicode.Range16{Lo: 0xd6fd, Hi: 0xd717, Stride: 0x1}, + unicode.Range16{Lo: 0xd719, Hi: 0xd733, Stride: 0x1}, + unicode.Range16{Lo: 0xd735, Hi: 0xd74f, Stride: 0x1}, + unicode.Range16{Lo: 0xd751, Hi: 0xd76b, Stride: 0x1}, + unicode.Range16{Lo: 0xd76d, Hi: 0xd787, Stride: 0x1}, + unicode.Range16{Lo: 0xd789, Hi: 0xd7a3, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemePrepend = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1}, + unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1}, + unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1}, + unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1}, + unicode.Range32{Lo: 0x111c2, Hi: 0x111c3, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeRegional_Indicator = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeSpacingMark = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1}, + unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1}, + unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1}, + unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1}, + unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1}, + unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1}, + unicode.Range16{Lo: 0x9bf, Hi: 0x9c0, Stride: 0x1}, + unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1}, + unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1}, + unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1}, + unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1}, + unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1}, + unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1}, + unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1}, + unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1}, + unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1}, + unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1}, + unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1}, + unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1}, + unicode.Range16{Lo: 0xbbf, Hi: 0xbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1}, + unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1}, + unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1}, + unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1}, + unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1}, + unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1}, + unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0, Hi: 0xcc1, Stride: 0x1}, + unicode.Range16{Lo: 0xcc3, Hi: 0xcc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1}, + unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1}, + unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1}, + unicode.Range16{Lo: 0xd3f, Hi: 0xd40, Stride: 0x1}, + unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1}, + unicode.Range16{Lo: 0xd4a, Hi: 
0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdd0, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xdde, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe33, Hi: 0xe33, Stride: 0x1}, + unicode.Range16{Lo: 0xeb3, Hi: 0xeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1084, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1}, + unicode.Range32{Lo: 0x110b0, Hi: 
0x110b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
+		unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
+		unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
+		unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
+		unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
+		unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
+		unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
+		unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
+		unicode.Range32{Lo: 0x1133f, Hi: 0x1133f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
+		unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
+		unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
+		unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
+		unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
+		unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
+		unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b1, Hi: 0x114b2, Stride: 0x1},
+		unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x114bb, Hi: 0x114bc, Stride: 0x1},
+		unicode.Range32{Lo: 0x114be, Hi: 0x114be, Stride: 0x1},
+		unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b0, Hi: 0x115b1, Stride: 0x1},
+		unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
+		unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
+		unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
+		unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
+		unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
+		unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1},
+		unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
+		unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
+		unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
+		unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
+		unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d166, Hi: 0x1d166, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d16d, Hi: 0x1d16d, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _GraphemeT = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x11a8, Hi: 0x11ff, Stride: 0x1},
+		unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _GraphemeV = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x1160, Hi: 0x11a7, Stride: 0x1},
+		unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _GraphemeZWJ = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x200d, Hi: 0x200d, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+type _GraphemeRuneRange unicode.RangeTable
+
+func _GraphemeRuneType(r rune) *_GraphemeRuneRange {
+	switch {
+	case unicode.Is(_GraphemeCR, r):
+		return (*_GraphemeRuneRange)(_GraphemeCR)
+	case unicode.Is(_GraphemeControl, r):
+		return (*_GraphemeRuneRange)(_GraphemeControl)
+	case unicode.Is(_GraphemeE_Base, r):
+		return (*_GraphemeRuneRange)(_GraphemeE_Base)
+	case unicode.Is(_GraphemeE_Base_GAZ, r):
+		return (*_GraphemeRuneRange)(_GraphemeE_Base_GAZ)
+	case unicode.Is(_GraphemeE_Modifier, r):
+		return (*_GraphemeRuneRange)(_GraphemeE_Modifier)
+	case unicode.Is(_GraphemeExtend, r):
+		return (*_GraphemeRuneRange)(_GraphemeExtend)
+	case unicode.Is(_GraphemeGlue_After_Zwj, r):
+		return (*_GraphemeRuneRange)(_GraphemeGlue_After_Zwj)
+	case unicode.Is(_GraphemeL, r):
+		return (*_GraphemeRuneRange)(_GraphemeL)
+	case unicode.Is(_GraphemeLF, r):
+		return (*_GraphemeRuneRange)(_GraphemeLF)
+	case unicode.Is(_GraphemeLV, r):
+		return (*_GraphemeRuneRange)(_GraphemeLV)
+	case unicode.Is(_GraphemeLVT, r):
+		return (*_GraphemeRuneRange)(_GraphemeLVT)
+	case unicode.Is(_GraphemePrepend, r):
+		return (*_GraphemeRuneRange)(_GraphemePrepend)
+	case unicode.Is(_GraphemeRegional_Indicator, r):
+		return (*_GraphemeRuneRange)(_GraphemeRegional_Indicator)
+	case unicode.Is(_GraphemeSpacingMark, r):
+		return (*_GraphemeRuneRange)(_GraphemeSpacingMark)
+	case unicode.Is(_GraphemeT, r):
+		return (*_GraphemeRuneRange)(_GraphemeT)
+	case unicode.Is(_GraphemeV, r):
+		return (*_GraphemeRuneRange)(_GraphemeV)
+	case unicode.Is(_GraphemeZWJ, r):
+		return (*_GraphemeRuneRange)(_GraphemeZWJ)
+	default:
+		return nil
+	}
+}
+func (rng *_GraphemeRuneRange) String() string {
+	switch (*unicode.RangeTable)(rng) {
+	case _GraphemeCR:
+		return "CR"
+	case _GraphemeControl:
+		return "Control"
+	case _GraphemeE_Base:
+		return "E_Base"
+	case _GraphemeE_Base_GAZ:
+		return "E_Base_GAZ"
+	case _GraphemeE_Modifier:
+		return "E_Modifier"
+	case _GraphemeExtend:
+		return "Extend"
+	case _GraphemeGlue_After_Zwj:
+		return "Glue_After_Zwj"
+	case _GraphemeL:
+		return "L"
+	case _GraphemeLF:
+		return "LF"
+	case _GraphemeLV:
+		return "LV"
+	case _GraphemeLVT:
+		return "LVT"
+	case _GraphemePrepend:
+		return "Prepend"
+	case _GraphemeRegional_Indicator:
+		return "Regional_Indicator"
+	case _GraphemeSpacingMark:
+		return "SpacingMark"
+	case _GraphemeT:
+		return "T"
+	case _GraphemeV:
+		return "V"
+	case _GraphemeZWJ:
+		return "ZWJ"
+	default:
+		return "Other"
+	}
+}
+
+var _WordALetter = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1},
+		unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1},
+		unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1},
+		unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1},
+		unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1},
+		unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1},
+		unicode.Range16{Lo: 0xd8, Hi: 0xf6, Stride: 0x1},
+		unicode.Range16{Lo: 0xf8, Hi: 0x1ba, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
+		unicode.Range16{Lo: 0x1bc, Hi: 0x1bf, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
+		unicode.Range16{Lo: 0x1c4, Hi: 0x293, Stride: 0x1},
+		unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1},
+		unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1},
+		unicode.Range16{Lo: 0x2b0, Hi: 0x2c1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1},
+		unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1},
+		unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1},
+		unicode.Range16{Lo: 0x370, Hi: 0x373, Stride: 0x1},
+		unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1},
+		unicode.Range16{Lo: 0x376, Hi: 0x377, Stride: 0x1},
+		unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1},
+		unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1},
+		unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1},
+		unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1},
+		unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1},
+		unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1},
+		unicode.Range16{Lo: 0x38e,
Hi: 0x3a1, Stride: 0x1}, + unicode.Range16{Lo: 0x3a3, Hi: 0x3f5, Stride: 0x1}, + unicode.Range16{Lo: 0x3f7, Hi: 0x481, Stride: 0x1}, + unicode.Range16{Lo: 0x48a, Hi: 0x52f, Stride: 0x1}, + unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1}, + unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1}, + unicode.Range16{Lo: 0x561, Hi: 0x587, Stride: 0x1}, + unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1}, + unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1}, + unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1}, + unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1}, + unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1}, + unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1}, + unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1}, + unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1}, + unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1}, + unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1}, + unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1}, + unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1}, + unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1}, + unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1}, + unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1}, + unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1}, + unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1}, + unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1}, + unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1}, + unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1}, + unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1}, + unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1}, + unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1}, + unicode.Range16{Lo: 0x8a0, Hi: 0x8b4, Stride: 0x1}, + unicode.Range16{Lo: 0x8b6, Hi: 0x8bd, Stride: 0x1}, + unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1}, + unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1}, + unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1}, + unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1}, + unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1}, + unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1}, + unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1}, + unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1}, + unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1}, + unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1}, + unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1}, + unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1}, + unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1}, + unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1}, + unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1}, + unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1}, + unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1}, + unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1}, + unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1}, + unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1}, + unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1}, + unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1}, + unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1}, + unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1}, + unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1}, + unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1}, + unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1}, + unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1}, + unicode.Range16{Lo: 0xab2, Hi: 0xab3, Stride: 0x1}, + unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1}, + unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1}, + 
unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1}, + unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1}, + unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1}, + unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1}, + unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1}, + unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1}, + unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1}, + unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1}, + unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1}, + unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1}, + unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1}, + unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1}, + unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1}, + unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1}, + unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1}, + unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1}, + unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1}, + unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1}, + unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1}, + unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1}, + unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1}, + unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1}, + unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1}, + unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1}, + unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1}, + unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1}, + unicode.Range16{Lo: 0xcde, Hi: 0xcde, Stride: 0x1}, + unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1}, + unicode.Range16{Lo: 0xd05, Hi: 0xd0c, Stride: 0x1}, + unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1}, + unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1}, + unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1}, + unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1}, + unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1}, + unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1}, + unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1}, + unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1}, + unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1}, + unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1}, + unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1}, + unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1}, + unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1}, + unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1}, + unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1}, + unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1}, + unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1}, + unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1}, + unicode.Range16{Lo: 0x10fd, Hi: 0x1248, Stride: 0x1}, + unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1}, + unicode.Range16{Lo: 0x1250, 
Hi: 0x1256, Stride: 0x1}, + unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1}, + unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1}, + unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1}, + unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1}, + unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1}, + unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1}, + unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1}, + unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1}, + unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1}, + unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1}, + unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1}, + unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1}, + unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1}, + unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1}, + unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1}, + unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1}, + unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1}, + unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1}, + unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1}, + unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1}, + unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1700, Hi: 0x170c, Stride: 0x1}, + unicode.Range16{Lo: 0x170e, Hi: 0x1711, Stride: 0x1}, + unicode.Range16{Lo: 0x1720, Hi: 0x1731, Stride: 0x1}, + unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1}, + unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1}, + unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1}, + unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1}, + unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1}, + unicode.Range16{Lo: 0x1844, Hi: 0x1877, Stride: 0x1}, + unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1}, + unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1}, + unicode.Range16{Lo: 0x18aa, Hi: 0x18aa, Stride: 0x1}, + unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1}, + unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1}, + unicode.Range16{Lo: 0x1b45, Hi: 0x1b4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1}, + unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1}, + unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1}, + unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1}, + unicode.Range16{Lo: 0x1cee, Hi: 0x1cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1}, + unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1}, + unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1}, + unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1}, + unicode.Range16{Lo: 0x1e00, Hi: 0x1f15, Stride: 0x1}, + unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f20, Hi: 0x1f45, Stride: 0x1}, + unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1}, + unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1}, 
+ unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f80, Hi: 0x1fb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe0, Hi: 0x1fec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 0x1}, + unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1}, + unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1}, + unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1}, + unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1}, + unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1}, + unicode.Range16{Lo: 0x210a, Hi: 0x2113, Stride: 0x1}, + unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1}, + unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1}, + unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1}, + unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1}, + unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1}, + unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1}, + unicode.Range16{Lo: 0x212f, Hi: 0x2134, Stride: 0x1}, + unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1}, + unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1}, + unicode.Range16{Lo: 0x213c, Hi: 0x213f, Stride: 0x1}, + unicode.Range16{Lo: 0x2145, Hi: 0x2149, Stride: 0x1}, + unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1}, + unicode.Range16{Lo: 0x2160, Hi: 0x2182, Stride: 0x1}, + unicode.Range16{Lo: 0x2183, Hi: 0x2184, Stride: 0x1}, + unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1}, + unicode.Range16{Lo: 0x24b6, Hi: 0x24e9, Stride: 0x1}, + unicode.Range16{Lo: 0x2c00, Hi: 0x2c2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c30, Hi: 0x2c5e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c60, Hi: 0x2c7b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7e, Hi: 0x2ce4, Stride: 0x1}, + unicode.Range16{Lo: 0x2ceb, Hi: 0x2cee, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1}, + unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1}, + unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1}, + unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1}, + unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1}, + unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1}, + unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1}, + unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1}, + unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1}, + unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1}, + unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1}, + unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1}, + unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1}, + unicode.Range16{Lo: 0x3105, Hi: 0x312d, Stride: 0x1}, + unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1}, + unicode.Range16{Lo: 0x31a0, Hi: 0x31ba, Stride: 0x1}, + unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1}, + unicode.Range16{Lo: 
0xa015, Hi: 0xa015, Stride: 0x1}, + unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1}, + unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1}, + unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1}, + unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1}, + unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1}, + unicode.Range16{Lo: 0xa640, Hi: 0xa66d, Stride: 0x1}, + unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1}, + unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1}, + unicode.Range16{Lo: 0xa680, Hi: 0xa69b, Stride: 0x1}, + unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1}, + unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1}, + unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1}, + unicode.Range16{Lo: 0xa722, Hi: 0xa76f, Stride: 0x1}, + unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1}, + unicode.Range16{Lo: 0xa771, Hi: 0xa787, Stride: 0x1}, + unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1}, + unicode.Range16{Lo: 0xa78b, Hi: 0xa78e, Stride: 0x1}, + unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1}, + unicode.Range16{Lo: 0xa790, Hi: 0xa7ae, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1}, + unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1}, + unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1}, + unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1}, + unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1}, + unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1}, + unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1}, + unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1}, + unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1}, + unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1}, + unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1}, + unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1}, + unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1}, + unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1}, + unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1}, + unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1}, + unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1}, + unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1}, + unicode.Range16{Lo: 0xab60, Hi: 0xab65, Stride: 0x1}, + unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1}, + unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1}, + unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1}, + unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1}, + unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1}, + unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1}, + unicode.Range16{Lo: 0xfb50, Hi: 0xfbb1, Stride: 0x1}, + unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, 
Stride: 0x1}, + unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1}, + unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1}, + unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1}, + unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1}, + unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1}, + unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1}, + unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1}, + unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1}, + unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1}, + unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1}, + unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1}, + unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1}, + unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1}, + unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1}, + unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1}, + unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1}, + unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1}, + unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1}, + unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1}, + unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1}, + unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1}, + unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1}, + unicode.Range32{Lo: 0x10330, Hi: 0x10340, Stride: 0x1}, + unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1}, + unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1}, + unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1}, + unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1}, + unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1}, + unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1}, + unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1}, + unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1}, + unicode.Range32{Lo: 0x10400, Hi: 0x1044f, Stride: 0x1}, + unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1}, + unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1}, + unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1}, + unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1}, + unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1}, + unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1}, + unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1}, + unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1}, + unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1}, + unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1}, + unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1}, + unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1}, + unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1}, + unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1}, + unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1}, + unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1}, + unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1}, + unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1}, + unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1}, + unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1}, + unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1}, + unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1}, + unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1}, + unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1}, + unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1}, + unicode.Range32{Lo: 0x10a19, Hi: 0x10a33, Stride: 0x1}, + unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1}, + unicode.Range32{Lo: 0x10a80, Hi: 0x10a9c, Stride: 
0x1}, + unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1}, + unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1}, + unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1}, + unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1}, + unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1}, + unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1}, + unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1}, + unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1}, + unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1}, + unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1}, + unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1}, + unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1}, + unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1}, + unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1}, + unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1}, + unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1}, + unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1}, + unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1}, + unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1}, + unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1}, + unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1}, + unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1}, + unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1}, + unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1}, + unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1}, + unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1}, + unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1}, + unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1}, + unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1}, + unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1}, + unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1}, + unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1}, + unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1}, + unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1}, + unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1}, + unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1}, + unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1}, + unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1}, + unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1}, + unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1}, + unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1}, + unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1}, + unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1}, + unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1}, + unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1}, + unicode.Range32{Lo: 0x118a0, Hi: 0x118df, Stride: 0x1}, + unicode.Range32{Lo: 0x118ff, Hi: 0x118ff, Stride: 0x1}, + unicode.Range32{Lo: 0x11ac0, Hi: 0x11af8, Stride: 0x1}, + unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1}, + unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1}, + unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1}, + unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1}, + unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1}, + unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1}, + unicode.Range32{Lo: 0x13000, Hi: 0x1342e, Stride: 0x1}, + unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1}, + unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1}, + unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1}, + unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1}, + 
unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1}, + unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 0x1}, + unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1}, + unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1}, + unicode.Range32{Lo: 0x16f00, Hi: 0x16f44, Stride: 0x1}, + unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1}, + unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1}, + unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe0, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1}, + unicode.Range32{Lo: 0x1d400, Hi: 0x1d454, Stride: 0x1}, + unicode.Range32{Lo: 0x1d456, Hi: 0x1d49c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b9, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d505, Stride: 0x1}, + unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1}, + unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1}, + unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d51e, Hi: 0x1d539, Stride: 0x1}, + unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1}, + unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1}, + unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1}, + unicode.Range32{Lo: 0x1d552, Hi: 0x1d6a5, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6fa, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1}, + unicode.Range32{Lo: 0x1d716, Hi: 0x1d734, Stride: 0x1}, + unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d750, Hi: 0x1d76e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1}, + unicode.Range32{Lo: 0x1d78a, Hi: 0x1d7a8, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7cb, Stride: 0x1}, + unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1}, + unicode.Range32{Lo: 0x1e900, Hi: 0x1e943, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1}, + 
unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1}, + unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1}, + unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1}, + unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1}, + unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1}, + unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1}, + unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1}, + }, + LatinOffset: 7, +} + +var _WordCR = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _WordDouble_Quote = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _WordE_Base = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x261d, Hi: 0x261d, Stride: 0x1}, + unicode.Range16{Lo: 0x26f9, Hi: 0x26f9, Stride: 0x1}, + unicode.Range16{Lo: 0x270a, Hi: 0x270d, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f385, Hi: 0x1f385, Stride: 0x1}, + unicode.Range32{Lo: 0x1f3c3, Hi: 0x1f3c4, Stride: 0x1}, + unicode.Range32{Lo: 0x1f3ca, Hi: 0x1f3cb, Stride: 0x1}, + unicode.Range32{Lo: 0x1f442, Hi: 0x1f443, Stride: 0x1}, + unicode.Range32{Lo: 0x1f446, Hi: 0x1f450, Stride: 0x1}, + unicode.Range32{Lo: 0x1f46e, Hi: 0x1f46e, Stride: 0x1}, + unicode.Range32{Lo: 0x1f470, Hi: 0x1f478, Stride: 0x1}, + unicode.Range32{Lo: 0x1f47c, Hi: 0x1f47c, Stride: 0x1}, + unicode.Range32{Lo: 0x1f481, Hi: 0x1f483, Stride: 0x1}, + unicode.Range32{Lo: 0x1f485, Hi: 0x1f487, Stride: 0x1}, + unicode.Range32{Lo: 0x1f4aa, Hi: 0x1f4aa, Stride: 0x1}, + unicode.Range32{Lo: 0x1f575, Hi: 0x1f575, Stride: 0x1}, + unicode.Range32{Lo: 0x1f57a, Hi: 0x1f57a, Stride: 0x1}, + unicode.Range32{Lo: 0x1f590, Hi: 0x1f590, Stride: 0x1}, + unicode.Range32{Lo: 0x1f595, Hi: 0x1f596, Stride: 0x1}, + unicode.Range32{Lo: 0x1f645, Hi: 0x1f647, Stride: 0x1}, + unicode.Range32{Lo: 0x1f64b, Hi: 0x1f64f, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6a3, Hi: 0x1f6a3, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6b4, Hi: 0x1f6b6, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6c0, Hi: 0x1f6c0, Stride: 0x1}, + unicode.Range32{Lo: 0x1f918, Hi: 0x1f91e, Stride: 0x1}, + unicode.Range32{Lo: 0x1f926, Hi: 0x1f926, Stride: 0x1}, + unicode.Range32{Lo: 0x1f930, Hi: 0x1f930, Stride: 0x1}, + unicode.Range32{Lo: 0x1f933, Hi: 0x1f939, Stride: 0x1}, + unicode.Range32{Lo: 0x1f93c, Hi: 0x1f93e, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordE_Base_GAZ = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f466, Hi: 0x1f469, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordE_Modifier = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordExtend = &unicode.RangeTable{ + R16: []unicode.Range16{ + 
unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1}, + unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1}, + unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1}, + unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1}, + unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1}, + unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1}, + unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1}, + unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1}, + unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1}, + unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1}, + unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1}, + unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1}, + unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1}, + unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1}, + unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1}, + unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1}, + unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1}, + unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1}, + unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1}, + unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1}, + unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1}, + unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1}, + unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1}, + unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1}, + unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1}, + unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1}, + unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1}, + unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1}, + unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1}, + unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1}, + unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1}, + unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1}, + unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1}, + unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1}, + unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1}, + unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1}, + unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1}, + unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1}, + unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1}, + unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, 
Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1}, + unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1}, + unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1}, + unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1}, + unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1}, + unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1}, + unicode.Range16{Lo: 0xf18, 
Hi: 0xf19, Stride: 0x1}, + unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1}, + unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1}, + unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 
0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, 
Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a01, 
Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1}, + unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1}, + unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1}, + unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1}, + unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1}, + unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, 
Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1}, + unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1}, + unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1}, + unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1}, + unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1}, + unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1}, + unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1}, + unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1}, + unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1}, + unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1}, + unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1}, + unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1}, + unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1}, + unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1}, + unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1}, + unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1}, + unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1}, + unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1}, + unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1}, + unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1}, + 
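+ // LatinOffset (set below, after R32 closes) counts the leading R16 entries
+ // whose Hi is at most unicode.MaxLatin1; lookups for runes above U+00FF
+ // may skip those entries entirely.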
unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1}, + unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1}, + unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordExtendNumLet = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x5f, Hi: 0x5f, Stride: 0x1}, + unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1}, + unicode.Range16{Lo: 0x203f, Hi: 0x2040, Stride: 0x1}, + unicode.Range16{Lo: 0x2054, Hi: 0x2054, Stride: 0x1}, + unicode.Range16{Lo: 0xfe33, Hi: 0xfe34, Stride: 0x1}, + unicode.Range16{Lo: 0xfe4d, Hi: 0xfe4f, Stride: 0x1}, + unicode.Range16{Lo: 0xff3f, Hi: 0xff3f, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _WordFormat = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1}, + unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1}, + unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1}, + unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1}, + unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1}, + unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1}, + unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1}, + unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1}, + unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1}, + unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1}, + unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1}, + unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1}, + unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1}, + unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _WordGlue_After_Zwj = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2764, Hi: 0x2764, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f48b, Hi: 0x1f48b, Stride: 0x1}, + unicode.Range32{Lo: 0x1f5e8, Hi: 0x1f5e8, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordHebrew_Letter = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1}, + unicode.Range16{Lo: 0x5f0, Hi: 0x5f2, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1}, + unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1}, + unicode.Range16{Lo: 0xfb38, Hi: 0xfb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1}, + unicode.Range16{Lo: 0xfb43, Hi: 0xfb44, Stride: 0x1}, + unicode.Range16{Lo: 0xfb46, Hi: 0xfb4f, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordKatakana = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1}, + unicode.Range16{Lo: 0x309b, Hi: 0x309c, Stride: 0x1}, + unicode.Range16{Lo: 0x30a0, Hi: 0x30a0, Stride: 0x1}, + unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1}, + unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1}, + unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1}, + unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1}, + unicode.Range16{Lo: 0x32d0, Hi: 0x32fe, Stride: 0x1}, + unicode.Range16{Lo: 0x3300, Hi: 0x3357, Stride: 0x1}, + unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1}, + unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1}, + unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1}, + }, + R32: []unicode.Range32{ + 
unicode.Range32{Lo: 0x1b000, Hi: 0x1b000, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordLF = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _WordMidLetter = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1}, + unicode.Range16{Lo: 0xb7, Hi: 0xb7, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7, Hi: 0x2d7, Stride: 0x1}, + unicode.Range16{Lo: 0x387, Hi: 0x387, Stride: 0x1}, + unicode.Range16{Lo: 0x5f4, Hi: 0x5f4, Stride: 0x1}, + unicode.Range16{Lo: 0x2027, Hi: 0x2027, Stride: 0x1}, + unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1}, + unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1}, + unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1}, + }, + LatinOffset: 2, +} + +var _WordMidNum = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1}, + unicode.Range16{Lo: 0x3b, Hi: 0x3b, Stride: 0x1}, + unicode.Range16{Lo: 0x37e, Hi: 0x37e, Stride: 0x1}, + unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1}, + unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1}, + unicode.Range16{Lo: 0x66c, Hi: 0x66c, Stride: 0x1}, + unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1}, + unicode.Range16{Lo: 0x2044, Hi: 0x2044, Stride: 0x1}, + unicode.Range16{Lo: 0xfe10, Hi: 0xfe10, Stride: 0x1}, + unicode.Range16{Lo: 0xfe14, Hi: 0xfe14, Stride: 0x1}, + unicode.Range16{Lo: 0xfe50, Hi: 0xfe50, Stride: 0x1}, + unicode.Range16{Lo: 0xfe54, Hi: 0xfe54, Stride: 0x1}, + unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1}, + unicode.Range16{Lo: 0xff1b, Hi: 0xff1b, Stride: 0x1}, + }, + LatinOffset: 2, +} + +var _WordMidNumLet = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2e, Hi: 0x2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1}, + unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1}, + unicode.Range16{Lo: 0x2024, Hi: 0x2024, Stride: 0x1}, + unicode.Range16{Lo: 0xfe52, Hi: 0xfe52, Stride: 0x1}, + unicode.Range16{Lo: 0xff07, Hi: 0xff07, Stride: 0x1}, + unicode.Range16{Lo: 0xff0e, Hi: 0xff0e, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _WordNewline = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1}, + unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1}, + unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1}, + unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1}, + }, + LatinOffset: 2, +} + +var _WordNumeric = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1}, + unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1}, + unicode.Range16{Lo: 0x66b, Hi: 0x66b, Stride: 0x1}, + unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1}, + unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1}, + unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1}, + unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1}, + unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1}, + unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1}, + unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1}, + unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1}, + unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1}, + unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1}, + unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1}, + unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1}, + unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1}, + unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1}, + unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 
0x1}, + unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1}, + unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1}, + unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1}, + unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1}, + unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1}, + unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1}, + unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1}, + unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1}, + unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1}, + unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1}, + unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1}, + unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1}, + unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1}, + unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1}, + unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1}, + unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1}, + unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1}, + unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1}, + unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1}, + unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1}, + unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1}, + unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1}, + unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1}, + unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1}, + unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1}, + unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1}, + unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1}, + unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1}, + unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1}, + unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1}, + unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1}, + unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _WordRegional_Indicator = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _WordSingle_Quote = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _WordZWJ = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x200d, Hi: 0x200d, Stride: 0x1}, + }, + LatinOffset: 0, +} + +type _WordRuneRange unicode.RangeTable + +func _WordRuneType(r rune) *_WordRuneRange { + switch { + case unicode.Is(_WordALetter, r): + return (*_WordRuneRange)(_WordALetter) + case unicode.Is(_WordCR, r): + return (*_WordRuneRange)(_WordCR) + case unicode.Is(_WordDouble_Quote, r): + return (*_WordRuneRange)(_WordDouble_Quote) + case unicode.Is(_WordE_Base, r): + return (*_WordRuneRange)(_WordE_Base) + case unicode.Is(_WordE_Base_GAZ, r): + return (*_WordRuneRange)(_WordE_Base_GAZ) + case unicode.Is(_WordE_Modifier, r): + return (*_WordRuneRange)(_WordE_Modifier) + case unicode.Is(_WordExtend, r): + return (*_WordRuneRange)(_WordExtend) + case unicode.Is(_WordExtendNumLet, r): + return (*_WordRuneRange)(_WordExtendNumLet) + case unicode.Is(_WordFormat, r): + return (*_WordRuneRange)(_WordFormat) + case unicode.Is(_WordGlue_After_Zwj, r): + return (*_WordRuneRange)(_WordGlue_After_Zwj) + case unicode.Is(_WordHebrew_Letter, r): + return (*_WordRuneRange)(_WordHebrew_Letter) + case unicode.Is(_WordKatakana, r): + 
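+ // As in every case of this switch, unicode.Is reports whether r falls in
+ // one of the property table's ranges; the first matching word-break
+ // property wins, and runes matching none fall through to nil, which
+ // String below renders as "Other". A hypothetical caller inside this
+ // package could classify a rune like so:
+ //   if t := _WordRuneType('A'); t != nil { fmt.Println(t) } // "ALetter"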
return (*_WordRuneRange)(_WordKatakana) + case unicode.Is(_WordLF, r): + return (*_WordRuneRange)(_WordLF) + case unicode.Is(_WordMidLetter, r): + return (*_WordRuneRange)(_WordMidLetter) + case unicode.Is(_WordMidNum, r): + return (*_WordRuneRange)(_WordMidNum) + case unicode.Is(_WordMidNumLet, r): + return (*_WordRuneRange)(_WordMidNumLet) + case unicode.Is(_WordNewline, r): + return (*_WordRuneRange)(_WordNewline) + case unicode.Is(_WordNumeric, r): + return (*_WordRuneRange)(_WordNumeric) + case unicode.Is(_WordRegional_Indicator, r): + return (*_WordRuneRange)(_WordRegional_Indicator) + case unicode.Is(_WordSingle_Quote, r): + return (*_WordRuneRange)(_WordSingle_Quote) + case unicode.Is(_WordZWJ, r): + return (*_WordRuneRange)(_WordZWJ) + default: + return nil + } +} +func (rng *_WordRuneRange) String() string { + switch (*unicode.RangeTable)(rng) { + case _WordALetter: + return "ALetter" + case _WordCR: + return "CR" + case _WordDouble_Quote: + return "Double_Quote" + case _WordE_Base: + return "E_Base" + case _WordE_Base_GAZ: + return "E_Base_GAZ" + case _WordE_Modifier: + return "E_Modifier" + case _WordExtend: + return "Extend" + case _WordExtendNumLet: + return "ExtendNumLet" + case _WordFormat: + return "Format" + case _WordGlue_After_Zwj: + return "Glue_After_Zwj" + case _WordHebrew_Letter: + return "Hebrew_Letter" + case _WordKatakana: + return "Katakana" + case _WordLF: + return "LF" + case _WordMidLetter: + return "MidLetter" + case _WordMidNum: + return "MidNum" + case _WordMidNumLet: + return "MidNumLet" + case _WordNewline: + return "Newline" + case _WordNumeric: + return "Numeric" + case _WordRegional_Indicator: + return "Regional_Indicator" + case _WordSingle_Quote: + return "Single_Quote" + case _WordZWJ: + return "ZWJ" + default: + return "Other" + } +} + +var _SentenceATerm = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2e, Hi: 0x2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2024, Hi: 0x2024, Stride: 0x1}, + unicode.Range16{Lo: 0xfe52, Hi: 0xfe52, Stride: 0x1}, + unicode.Range16{Lo: 0xff0e, Hi: 0xff0e, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceCR = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceClose = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1}, + unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1}, + unicode.Range16{Lo: 0x28, Hi: 0x28, Stride: 0x1}, + unicode.Range16{Lo: 0x29, Hi: 0x29, Stride: 0x1}, + unicode.Range16{Lo: 0x5b, Hi: 0x5b, Stride: 0x1}, + unicode.Range16{Lo: 0x5d, Hi: 0x5d, Stride: 0x1}, + unicode.Range16{Lo: 0x7b, Hi: 0x7b, Stride: 0x1}, + unicode.Range16{Lo: 0x7d, Hi: 0x7d, Stride: 0x1}, + unicode.Range16{Lo: 0xab, Hi: 0xab, Stride: 0x1}, + unicode.Range16{Lo: 0xbb, Hi: 0xbb, Stride: 0x1}, + unicode.Range16{Lo: 0xf3a, Hi: 0xf3a, Stride: 0x1}, + unicode.Range16{Lo: 0xf3b, Hi: 0xf3b, Stride: 0x1}, + unicode.Range16{Lo: 0xf3c, Hi: 0xf3c, Stride: 0x1}, + unicode.Range16{Lo: 0xf3d, Hi: 0xf3d, Stride: 0x1}, + unicode.Range16{Lo: 0x169b, Hi: 0x169b, Stride: 0x1}, + unicode.Range16{Lo: 0x169c, Hi: 0x169c, Stride: 0x1}, + unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1}, + unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1}, + unicode.Range16{Lo: 0x201a, Hi: 0x201a, Stride: 0x1}, + unicode.Range16{Lo: 0x201b, Hi: 0x201c, Stride: 0x1}, + unicode.Range16{Lo: 0x201d, Hi: 0x201d, Stride: 0x1}, + unicode.Range16{Lo: 0x201e, Hi: 0x201e, Stride: 0x1}, + unicode.Range16{Lo: 0x201f, 
Hi: 0x201f, Stride: 0x1}, + unicode.Range16{Lo: 0x2039, Hi: 0x2039, Stride: 0x1}, + unicode.Range16{Lo: 0x203a, Hi: 0x203a, Stride: 0x1}, + unicode.Range16{Lo: 0x2045, Hi: 0x2045, Stride: 0x1}, + unicode.Range16{Lo: 0x2046, Hi: 0x2046, Stride: 0x1}, + unicode.Range16{Lo: 0x207d, Hi: 0x207d, Stride: 0x1}, + unicode.Range16{Lo: 0x207e, Hi: 0x207e, Stride: 0x1}, + unicode.Range16{Lo: 0x208d, Hi: 0x208d, Stride: 0x1}, + unicode.Range16{Lo: 0x208e, Hi: 0x208e, Stride: 0x1}, + unicode.Range16{Lo: 0x2308, Hi: 0x2308, Stride: 0x1}, + unicode.Range16{Lo: 0x2309, Hi: 0x2309, Stride: 0x1}, + unicode.Range16{Lo: 0x230a, Hi: 0x230a, Stride: 0x1}, + unicode.Range16{Lo: 0x230b, Hi: 0x230b, Stride: 0x1}, + unicode.Range16{Lo: 0x2329, Hi: 0x2329, Stride: 0x1}, + unicode.Range16{Lo: 0x232a, Hi: 0x232a, Stride: 0x1}, + unicode.Range16{Lo: 0x275b, Hi: 0x2760, Stride: 0x1}, + unicode.Range16{Lo: 0x2768, Hi: 0x2768, Stride: 0x1}, + unicode.Range16{Lo: 0x2769, Hi: 0x2769, Stride: 0x1}, + unicode.Range16{Lo: 0x276a, Hi: 0x276a, Stride: 0x1}, + unicode.Range16{Lo: 0x276b, Hi: 0x276b, Stride: 0x1}, + unicode.Range16{Lo: 0x276c, Hi: 0x276c, Stride: 0x1}, + unicode.Range16{Lo: 0x276d, Hi: 0x276d, Stride: 0x1}, + unicode.Range16{Lo: 0x276e, Hi: 0x276e, Stride: 0x1}, + unicode.Range16{Lo: 0x276f, Hi: 0x276f, Stride: 0x1}, + unicode.Range16{Lo: 0x2770, Hi: 0x2770, Stride: 0x1}, + unicode.Range16{Lo: 0x2771, Hi: 0x2771, Stride: 0x1}, + unicode.Range16{Lo: 0x2772, Hi: 0x2772, Stride: 0x1}, + unicode.Range16{Lo: 0x2773, Hi: 0x2773, Stride: 0x1}, + unicode.Range16{Lo: 0x2774, Hi: 0x2774, Stride: 0x1}, + unicode.Range16{Lo: 0x2775, Hi: 0x2775, Stride: 0x1}, + unicode.Range16{Lo: 0x27c5, Hi: 0x27c5, Stride: 0x1}, + unicode.Range16{Lo: 0x27c6, Hi: 0x27c6, Stride: 0x1}, + unicode.Range16{Lo: 0x27e6, Hi: 0x27e6, Stride: 0x1}, + unicode.Range16{Lo: 0x27e7, Hi: 0x27e7, Stride: 0x1}, + unicode.Range16{Lo: 0x27e8, Hi: 0x27e8, Stride: 0x1}, + unicode.Range16{Lo: 0x27e9, Hi: 0x27e9, Stride: 0x1}, + unicode.Range16{Lo: 0x27ea, Hi: 0x27ea, Stride: 0x1}, + unicode.Range16{Lo: 0x27eb, Hi: 0x27eb, Stride: 0x1}, + unicode.Range16{Lo: 0x27ec, Hi: 0x27ec, Stride: 0x1}, + unicode.Range16{Lo: 0x27ed, Hi: 0x27ed, Stride: 0x1}, + unicode.Range16{Lo: 0x27ee, Hi: 0x27ee, Stride: 0x1}, + unicode.Range16{Lo: 0x27ef, Hi: 0x27ef, Stride: 0x1}, + unicode.Range16{Lo: 0x2983, Hi: 0x2983, Stride: 0x1}, + unicode.Range16{Lo: 0x2984, Hi: 0x2984, Stride: 0x1}, + unicode.Range16{Lo: 0x2985, Hi: 0x2985, Stride: 0x1}, + unicode.Range16{Lo: 0x2986, Hi: 0x2986, Stride: 0x1}, + unicode.Range16{Lo: 0x2987, Hi: 0x2987, Stride: 0x1}, + unicode.Range16{Lo: 0x2988, Hi: 0x2988, Stride: 0x1}, + unicode.Range16{Lo: 0x2989, Hi: 0x2989, Stride: 0x1}, + unicode.Range16{Lo: 0x298a, Hi: 0x298a, Stride: 0x1}, + unicode.Range16{Lo: 0x298b, Hi: 0x298b, Stride: 0x1}, + unicode.Range16{Lo: 0x298c, Hi: 0x298c, Stride: 0x1}, + unicode.Range16{Lo: 0x298d, Hi: 0x298d, Stride: 0x1}, + unicode.Range16{Lo: 0x298e, Hi: 0x298e, Stride: 0x1}, + unicode.Range16{Lo: 0x298f, Hi: 0x298f, Stride: 0x1}, + unicode.Range16{Lo: 0x2990, Hi: 0x2990, Stride: 0x1}, + unicode.Range16{Lo: 0x2991, Hi: 0x2991, Stride: 0x1}, + unicode.Range16{Lo: 0x2992, Hi: 0x2992, Stride: 0x1}, + unicode.Range16{Lo: 0x2993, Hi: 0x2993, Stride: 0x1}, + unicode.Range16{Lo: 0x2994, Hi: 0x2994, Stride: 0x1}, + unicode.Range16{Lo: 0x2995, Hi: 0x2995, Stride: 0x1}, + unicode.Range16{Lo: 0x2996, Hi: 0x2996, Stride: 0x1}, + unicode.Range16{Lo: 0x2997, Hi: 0x2997, Stride: 0x1}, + unicode.Range16{Lo: 0x2998, Hi: 0x2998, Stride: 0x1}, 
+ unicode.Range16{Lo: 0x29d8, Hi: 0x29d8, Stride: 0x1}, + unicode.Range16{Lo: 0x29d9, Hi: 0x29d9, Stride: 0x1}, + unicode.Range16{Lo: 0x29da, Hi: 0x29da, Stride: 0x1}, + unicode.Range16{Lo: 0x29db, Hi: 0x29db, Stride: 0x1}, + unicode.Range16{Lo: 0x29fc, Hi: 0x29fc, Stride: 0x1}, + unicode.Range16{Lo: 0x29fd, Hi: 0x29fd, Stride: 0x1}, + unicode.Range16{Lo: 0x2e00, Hi: 0x2e01, Stride: 0x1}, + unicode.Range16{Lo: 0x2e02, Hi: 0x2e02, Stride: 0x1}, + unicode.Range16{Lo: 0x2e03, Hi: 0x2e03, Stride: 0x1}, + unicode.Range16{Lo: 0x2e04, Hi: 0x2e04, Stride: 0x1}, + unicode.Range16{Lo: 0x2e05, Hi: 0x2e05, Stride: 0x1}, + unicode.Range16{Lo: 0x2e06, Hi: 0x2e08, Stride: 0x1}, + unicode.Range16{Lo: 0x2e09, Hi: 0x2e09, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0a, Hi: 0x2e0a, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0b, Hi: 0x2e0b, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0c, Hi: 0x2e0c, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0d, Hi: 0x2e0d, Stride: 0x1}, + unicode.Range16{Lo: 0x2e1c, Hi: 0x2e1c, Stride: 0x1}, + unicode.Range16{Lo: 0x2e1d, Hi: 0x2e1d, Stride: 0x1}, + unicode.Range16{Lo: 0x2e20, Hi: 0x2e20, Stride: 0x1}, + unicode.Range16{Lo: 0x2e21, Hi: 0x2e21, Stride: 0x1}, + unicode.Range16{Lo: 0x2e22, Hi: 0x2e22, Stride: 0x1}, + unicode.Range16{Lo: 0x2e23, Hi: 0x2e23, Stride: 0x1}, + unicode.Range16{Lo: 0x2e24, Hi: 0x2e24, Stride: 0x1}, + unicode.Range16{Lo: 0x2e25, Hi: 0x2e25, Stride: 0x1}, + unicode.Range16{Lo: 0x2e26, Hi: 0x2e26, Stride: 0x1}, + unicode.Range16{Lo: 0x2e27, Hi: 0x2e27, Stride: 0x1}, + unicode.Range16{Lo: 0x2e28, Hi: 0x2e28, Stride: 0x1}, + unicode.Range16{Lo: 0x2e29, Hi: 0x2e29, Stride: 0x1}, + unicode.Range16{Lo: 0x2e42, Hi: 0x2e42, Stride: 0x1}, + unicode.Range16{Lo: 0x3008, Hi: 0x3008, Stride: 0x1}, + unicode.Range16{Lo: 0x3009, Hi: 0x3009, Stride: 0x1}, + unicode.Range16{Lo: 0x300a, Hi: 0x300a, Stride: 0x1}, + unicode.Range16{Lo: 0x300b, Hi: 0x300b, Stride: 0x1}, + unicode.Range16{Lo: 0x300c, Hi: 0x300c, Stride: 0x1}, + unicode.Range16{Lo: 0x300d, Hi: 0x300d, Stride: 0x1}, + unicode.Range16{Lo: 0x300e, Hi: 0x300e, Stride: 0x1}, + unicode.Range16{Lo: 0x300f, Hi: 0x300f, Stride: 0x1}, + unicode.Range16{Lo: 0x3010, Hi: 0x3010, Stride: 0x1}, + unicode.Range16{Lo: 0x3011, Hi: 0x3011, Stride: 0x1}, + unicode.Range16{Lo: 0x3014, Hi: 0x3014, Stride: 0x1}, + unicode.Range16{Lo: 0x3015, Hi: 0x3015, Stride: 0x1}, + unicode.Range16{Lo: 0x3016, Hi: 0x3016, Stride: 0x1}, + unicode.Range16{Lo: 0x3017, Hi: 0x3017, Stride: 0x1}, + unicode.Range16{Lo: 0x3018, Hi: 0x3018, Stride: 0x1}, + unicode.Range16{Lo: 0x3019, Hi: 0x3019, Stride: 0x1}, + unicode.Range16{Lo: 0x301a, Hi: 0x301a, Stride: 0x1}, + unicode.Range16{Lo: 0x301b, Hi: 0x301b, Stride: 0x1}, + unicode.Range16{Lo: 0x301d, Hi: 0x301d, Stride: 0x1}, + unicode.Range16{Lo: 0x301e, Hi: 0x301f, Stride: 0x1}, + unicode.Range16{Lo: 0xfd3e, Hi: 0xfd3e, Stride: 0x1}, + unicode.Range16{Lo: 0xfd3f, Hi: 0xfd3f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe17, Hi: 0xfe17, Stride: 0x1}, + unicode.Range16{Lo: 0xfe18, Hi: 0xfe18, Stride: 0x1}, + unicode.Range16{Lo: 0xfe35, Hi: 0xfe35, Stride: 0x1}, + unicode.Range16{Lo: 0xfe36, Hi: 0xfe36, Stride: 0x1}, + unicode.Range16{Lo: 0xfe37, Hi: 0xfe37, Stride: 0x1}, + unicode.Range16{Lo: 0xfe38, Hi: 0xfe38, Stride: 0x1}, + unicode.Range16{Lo: 0xfe39, Hi: 0xfe39, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3a, Hi: 0xfe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3b, Hi: 0xfe3b, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3c, Hi: 0xfe3c, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3d, Hi: 0xfe3d, Stride: 0x1}, + unicode.Range16{Lo: 
0xfe3e, Hi: 0xfe3e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe3f, Hi: 0xfe3f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe40, Hi: 0xfe40, Stride: 0x1}, + unicode.Range16{Lo: 0xfe41, Hi: 0xfe41, Stride: 0x1}, + unicode.Range16{Lo: 0xfe42, Hi: 0xfe42, Stride: 0x1}, + unicode.Range16{Lo: 0xfe43, Hi: 0xfe43, Stride: 0x1}, + unicode.Range16{Lo: 0xfe44, Hi: 0xfe44, Stride: 0x1}, + unicode.Range16{Lo: 0xfe47, Hi: 0xfe47, Stride: 0x1}, + unicode.Range16{Lo: 0xfe48, Hi: 0xfe48, Stride: 0x1}, + unicode.Range16{Lo: 0xfe59, Hi: 0xfe59, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5a, Hi: 0xfe5a, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5b, Hi: 0xfe5b, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5c, Hi: 0xfe5c, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5d, Hi: 0xfe5d, Stride: 0x1}, + unicode.Range16{Lo: 0xfe5e, Hi: 0xfe5e, Stride: 0x1}, + unicode.Range16{Lo: 0xff08, Hi: 0xff08, Stride: 0x1}, + unicode.Range16{Lo: 0xff09, Hi: 0xff09, Stride: 0x1}, + unicode.Range16{Lo: 0xff3b, Hi: 0xff3b, Stride: 0x1}, + unicode.Range16{Lo: 0xff3d, Hi: 0xff3d, Stride: 0x1}, + unicode.Range16{Lo: 0xff5b, Hi: 0xff5b, Stride: 0x1}, + unicode.Range16{Lo: 0xff5d, Hi: 0xff5d, Stride: 0x1}, + unicode.Range16{Lo: 0xff5f, Hi: 0xff5f, Stride: 0x1}, + unicode.Range16{Lo: 0xff60, Hi: 0xff60, Stride: 0x1}, + unicode.Range16{Lo: 0xff62, Hi: 0xff62, Stride: 0x1}, + unicode.Range16{Lo: 0xff63, Hi: 0xff63, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f676, Hi: 0x1f678, Stride: 0x1}, + }, + LatinOffset: 10, +} + +var _SentenceExtend = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1}, + unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1}, + unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1}, + unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1}, + unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1}, + unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1}, + unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1}, + unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1}, + unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1}, + unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1}, + unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1}, + unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1}, + unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1}, + unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1}, + unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1}, + unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1}, + unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1}, + unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1}, + unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1}, + unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1}, + unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1}, + unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1}, + unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1}, + unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1}, + unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1}, + unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1}, + unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1}, + unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1}, + unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1}, + unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1}, + unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1}, + unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1}, + unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1}, + unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1}, + unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1}, + unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1}, + unicode.Range16{Lo: 0x962, Hi: 0x963, 
Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1}, + unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1}, + unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1}, + unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1}, + unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1}, + unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1}, + unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, 
Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1}, + unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1}, + unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1}, + unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1}, + unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1}, + unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1}, + unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 
0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200d, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + 
unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1}, + unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1}, + unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1}, + unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 
0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1}, + unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1}, + unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 
0x1}, + unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1}, + unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1}, + unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1}, + unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1}, + unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1}, + unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1}, + unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1}, + unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1}, + unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1}, + unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1}, + unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1}, + unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1}, + unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1}, + unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1}, + unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1}, + unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1}, + unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1}, + unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1}, + unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1}, + unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1}, + unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1}, + unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1}, + unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _SentenceFormat = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1}, + unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1}, + unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1}, + unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1}, + unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1}, + unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1}, + unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1}, + unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1}, + unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1}, + unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1}, + unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1}, + unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1}, + unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1}, + unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1}, + unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceLF = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceLower = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1}, + unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1}, + unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1}, + unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1}, + unicode.Range16{Lo: 0xdf, Hi: 0xf6, Stride: 0x1}, + unicode.Range16{Lo: 0xf8, Hi: 0xff, Stride: 0x1}, + unicode.Range16{Lo: 0x101, Hi: 0x101, Stride: 0x1}, + 
unicode.Range16{Lo: 0x103, Hi: 0x103, Stride: 0x1}, + unicode.Range16{Lo: 0x105, Hi: 0x105, Stride: 0x1}, + unicode.Range16{Lo: 0x107, Hi: 0x107, Stride: 0x1}, + unicode.Range16{Lo: 0x109, Hi: 0x109, Stride: 0x1}, + unicode.Range16{Lo: 0x10b, Hi: 0x10b, Stride: 0x1}, + unicode.Range16{Lo: 0x10d, Hi: 0x10d, Stride: 0x1}, + unicode.Range16{Lo: 0x10f, Hi: 0x10f, Stride: 0x1}, + unicode.Range16{Lo: 0x111, Hi: 0x111, Stride: 0x1}, + unicode.Range16{Lo: 0x113, Hi: 0x113, Stride: 0x1}, + unicode.Range16{Lo: 0x115, Hi: 0x115, Stride: 0x1}, + unicode.Range16{Lo: 0x117, Hi: 0x117, Stride: 0x1}, + unicode.Range16{Lo: 0x119, Hi: 0x119, Stride: 0x1}, + unicode.Range16{Lo: 0x11b, Hi: 0x11b, Stride: 0x1}, + unicode.Range16{Lo: 0x11d, Hi: 0x11d, Stride: 0x1}, + unicode.Range16{Lo: 0x11f, Hi: 0x11f, Stride: 0x1}, + unicode.Range16{Lo: 0x121, Hi: 0x121, Stride: 0x1}, + unicode.Range16{Lo: 0x123, Hi: 0x123, Stride: 0x1}, + unicode.Range16{Lo: 0x125, Hi: 0x125, Stride: 0x1}, + unicode.Range16{Lo: 0x127, Hi: 0x127, Stride: 0x1}, + unicode.Range16{Lo: 0x129, Hi: 0x129, Stride: 0x1}, + unicode.Range16{Lo: 0x12b, Hi: 0x12b, Stride: 0x1}, + unicode.Range16{Lo: 0x12d, Hi: 0x12d, Stride: 0x1}, + unicode.Range16{Lo: 0x12f, Hi: 0x12f, Stride: 0x1}, + unicode.Range16{Lo: 0x131, Hi: 0x131, Stride: 0x1}, + unicode.Range16{Lo: 0x133, Hi: 0x133, Stride: 0x1}, + unicode.Range16{Lo: 0x135, Hi: 0x135, Stride: 0x1}, + unicode.Range16{Lo: 0x137, Hi: 0x138, Stride: 0x1}, + unicode.Range16{Lo: 0x13a, Hi: 0x13a, Stride: 0x1}, + unicode.Range16{Lo: 0x13c, Hi: 0x13c, Stride: 0x1}, + unicode.Range16{Lo: 0x13e, Hi: 0x13e, Stride: 0x1}, + unicode.Range16{Lo: 0x140, Hi: 0x140, Stride: 0x1}, + unicode.Range16{Lo: 0x142, Hi: 0x142, Stride: 0x1}, + unicode.Range16{Lo: 0x144, Hi: 0x144, Stride: 0x1}, + unicode.Range16{Lo: 0x146, Hi: 0x146, Stride: 0x1}, + unicode.Range16{Lo: 0x148, Hi: 0x149, Stride: 0x1}, + unicode.Range16{Lo: 0x14b, Hi: 0x14b, Stride: 0x1}, + unicode.Range16{Lo: 0x14d, Hi: 0x14d, Stride: 0x1}, + unicode.Range16{Lo: 0x14f, Hi: 0x14f, Stride: 0x1}, + unicode.Range16{Lo: 0x151, Hi: 0x151, Stride: 0x1}, + unicode.Range16{Lo: 0x153, Hi: 0x153, Stride: 0x1}, + unicode.Range16{Lo: 0x155, Hi: 0x155, Stride: 0x1}, + unicode.Range16{Lo: 0x157, Hi: 0x157, Stride: 0x1}, + unicode.Range16{Lo: 0x159, Hi: 0x159, Stride: 0x1}, + unicode.Range16{Lo: 0x15b, Hi: 0x15b, Stride: 0x1}, + unicode.Range16{Lo: 0x15d, Hi: 0x15d, Stride: 0x1}, + unicode.Range16{Lo: 0x15f, Hi: 0x15f, Stride: 0x1}, + unicode.Range16{Lo: 0x161, Hi: 0x161, Stride: 0x1}, + unicode.Range16{Lo: 0x163, Hi: 0x163, Stride: 0x1}, + unicode.Range16{Lo: 0x165, Hi: 0x165, Stride: 0x1}, + unicode.Range16{Lo: 0x167, Hi: 0x167, Stride: 0x1}, + unicode.Range16{Lo: 0x169, Hi: 0x169, Stride: 0x1}, + unicode.Range16{Lo: 0x16b, Hi: 0x16b, Stride: 0x1}, + unicode.Range16{Lo: 0x16d, Hi: 0x16d, Stride: 0x1}, + unicode.Range16{Lo: 0x16f, Hi: 0x16f, Stride: 0x1}, + unicode.Range16{Lo: 0x171, Hi: 0x171, Stride: 0x1}, + unicode.Range16{Lo: 0x173, Hi: 0x173, Stride: 0x1}, + unicode.Range16{Lo: 0x175, Hi: 0x175, Stride: 0x1}, + unicode.Range16{Lo: 0x177, Hi: 0x177, Stride: 0x1}, + unicode.Range16{Lo: 0x17a, Hi: 0x17a, Stride: 0x1}, + unicode.Range16{Lo: 0x17c, Hi: 0x17c, Stride: 0x1}, + unicode.Range16{Lo: 0x17e, Hi: 0x180, Stride: 0x1}, + unicode.Range16{Lo: 0x183, Hi: 0x183, Stride: 0x1}, + unicode.Range16{Lo: 0x185, Hi: 0x185, Stride: 0x1}, + unicode.Range16{Lo: 0x188, Hi: 0x188, Stride: 0x1}, + unicode.Range16{Lo: 0x18c, Hi: 0x18d, Stride: 0x1}, + unicode.Range16{Lo: 0x192, Hi: 0x192, 
Stride: 0x1}, + unicode.Range16{Lo: 0x195, Hi: 0x195, Stride: 0x1}, + unicode.Range16{Lo: 0x199, Hi: 0x19b, Stride: 0x1}, + unicode.Range16{Lo: 0x19e, Hi: 0x19e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1, Hi: 0x1a1, Stride: 0x1}, + unicode.Range16{Lo: 0x1a3, Hi: 0x1a3, Stride: 0x1}, + unicode.Range16{Lo: 0x1a5, Hi: 0x1a5, Stride: 0x1}, + unicode.Range16{Lo: 0x1a8, Hi: 0x1a8, Stride: 0x1}, + unicode.Range16{Lo: 0x1aa, Hi: 0x1ab, Stride: 0x1}, + unicode.Range16{Lo: 0x1ad, Hi: 0x1ad, Stride: 0x1}, + unicode.Range16{Lo: 0x1b0, Hi: 0x1b0, Stride: 0x1}, + unicode.Range16{Lo: 0x1b4, Hi: 0x1b4, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6, Hi: 0x1b6, Stride: 0x1}, + unicode.Range16{Lo: 0x1b9, Hi: 0x1ba, Stride: 0x1}, + unicode.Range16{Lo: 0x1bd, Hi: 0x1bf, Stride: 0x1}, + unicode.Range16{Lo: 0x1c6, Hi: 0x1c6, Stride: 0x1}, + unicode.Range16{Lo: 0x1c9, Hi: 0x1c9, Stride: 0x1}, + unicode.Range16{Lo: 0x1cc, Hi: 0x1cc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce, Hi: 0x1ce, Stride: 0x1}, + unicode.Range16{Lo: 0x1d0, Hi: 0x1d0, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2, Hi: 0x1d2, Stride: 0x1}, + unicode.Range16{Lo: 0x1d4, Hi: 0x1d4, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6, Hi: 0x1d6, Stride: 0x1}, + unicode.Range16{Lo: 0x1d8, Hi: 0x1d8, Stride: 0x1}, + unicode.Range16{Lo: 0x1da, Hi: 0x1da, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc, Hi: 0x1dd, Stride: 0x1}, + unicode.Range16{Lo: 0x1df, Hi: 0x1df, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1, Hi: 0x1e1, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3, Hi: 0x1e3, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5, Hi: 0x1e5, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7, Hi: 0x1e7, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9, Hi: 0x1e9, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb, Hi: 0x1eb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed, Hi: 0x1ed, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef, Hi: 0x1f0, Stride: 0x1}, + unicode.Range16{Lo: 0x1f3, Hi: 0x1f3, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5, Hi: 0x1f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1f9, Hi: 0x1f9, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb, Hi: 0x1fb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd, Hi: 0x1fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff, Hi: 0x1ff, Stride: 0x1}, + unicode.Range16{Lo: 0x201, Hi: 0x201, Stride: 0x1}, + unicode.Range16{Lo: 0x203, Hi: 0x203, Stride: 0x1}, + unicode.Range16{Lo: 0x205, Hi: 0x205, Stride: 0x1}, + unicode.Range16{Lo: 0x207, Hi: 0x207, Stride: 0x1}, + unicode.Range16{Lo: 0x209, Hi: 0x209, Stride: 0x1}, + unicode.Range16{Lo: 0x20b, Hi: 0x20b, Stride: 0x1}, + unicode.Range16{Lo: 0x20d, Hi: 0x20d, Stride: 0x1}, + unicode.Range16{Lo: 0x20f, Hi: 0x20f, Stride: 0x1}, + unicode.Range16{Lo: 0x211, Hi: 0x211, Stride: 0x1}, + unicode.Range16{Lo: 0x213, Hi: 0x213, Stride: 0x1}, + unicode.Range16{Lo: 0x215, Hi: 0x215, Stride: 0x1}, + unicode.Range16{Lo: 0x217, Hi: 0x217, Stride: 0x1}, + unicode.Range16{Lo: 0x219, Hi: 0x219, Stride: 0x1}, + unicode.Range16{Lo: 0x21b, Hi: 0x21b, Stride: 0x1}, + unicode.Range16{Lo: 0x21d, Hi: 0x21d, Stride: 0x1}, + unicode.Range16{Lo: 0x21f, Hi: 0x21f, Stride: 0x1}, + unicode.Range16{Lo: 0x221, Hi: 0x221, Stride: 0x1}, + unicode.Range16{Lo: 0x223, Hi: 0x223, Stride: 0x1}, + unicode.Range16{Lo: 0x225, Hi: 0x225, Stride: 0x1}, + unicode.Range16{Lo: 0x227, Hi: 0x227, Stride: 0x1}, + unicode.Range16{Lo: 0x229, Hi: 0x229, Stride: 0x1}, + unicode.Range16{Lo: 0x22b, Hi: 0x22b, Stride: 0x1}, + unicode.Range16{Lo: 0x22d, Hi: 0x22d, Stride: 0x1}, + unicode.Range16{Lo: 0x22f, Hi: 0x22f, Stride: 0x1}, + unicode.Range16{Lo: 0x231, Hi: 0x231, Stride: 0x1}, + unicode.Range16{Lo: 0x233, 
Hi: 0x239, Stride: 0x1}, + unicode.Range16{Lo: 0x23c, Hi: 0x23c, Stride: 0x1}, + unicode.Range16{Lo: 0x23f, Hi: 0x240, Stride: 0x1}, + unicode.Range16{Lo: 0x242, Hi: 0x242, Stride: 0x1}, + unicode.Range16{Lo: 0x247, Hi: 0x247, Stride: 0x1}, + unicode.Range16{Lo: 0x249, Hi: 0x249, Stride: 0x1}, + unicode.Range16{Lo: 0x24b, Hi: 0x24b, Stride: 0x1}, + unicode.Range16{Lo: 0x24d, Hi: 0x24d, Stride: 0x1}, + unicode.Range16{Lo: 0x24f, Hi: 0x293, Stride: 0x1}, + unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1}, + unicode.Range16{Lo: 0x2b0, Hi: 0x2b8, Stride: 0x1}, + unicode.Range16{Lo: 0x2c0, Hi: 0x2c1, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1}, + unicode.Range16{Lo: 0x371, Hi: 0x371, Stride: 0x1}, + unicode.Range16{Lo: 0x373, Hi: 0x373, Stride: 0x1}, + unicode.Range16{Lo: 0x377, Hi: 0x377, Stride: 0x1}, + unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1}, + unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1}, + unicode.Range16{Lo: 0x390, Hi: 0x390, Stride: 0x1}, + unicode.Range16{Lo: 0x3ac, Hi: 0x3ce, Stride: 0x1}, + unicode.Range16{Lo: 0x3d0, Hi: 0x3d1, Stride: 0x1}, + unicode.Range16{Lo: 0x3d5, Hi: 0x3d7, Stride: 0x1}, + unicode.Range16{Lo: 0x3d9, Hi: 0x3d9, Stride: 0x1}, + unicode.Range16{Lo: 0x3db, Hi: 0x3db, Stride: 0x1}, + unicode.Range16{Lo: 0x3dd, Hi: 0x3dd, Stride: 0x1}, + unicode.Range16{Lo: 0x3df, Hi: 0x3df, Stride: 0x1}, + unicode.Range16{Lo: 0x3e1, Hi: 0x3e1, Stride: 0x1}, + unicode.Range16{Lo: 0x3e3, Hi: 0x3e3, Stride: 0x1}, + unicode.Range16{Lo: 0x3e5, Hi: 0x3e5, Stride: 0x1}, + unicode.Range16{Lo: 0x3e7, Hi: 0x3e7, Stride: 0x1}, + unicode.Range16{Lo: 0x3e9, Hi: 0x3e9, Stride: 0x1}, + unicode.Range16{Lo: 0x3eb, Hi: 0x3eb, Stride: 0x1}, + unicode.Range16{Lo: 0x3ed, Hi: 0x3ed, Stride: 0x1}, + unicode.Range16{Lo: 0x3ef, Hi: 0x3f3, Stride: 0x1}, + unicode.Range16{Lo: 0x3f5, Hi: 0x3f5, Stride: 0x1}, + unicode.Range16{Lo: 0x3f8, Hi: 0x3f8, Stride: 0x1}, + unicode.Range16{Lo: 0x3fb, Hi: 0x3fc, Stride: 0x1}, + unicode.Range16{Lo: 0x430, Hi: 0x45f, Stride: 0x1}, + unicode.Range16{Lo: 0x461, Hi: 0x461, Stride: 0x1}, + unicode.Range16{Lo: 0x463, Hi: 0x463, Stride: 0x1}, + unicode.Range16{Lo: 0x465, Hi: 0x465, Stride: 0x1}, + unicode.Range16{Lo: 0x467, Hi: 0x467, Stride: 0x1}, + unicode.Range16{Lo: 0x469, Hi: 0x469, Stride: 0x1}, + unicode.Range16{Lo: 0x46b, Hi: 0x46b, Stride: 0x1}, + unicode.Range16{Lo: 0x46d, Hi: 0x46d, Stride: 0x1}, + unicode.Range16{Lo: 0x46f, Hi: 0x46f, Stride: 0x1}, + unicode.Range16{Lo: 0x471, Hi: 0x471, Stride: 0x1}, + unicode.Range16{Lo: 0x473, Hi: 0x473, Stride: 0x1}, + unicode.Range16{Lo: 0x475, Hi: 0x475, Stride: 0x1}, + unicode.Range16{Lo: 0x477, Hi: 0x477, Stride: 0x1}, + unicode.Range16{Lo: 0x479, Hi: 0x479, Stride: 0x1}, + unicode.Range16{Lo: 0x47b, Hi: 0x47b, Stride: 0x1}, + unicode.Range16{Lo: 0x47d, Hi: 0x47d, Stride: 0x1}, + unicode.Range16{Lo: 0x47f, Hi: 0x47f, Stride: 0x1}, + unicode.Range16{Lo: 0x481, Hi: 0x481, Stride: 0x1}, + unicode.Range16{Lo: 0x48b, Hi: 0x48b, Stride: 0x1}, + unicode.Range16{Lo: 0x48d, Hi: 0x48d, Stride: 0x1}, + unicode.Range16{Lo: 0x48f, Hi: 0x48f, Stride: 0x1}, + unicode.Range16{Lo: 0x491, Hi: 0x491, Stride: 0x1}, + unicode.Range16{Lo: 0x493, Hi: 0x493, Stride: 0x1}, + unicode.Range16{Lo: 0x495, Hi: 0x495, Stride: 0x1}, + unicode.Range16{Lo: 0x497, Hi: 0x497, Stride: 0x1}, + unicode.Range16{Lo: 0x499, Hi: 0x499, Stride: 0x1}, + unicode.Range16{Lo: 0x49b, Hi: 0x49b, Stride: 0x1}, + unicode.Range16{Lo: 0x49d, Hi: 0x49d, Stride: 0x1}, + unicode.Range16{Lo: 0x49f, Hi: 0x49f, Stride: 0x1}, + 
unicode.Range16{Lo: 0x4a1, Hi: 0x4a1, Stride: 0x1}, + unicode.Range16{Lo: 0x4a3, Hi: 0x4a3, Stride: 0x1}, + unicode.Range16{Lo: 0x4a5, Hi: 0x4a5, Stride: 0x1}, + unicode.Range16{Lo: 0x4a7, Hi: 0x4a7, Stride: 0x1}, + unicode.Range16{Lo: 0x4a9, Hi: 0x4a9, Stride: 0x1}, + unicode.Range16{Lo: 0x4ab, Hi: 0x4ab, Stride: 0x1}, + unicode.Range16{Lo: 0x4ad, Hi: 0x4ad, Stride: 0x1}, + unicode.Range16{Lo: 0x4af, Hi: 0x4af, Stride: 0x1}, + unicode.Range16{Lo: 0x4b1, Hi: 0x4b1, Stride: 0x1}, + unicode.Range16{Lo: 0x4b3, Hi: 0x4b3, Stride: 0x1}, + unicode.Range16{Lo: 0x4b5, Hi: 0x4b5, Stride: 0x1}, + unicode.Range16{Lo: 0x4b7, Hi: 0x4b7, Stride: 0x1}, + unicode.Range16{Lo: 0x4b9, Hi: 0x4b9, Stride: 0x1}, + unicode.Range16{Lo: 0x4bb, Hi: 0x4bb, Stride: 0x1}, + unicode.Range16{Lo: 0x4bd, Hi: 0x4bd, Stride: 0x1}, + unicode.Range16{Lo: 0x4bf, Hi: 0x4bf, Stride: 0x1}, + unicode.Range16{Lo: 0x4c2, Hi: 0x4c2, Stride: 0x1}, + unicode.Range16{Lo: 0x4c4, Hi: 0x4c4, Stride: 0x1}, + unicode.Range16{Lo: 0x4c6, Hi: 0x4c6, Stride: 0x1}, + unicode.Range16{Lo: 0x4c8, Hi: 0x4c8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ca, Hi: 0x4ca, Stride: 0x1}, + unicode.Range16{Lo: 0x4cc, Hi: 0x4cc, Stride: 0x1}, + unicode.Range16{Lo: 0x4ce, Hi: 0x4cf, Stride: 0x1}, + unicode.Range16{Lo: 0x4d1, Hi: 0x4d1, Stride: 0x1}, + unicode.Range16{Lo: 0x4d3, Hi: 0x4d3, Stride: 0x1}, + unicode.Range16{Lo: 0x4d5, Hi: 0x4d5, Stride: 0x1}, + unicode.Range16{Lo: 0x4d7, Hi: 0x4d7, Stride: 0x1}, + unicode.Range16{Lo: 0x4d9, Hi: 0x4d9, Stride: 0x1}, + unicode.Range16{Lo: 0x4db, Hi: 0x4db, Stride: 0x1}, + unicode.Range16{Lo: 0x4dd, Hi: 0x4dd, Stride: 0x1}, + unicode.Range16{Lo: 0x4df, Hi: 0x4df, Stride: 0x1}, + unicode.Range16{Lo: 0x4e1, Hi: 0x4e1, Stride: 0x1}, + unicode.Range16{Lo: 0x4e3, Hi: 0x4e3, Stride: 0x1}, + unicode.Range16{Lo: 0x4e5, Hi: 0x4e5, Stride: 0x1}, + unicode.Range16{Lo: 0x4e7, Hi: 0x4e7, Stride: 0x1}, + unicode.Range16{Lo: 0x4e9, Hi: 0x4e9, Stride: 0x1}, + unicode.Range16{Lo: 0x4eb, Hi: 0x4eb, Stride: 0x1}, + unicode.Range16{Lo: 0x4ed, Hi: 0x4ed, Stride: 0x1}, + unicode.Range16{Lo: 0x4ef, Hi: 0x4ef, Stride: 0x1}, + unicode.Range16{Lo: 0x4f1, Hi: 0x4f1, Stride: 0x1}, + unicode.Range16{Lo: 0x4f3, Hi: 0x4f3, Stride: 0x1}, + unicode.Range16{Lo: 0x4f5, Hi: 0x4f5, Stride: 0x1}, + unicode.Range16{Lo: 0x4f7, Hi: 0x4f7, Stride: 0x1}, + unicode.Range16{Lo: 0x4f9, Hi: 0x4f9, Stride: 0x1}, + unicode.Range16{Lo: 0x4fb, Hi: 0x4fb, Stride: 0x1}, + unicode.Range16{Lo: 0x4fd, Hi: 0x4fd, Stride: 0x1}, + unicode.Range16{Lo: 0x4ff, Hi: 0x4ff, Stride: 0x1}, + unicode.Range16{Lo: 0x501, Hi: 0x501, Stride: 0x1}, + unicode.Range16{Lo: 0x503, Hi: 0x503, Stride: 0x1}, + unicode.Range16{Lo: 0x505, Hi: 0x505, Stride: 0x1}, + unicode.Range16{Lo: 0x507, Hi: 0x507, Stride: 0x1}, + unicode.Range16{Lo: 0x509, Hi: 0x509, Stride: 0x1}, + unicode.Range16{Lo: 0x50b, Hi: 0x50b, Stride: 0x1}, + unicode.Range16{Lo: 0x50d, Hi: 0x50d, Stride: 0x1}, + unicode.Range16{Lo: 0x50f, Hi: 0x50f, Stride: 0x1}, + unicode.Range16{Lo: 0x511, Hi: 0x511, Stride: 0x1}, + unicode.Range16{Lo: 0x513, Hi: 0x513, Stride: 0x1}, + unicode.Range16{Lo: 0x515, Hi: 0x515, Stride: 0x1}, + unicode.Range16{Lo: 0x517, Hi: 0x517, Stride: 0x1}, + unicode.Range16{Lo: 0x519, Hi: 0x519, Stride: 0x1}, + unicode.Range16{Lo: 0x51b, Hi: 0x51b, Stride: 0x1}, + unicode.Range16{Lo: 0x51d, Hi: 0x51d, Stride: 0x1}, + unicode.Range16{Lo: 0x51f, Hi: 0x51f, Stride: 0x1}, + unicode.Range16{Lo: 0x521, Hi: 0x521, Stride: 0x1}, + unicode.Range16{Lo: 0x523, Hi: 0x523, Stride: 0x1}, + unicode.Range16{Lo: 0x525, Hi: 0x525, 
Stride: 0x1}, + unicode.Range16{Lo: 0x527, Hi: 0x527, Stride: 0x1}, + unicode.Range16{Lo: 0x529, Hi: 0x529, Stride: 0x1}, + unicode.Range16{Lo: 0x52b, Hi: 0x52b, Stride: 0x1}, + unicode.Range16{Lo: 0x52d, Hi: 0x52d, Stride: 0x1}, + unicode.Range16{Lo: 0x52f, Hi: 0x52f, Stride: 0x1}, + unicode.Range16{Lo: 0x561, Hi: 0x587, Stride: 0x1}, + unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1}, + unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1}, + unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1}, + unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1}, + unicode.Range16{Lo: 0x1e01, Hi: 0x1e01, Stride: 0x1}, + unicode.Range16{Lo: 0x1e03, Hi: 0x1e03, Stride: 0x1}, + unicode.Range16{Lo: 0x1e05, Hi: 0x1e05, Stride: 0x1}, + unicode.Range16{Lo: 0x1e07, Hi: 0x1e07, Stride: 0x1}, + unicode.Range16{Lo: 0x1e09, Hi: 0x1e09, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0b, Hi: 0x1e0b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0d, Hi: 0x1e0d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0f, Hi: 0x1e0f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e11, Hi: 0x1e11, Stride: 0x1}, + unicode.Range16{Lo: 0x1e13, Hi: 0x1e13, Stride: 0x1}, + unicode.Range16{Lo: 0x1e15, Hi: 0x1e15, Stride: 0x1}, + unicode.Range16{Lo: 0x1e17, Hi: 0x1e17, Stride: 0x1}, + unicode.Range16{Lo: 0x1e19, Hi: 0x1e19, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1b, Hi: 0x1e1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1d, Hi: 0x1e1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1f, Hi: 0x1e1f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e21, Hi: 0x1e21, Stride: 0x1}, + unicode.Range16{Lo: 0x1e23, Hi: 0x1e23, Stride: 0x1}, + unicode.Range16{Lo: 0x1e25, Hi: 0x1e25, Stride: 0x1}, + unicode.Range16{Lo: 0x1e27, Hi: 0x1e27, Stride: 0x1}, + unicode.Range16{Lo: 0x1e29, Hi: 0x1e29, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2b, Hi: 0x1e2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2d, Hi: 0x1e2d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2f, Hi: 0x1e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e31, Hi: 0x1e31, Stride: 0x1}, + unicode.Range16{Lo: 0x1e33, Hi: 0x1e33, Stride: 0x1}, + unicode.Range16{Lo: 0x1e35, Hi: 0x1e35, Stride: 0x1}, + unicode.Range16{Lo: 0x1e37, Hi: 0x1e37, Stride: 0x1}, + unicode.Range16{Lo: 0x1e39, Hi: 0x1e39, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3b, Hi: 0x1e3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3d, Hi: 0x1e3d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3f, Hi: 0x1e3f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e41, Hi: 0x1e41, Stride: 0x1}, + unicode.Range16{Lo: 0x1e43, Hi: 0x1e43, Stride: 0x1}, + unicode.Range16{Lo: 0x1e45, Hi: 0x1e45, Stride: 0x1}, + unicode.Range16{Lo: 0x1e47, Hi: 0x1e47, Stride: 0x1}, + unicode.Range16{Lo: 0x1e49, Hi: 0x1e49, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4b, Hi: 0x1e4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4d, Hi: 0x1e4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4f, Hi: 0x1e4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e51, Hi: 0x1e51, Stride: 0x1}, + unicode.Range16{Lo: 0x1e53, Hi: 0x1e53, Stride: 0x1}, + unicode.Range16{Lo: 0x1e55, Hi: 0x1e55, Stride: 0x1}, + unicode.Range16{Lo: 0x1e57, Hi: 0x1e57, Stride: 0x1}, + unicode.Range16{Lo: 0x1e59, Hi: 0x1e59, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5b, Hi: 0x1e5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5d, Hi: 0x1e5d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5f, Hi: 0x1e5f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e61, Hi: 0x1e61, Stride: 0x1}, + unicode.Range16{Lo: 
0x1e63, Hi: 0x1e63, Stride: 0x1}, + unicode.Range16{Lo: 0x1e65, Hi: 0x1e65, Stride: 0x1}, + unicode.Range16{Lo: 0x1e67, Hi: 0x1e67, Stride: 0x1}, + unicode.Range16{Lo: 0x1e69, Hi: 0x1e69, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6b, Hi: 0x1e6b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6d, Hi: 0x1e6d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6f, Hi: 0x1e6f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e71, Hi: 0x1e71, Stride: 0x1}, + unicode.Range16{Lo: 0x1e73, Hi: 0x1e73, Stride: 0x1}, + unicode.Range16{Lo: 0x1e75, Hi: 0x1e75, Stride: 0x1}, + unicode.Range16{Lo: 0x1e77, Hi: 0x1e77, Stride: 0x1}, + unicode.Range16{Lo: 0x1e79, Hi: 0x1e79, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7b, Hi: 0x1e7b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7d, Hi: 0x1e7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7f, Hi: 0x1e7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e81, Hi: 0x1e81, Stride: 0x1}, + unicode.Range16{Lo: 0x1e83, Hi: 0x1e83, Stride: 0x1}, + unicode.Range16{Lo: 0x1e85, Hi: 0x1e85, Stride: 0x1}, + unicode.Range16{Lo: 0x1e87, Hi: 0x1e87, Stride: 0x1}, + unicode.Range16{Lo: 0x1e89, Hi: 0x1e89, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8b, Hi: 0x1e8b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8d, Hi: 0x1e8d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8f, Hi: 0x1e8f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e91, Hi: 0x1e91, Stride: 0x1}, + unicode.Range16{Lo: 0x1e93, Hi: 0x1e93, Stride: 0x1}, + unicode.Range16{Lo: 0x1e95, Hi: 0x1e9d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9f, Hi: 0x1e9f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea1, Hi: 0x1ea1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea3, Hi: 0x1ea3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea5, Hi: 0x1ea5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea7, Hi: 0x1ea7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea9, Hi: 0x1ea9, Stride: 0x1}, + unicode.Range16{Lo: 0x1eab, Hi: 0x1eab, Stride: 0x1}, + unicode.Range16{Lo: 0x1ead, Hi: 0x1ead, Stride: 0x1}, + unicode.Range16{Lo: 0x1eaf, Hi: 0x1eaf, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb1, Hi: 0x1eb1, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb3, Hi: 0x1eb3, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb5, Hi: 0x1eb5, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb7, Hi: 0x1eb7, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb9, Hi: 0x1eb9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebb, Hi: 0x1ebb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebd, Hi: 0x1ebd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebf, Hi: 0x1ebf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec1, Hi: 0x1ec1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec3, Hi: 0x1ec3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec5, Hi: 0x1ec5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec7, Hi: 0x1ec7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec9, Hi: 0x1ec9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecb, Hi: 0x1ecb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecd, Hi: 0x1ecd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecf, Hi: 0x1ecf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed1, Hi: 0x1ed1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed3, Hi: 0x1ed3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed5, Hi: 0x1ed5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed7, Hi: 0x1ed7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed9, Hi: 0x1ed9, Stride: 0x1}, + unicode.Range16{Lo: 0x1edb, Hi: 0x1edb, Stride: 0x1}, + unicode.Range16{Lo: 0x1edd, Hi: 0x1edd, Stride: 0x1}, + unicode.Range16{Lo: 0x1edf, Hi: 0x1edf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee1, Hi: 0x1ee1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee3, Hi: 0x1ee3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee5, Hi: 0x1ee5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee7, Hi: 0x1ee7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee9, Hi: 0x1ee9, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1eeb, Hi: 0x1eeb, Stride: 0x1}, + unicode.Range16{Lo: 0x1eed, Hi: 0x1eed, Stride: 0x1}, + unicode.Range16{Lo: 0x1eef, Hi: 0x1eef, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef1, Hi: 0x1ef1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef3, Hi: 0x1ef3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef5, Hi: 0x1ef5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef7, Hi: 0x1ef7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef9, Hi: 0x1ef9, Stride: 0x1}, + unicode.Range16{Lo: 0x1efb, Hi: 0x1efb, Stride: 0x1}, + unicode.Range16{Lo: 0x1efd, Hi: 0x1efd, Stride: 0x1}, + unicode.Range16{Lo: 0x1eff, Hi: 0x1f07, Stride: 0x1}, + unicode.Range16{Lo: 0x1f10, Hi: 0x1f15, Stride: 0x1}, + unicode.Range16{Lo: 0x1f20, Hi: 0x1f27, Stride: 0x1}, + unicode.Range16{Lo: 0x1f30, Hi: 0x1f37, Stride: 0x1}, + unicode.Range16{Lo: 0x1f40, Hi: 0x1f45, Stride: 0x1}, + unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1}, + unicode.Range16{Lo: 0x1f60, Hi: 0x1f67, Stride: 0x1}, + unicode.Range16{Lo: 0x1f70, Hi: 0x1f7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f80, Hi: 0x1f87, Stride: 0x1}, + unicode.Range16{Lo: 0x1f90, Hi: 0x1f97, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa0, Hi: 0x1fa7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb0, Hi: 0x1fb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb6, Hi: 0x1fb7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc6, Hi: 0x1fc7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd6, Hi: 0x1fd7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe0, Hi: 0x1fe7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff6, Hi: 0x1ff7, Stride: 0x1}, + unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1}, + unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1}, + unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1}, + unicode.Range16{Lo: 0x210a, Hi: 0x210a, Stride: 0x1}, + unicode.Range16{Lo: 0x210e, Hi: 0x210f, Stride: 0x1}, + unicode.Range16{Lo: 0x2113, Hi: 0x2113, Stride: 0x1}, + unicode.Range16{Lo: 0x212f, Hi: 0x212f, Stride: 0x1}, + unicode.Range16{Lo: 0x2134, Hi: 0x2134, Stride: 0x1}, + unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1}, + unicode.Range16{Lo: 0x213c, Hi: 0x213d, Stride: 0x1}, + unicode.Range16{Lo: 0x2146, Hi: 0x2149, Stride: 0x1}, + unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1}, + unicode.Range16{Lo: 0x2170, Hi: 0x217f, Stride: 0x1}, + unicode.Range16{Lo: 0x2184, Hi: 0x2184, Stride: 0x1}, + unicode.Range16{Lo: 0x24d0, Hi: 0x24e9, Stride: 0x1}, + unicode.Range16{Lo: 0x2c30, Hi: 0x2c5e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c61, Hi: 0x2c61, Stride: 0x1}, + unicode.Range16{Lo: 0x2c65, Hi: 0x2c66, Stride: 0x1}, + unicode.Range16{Lo: 0x2c68, Hi: 0x2c68, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6a, Hi: 0x2c6a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6c, Hi: 0x2c6c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c71, Hi: 0x2c71, Stride: 0x1}, + unicode.Range16{Lo: 0x2c73, Hi: 0x2c74, Stride: 0x1}, + unicode.Range16{Lo: 0x2c76, Hi: 0x2c7b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c81, Hi: 0x2c81, Stride: 0x1}, + unicode.Range16{Lo: 0x2c83, Hi: 0x2c83, Stride: 0x1}, + unicode.Range16{Lo: 0x2c85, Hi: 0x2c85, Stride: 0x1}, + unicode.Range16{Lo: 0x2c87, Hi: 0x2c87, Stride: 0x1}, + unicode.Range16{Lo: 0x2c89, Hi: 0x2c89, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8b, Hi: 0x2c8b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8d, Hi: 0x2c8d, Stride: 0x1}, + 
unicode.Range16{Lo: 0x2c8f, Hi: 0x2c8f, Stride: 0x1}, + unicode.Range16{Lo: 0x2c91, Hi: 0x2c91, Stride: 0x1}, + unicode.Range16{Lo: 0x2c93, Hi: 0x2c93, Stride: 0x1}, + unicode.Range16{Lo: 0x2c95, Hi: 0x2c95, Stride: 0x1}, + unicode.Range16{Lo: 0x2c97, Hi: 0x2c97, Stride: 0x1}, + unicode.Range16{Lo: 0x2c99, Hi: 0x2c99, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9b, Hi: 0x2c9b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9d, Hi: 0x2c9d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9f, Hi: 0x2c9f, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca1, Hi: 0x2ca1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca3, Hi: 0x2ca3, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca5, Hi: 0x2ca5, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca7, Hi: 0x2ca7, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca9, Hi: 0x2ca9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cab, Hi: 0x2cab, Stride: 0x1}, + unicode.Range16{Lo: 0x2cad, Hi: 0x2cad, Stride: 0x1}, + unicode.Range16{Lo: 0x2caf, Hi: 0x2caf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb1, Hi: 0x2cb1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb3, Hi: 0x2cb3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb5, Hi: 0x2cb5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb7, Hi: 0x2cb7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb9, Hi: 0x2cb9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbb, Hi: 0x2cbb, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbd, Hi: 0x2cbd, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbf, Hi: 0x2cbf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc1, Hi: 0x2cc1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc3, Hi: 0x2cc3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc5, Hi: 0x2cc5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc7, Hi: 0x2cc7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc9, Hi: 0x2cc9, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccb, Hi: 0x2ccb, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccd, Hi: 0x2ccd, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccf, Hi: 0x2ccf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd1, Hi: 0x2cd1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd3, Hi: 0x2cd3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd5, Hi: 0x2cd5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd7, Hi: 0x2cd7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd9, Hi: 0x2cd9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdb, Hi: 0x2cdb, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdd, Hi: 0x2cdd, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdf, Hi: 0x2cdf, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce1, Hi: 0x2ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce3, Hi: 0x2ce4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cec, Hi: 0x2cec, Stride: 0x1}, + unicode.Range16{Lo: 0x2cee, Hi: 0x2cee, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf3, Hi: 0x2cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1}, + unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1}, + unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1}, + unicode.Range16{Lo: 0xa641, Hi: 0xa641, Stride: 0x1}, + unicode.Range16{Lo: 0xa643, Hi: 0xa643, Stride: 0x1}, + unicode.Range16{Lo: 0xa645, Hi: 0xa645, Stride: 0x1}, + unicode.Range16{Lo: 0xa647, Hi: 0xa647, Stride: 0x1}, + unicode.Range16{Lo: 0xa649, Hi: 0xa649, Stride: 0x1}, + unicode.Range16{Lo: 0xa64b, Hi: 0xa64b, Stride: 0x1}, + unicode.Range16{Lo: 0xa64d, Hi: 0xa64d, Stride: 0x1}, + unicode.Range16{Lo: 0xa64f, Hi: 0xa64f, Stride: 0x1}, + unicode.Range16{Lo: 0xa651, Hi: 0xa651, Stride: 0x1}, + unicode.Range16{Lo: 0xa653, Hi: 0xa653, Stride: 0x1}, + unicode.Range16{Lo: 0xa655, Hi: 0xa655, Stride: 0x1}, + unicode.Range16{Lo: 0xa657, Hi: 0xa657, Stride: 0x1}, + unicode.Range16{Lo: 0xa659, Hi: 0xa659, Stride: 0x1}, + unicode.Range16{Lo: 0xa65b, Hi: 0xa65b, Stride: 0x1}, + unicode.Range16{Lo: 
0xa65d, Hi: 0xa65d, Stride: 0x1}, + unicode.Range16{Lo: 0xa65f, Hi: 0xa65f, Stride: 0x1}, + unicode.Range16{Lo: 0xa661, Hi: 0xa661, Stride: 0x1}, + unicode.Range16{Lo: 0xa663, Hi: 0xa663, Stride: 0x1}, + unicode.Range16{Lo: 0xa665, Hi: 0xa665, Stride: 0x1}, + unicode.Range16{Lo: 0xa667, Hi: 0xa667, Stride: 0x1}, + unicode.Range16{Lo: 0xa669, Hi: 0xa669, Stride: 0x1}, + unicode.Range16{Lo: 0xa66b, Hi: 0xa66b, Stride: 0x1}, + unicode.Range16{Lo: 0xa66d, Hi: 0xa66d, Stride: 0x1}, + unicode.Range16{Lo: 0xa681, Hi: 0xa681, Stride: 0x1}, + unicode.Range16{Lo: 0xa683, Hi: 0xa683, Stride: 0x1}, + unicode.Range16{Lo: 0xa685, Hi: 0xa685, Stride: 0x1}, + unicode.Range16{Lo: 0xa687, Hi: 0xa687, Stride: 0x1}, + unicode.Range16{Lo: 0xa689, Hi: 0xa689, Stride: 0x1}, + unicode.Range16{Lo: 0xa68b, Hi: 0xa68b, Stride: 0x1}, + unicode.Range16{Lo: 0xa68d, Hi: 0xa68d, Stride: 0x1}, + unicode.Range16{Lo: 0xa68f, Hi: 0xa68f, Stride: 0x1}, + unicode.Range16{Lo: 0xa691, Hi: 0xa691, Stride: 0x1}, + unicode.Range16{Lo: 0xa693, Hi: 0xa693, Stride: 0x1}, + unicode.Range16{Lo: 0xa695, Hi: 0xa695, Stride: 0x1}, + unicode.Range16{Lo: 0xa697, Hi: 0xa697, Stride: 0x1}, + unicode.Range16{Lo: 0xa699, Hi: 0xa699, Stride: 0x1}, + unicode.Range16{Lo: 0xa69b, Hi: 0xa69b, Stride: 0x1}, + unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1}, + unicode.Range16{Lo: 0xa723, Hi: 0xa723, Stride: 0x1}, + unicode.Range16{Lo: 0xa725, Hi: 0xa725, Stride: 0x1}, + unicode.Range16{Lo: 0xa727, Hi: 0xa727, Stride: 0x1}, + unicode.Range16{Lo: 0xa729, Hi: 0xa729, Stride: 0x1}, + unicode.Range16{Lo: 0xa72b, Hi: 0xa72b, Stride: 0x1}, + unicode.Range16{Lo: 0xa72d, Hi: 0xa72d, Stride: 0x1}, + unicode.Range16{Lo: 0xa72f, Hi: 0xa731, Stride: 0x1}, + unicode.Range16{Lo: 0xa733, Hi: 0xa733, Stride: 0x1}, + unicode.Range16{Lo: 0xa735, Hi: 0xa735, Stride: 0x1}, + unicode.Range16{Lo: 0xa737, Hi: 0xa737, Stride: 0x1}, + unicode.Range16{Lo: 0xa739, Hi: 0xa739, Stride: 0x1}, + unicode.Range16{Lo: 0xa73b, Hi: 0xa73b, Stride: 0x1}, + unicode.Range16{Lo: 0xa73d, Hi: 0xa73d, Stride: 0x1}, + unicode.Range16{Lo: 0xa73f, Hi: 0xa73f, Stride: 0x1}, + unicode.Range16{Lo: 0xa741, Hi: 0xa741, Stride: 0x1}, + unicode.Range16{Lo: 0xa743, Hi: 0xa743, Stride: 0x1}, + unicode.Range16{Lo: 0xa745, Hi: 0xa745, Stride: 0x1}, + unicode.Range16{Lo: 0xa747, Hi: 0xa747, Stride: 0x1}, + unicode.Range16{Lo: 0xa749, Hi: 0xa749, Stride: 0x1}, + unicode.Range16{Lo: 0xa74b, Hi: 0xa74b, Stride: 0x1}, + unicode.Range16{Lo: 0xa74d, Hi: 0xa74d, Stride: 0x1}, + unicode.Range16{Lo: 0xa74f, Hi: 0xa74f, Stride: 0x1}, + unicode.Range16{Lo: 0xa751, Hi: 0xa751, Stride: 0x1}, + unicode.Range16{Lo: 0xa753, Hi: 0xa753, Stride: 0x1}, + unicode.Range16{Lo: 0xa755, Hi: 0xa755, Stride: 0x1}, + unicode.Range16{Lo: 0xa757, Hi: 0xa757, Stride: 0x1}, + unicode.Range16{Lo: 0xa759, Hi: 0xa759, Stride: 0x1}, + unicode.Range16{Lo: 0xa75b, Hi: 0xa75b, Stride: 0x1}, + unicode.Range16{Lo: 0xa75d, Hi: 0xa75d, Stride: 0x1}, + unicode.Range16{Lo: 0xa75f, Hi: 0xa75f, Stride: 0x1}, + unicode.Range16{Lo: 0xa761, Hi: 0xa761, Stride: 0x1}, + unicode.Range16{Lo: 0xa763, Hi: 0xa763, Stride: 0x1}, + unicode.Range16{Lo: 0xa765, Hi: 0xa765, Stride: 0x1}, + unicode.Range16{Lo: 0xa767, Hi: 0xa767, Stride: 0x1}, + unicode.Range16{Lo: 0xa769, Hi: 0xa769, Stride: 0x1}, + unicode.Range16{Lo: 0xa76b, Hi: 0xa76b, Stride: 0x1}, + unicode.Range16{Lo: 0xa76d, Hi: 0xa76d, Stride: 0x1}, + unicode.Range16{Lo: 0xa76f, Hi: 0xa76f, Stride: 0x1}, + unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1}, + unicode.Range16{Lo: 0xa771, Hi: 0xa778, 
Stride: 0x1}, + unicode.Range16{Lo: 0xa77a, Hi: 0xa77a, Stride: 0x1}, + unicode.Range16{Lo: 0xa77c, Hi: 0xa77c, Stride: 0x1}, + unicode.Range16{Lo: 0xa77f, Hi: 0xa77f, Stride: 0x1}, + unicode.Range16{Lo: 0xa781, Hi: 0xa781, Stride: 0x1}, + unicode.Range16{Lo: 0xa783, Hi: 0xa783, Stride: 0x1}, + unicode.Range16{Lo: 0xa785, Hi: 0xa785, Stride: 0x1}, + unicode.Range16{Lo: 0xa787, Hi: 0xa787, Stride: 0x1}, + unicode.Range16{Lo: 0xa78c, Hi: 0xa78c, Stride: 0x1}, + unicode.Range16{Lo: 0xa78e, Hi: 0xa78e, Stride: 0x1}, + unicode.Range16{Lo: 0xa791, Hi: 0xa791, Stride: 0x1}, + unicode.Range16{Lo: 0xa793, Hi: 0xa795, Stride: 0x1}, + unicode.Range16{Lo: 0xa797, Hi: 0xa797, Stride: 0x1}, + unicode.Range16{Lo: 0xa799, Hi: 0xa799, Stride: 0x1}, + unicode.Range16{Lo: 0xa79b, Hi: 0xa79b, Stride: 0x1}, + unicode.Range16{Lo: 0xa79d, Hi: 0xa79d, Stride: 0x1}, + unicode.Range16{Lo: 0xa79f, Hi: 0xa79f, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a1, Hi: 0xa7a1, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a3, Hi: 0xa7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a5, Hi: 0xa7a5, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a7, Hi: 0xa7a7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a9, Hi: 0xa7a9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b5, Hi: 0xa7b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b7, Hi: 0xa7b7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1}, + unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1}, + unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1}, + unicode.Range16{Lo: 0xab60, Hi: 0xab65, Stride: 0x1}, + unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1}, + unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1}, + unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1}, + unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10428, Hi: 0x1044f, Stride: 0x1}, + unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1}, + unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1}, + unicode.Range32{Lo: 0x118c0, Hi: 0x118df, Stride: 0x1}, + unicode.Range32{Lo: 0x1d41a, Hi: 0x1d433, Stride: 0x1}, + unicode.Range32{Lo: 0x1d44e, Hi: 0x1d454, Stride: 0x1}, + unicode.Range32{Lo: 0x1d456, Hi: 0x1d467, Stride: 0x1}, + unicode.Range32{Lo: 0x1d482, Hi: 0x1d49b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4b6, Hi: 0x1d4b9, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d4cf, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4ea, Hi: 0x1d503, Stride: 0x1}, + unicode.Range32{Lo: 0x1d51e, Hi: 0x1d537, Stride: 0x1}, + unicode.Range32{Lo: 0x1d552, Hi: 0x1d56b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d586, Hi: 0x1d59f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d5ba, Hi: 0x1d5d3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d5ee, Hi: 0x1d607, Stride: 0x1}, + unicode.Range32{Lo: 0x1d622, Hi: 0x1d63b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d656, Hi: 0x1d66f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d68a, Hi: 0x1d6a5, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6e1, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1}, + unicode.Range32{Lo: 0x1d716, Hi: 0x1d71b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d750, Hi: 0x1d755, Stride: 0x1}, + unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1}, + unicode.Range32{Lo: 0x1d78a, Hi: 0x1d78f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7aa, Hi: 
0x1d7c2, Stride: 0x1},
+	unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7c9, Stride: 0x1},
+	unicode.Range32{Lo: 0x1d7cb, Hi: 0x1d7cb, Stride: 0x1},
+	unicode.Range32{Lo: 0x1e922, Hi: 0x1e943, Stride: 0x1},
+	},
+	LatinOffset: 6,
+}
+
+var _SentenceNumeric = &unicode.RangeTable{
+	R16: []unicode.Range16{
+	unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1},
+	unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1},
+	unicode.Range16{Lo: 0x66b, Hi: 0x66c, Stride: 0x1},
+	unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1},
+	unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1},
+	unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1},
+	unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1},
+	unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1},
+	unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1},
+	unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1},
+	unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1},
+	unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1},
+	unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1},
+	unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1},
+	unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1},
+	unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1},
+	unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1},
+	unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1},
+	unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1},
+	unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1},
+	unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1},
+	unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1},
+	unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1},
+	unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1},
+	unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1},
+	unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1},
+	unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1},
+	unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1},
+	unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1},
+	unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1},
+	unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1},
+	unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1},
+	unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1},
+	unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1},
+	unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1},
+	unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1},
+	unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+	unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1},
+	unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1},
+	unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1},
+	unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1},
+	unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1},
+	unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1},
+	unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1},
+	unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1},
+	unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1},
+	unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1},
+	unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1},
+	unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1},
+	unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1},
+	unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1},
+	unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1},
+	unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1},
+	unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _SentenceOLetter = &unicode.RangeTable{
+	R16: []unicode.Range16{
+	unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
+	unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
+ unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1}, + unicode.Range16{Lo: 0x2b9, Hi: 0x2bf, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1}, + unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1}, + unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1}, + unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1}, + unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1}, + unicode.Range16{Lo: 0x5f0, Hi: 0x5f2, Stride: 0x1}, + unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1}, + unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1}, + unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1}, + unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1}, + unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1}, + unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1}, + unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1}, + unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1}, + unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1}, + unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1}, + unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1}, + unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1}, + unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1}, + unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1}, + unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1}, + unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1}, + unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1}, + unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1}, + unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1}, + unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1}, + unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1}, + unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1}, + unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1}, + unicode.Range16{Lo: 0x8a0, Hi: 0x8b4, Stride: 0x1}, + unicode.Range16{Lo: 0x8b6, Hi: 0x8bd, Stride: 0x1}, + unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1}, + unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1}, + unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1}, + unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1}, + unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1}, + unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1}, + unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1}, + unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1}, + unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1}, + unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1}, + unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1}, + unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1}, + unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1}, + unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1}, + unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1}, + unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1}, + unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1}, + unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1}, + unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1}, + unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1}, + unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1}, + unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1}, + unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1}, + unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1}, + unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1}, + unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1}, + unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1}, + unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1}, + unicode.Range16{Lo: 0xab2, Hi: 0xab3, 
Stride: 0x1}, + unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1}, + unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1}, + unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1}, + unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1}, + unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1}, + unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1}, + unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1}, + unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1}, + unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1}, + unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1}, + unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1}, + unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1}, + unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1}, + unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1}, + unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1}, + unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1}, + unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1}, + unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1}, + unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1}, + unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1}, + unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1}, + unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1}, + unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1}, + unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1}, + unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1}, + unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1}, + unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1}, + unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1}, + unicode.Range16{Lo: 0xcde, Hi: 0xcde, Stride: 0x1}, + unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1}, + unicode.Range16{Lo: 0xd05, Hi: 0xd0c, Stride: 0x1}, + unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1}, + unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1}, + unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1}, + unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1}, + unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1}, + unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1}, + unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1}, + unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1}, + unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1}, + unicode.Range16{Lo: 0xe01, Hi: 0xe30, Stride: 0x1}, + unicode.Range16{Lo: 0xe32, Hi: 0xe33, Stride: 0x1}, + unicode.Range16{Lo: 0xe40, Hi: 0xe45, Stride: 0x1}, + unicode.Range16{Lo: 0xe46, Hi: 0xe46, Stride: 0x1}, + unicode.Range16{Lo: 0xe81, Hi: 0xe82, Stride: 0x1}, + unicode.Range16{Lo: 0xe84, Hi: 0xe84, Stride: 0x1}, + unicode.Range16{Lo: 0xe87, Hi: 0xe88, Stride: 0x1}, + unicode.Range16{Lo: 0xe8a, Hi: 0xe8a, Stride: 0x1}, + unicode.Range16{Lo: 0xe8d, Hi: 0xe8d, Stride: 0x1}, + unicode.Range16{Lo: 0xe94, 
Hi: 0xe97, Stride: 0x1}, + unicode.Range16{Lo: 0xe99, Hi: 0xe9f, Stride: 0x1}, + unicode.Range16{Lo: 0xea1, Hi: 0xea3, Stride: 0x1}, + unicode.Range16{Lo: 0xea5, Hi: 0xea5, Stride: 0x1}, + unicode.Range16{Lo: 0xea7, Hi: 0xea7, Stride: 0x1}, + unicode.Range16{Lo: 0xeaa, Hi: 0xeab, Stride: 0x1}, + unicode.Range16{Lo: 0xead, Hi: 0xeb0, Stride: 0x1}, + unicode.Range16{Lo: 0xeb2, Hi: 0xeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xebd, Hi: 0xebd, Stride: 0x1}, + unicode.Range16{Lo: 0xec0, Hi: 0xec4, Stride: 0x1}, + unicode.Range16{Lo: 0xec6, Hi: 0xec6, Stride: 0x1}, + unicode.Range16{Lo: 0xedc, Hi: 0xedf, Stride: 0x1}, + unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1}, + unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1}, + unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1}, + unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1}, + unicode.Range16{Lo: 0x1000, Hi: 0x102a, Stride: 0x1}, + unicode.Range16{Lo: 0x103f, Hi: 0x103f, Stride: 0x1}, + unicode.Range16{Lo: 0x1050, Hi: 0x1055, Stride: 0x1}, + unicode.Range16{Lo: 0x105a, Hi: 0x105d, Stride: 0x1}, + unicode.Range16{Lo: 0x1061, Hi: 0x1061, Stride: 0x1}, + unicode.Range16{Lo: 0x1065, Hi: 0x1066, Stride: 0x1}, + unicode.Range16{Lo: 0x106e, Hi: 0x1070, Stride: 0x1}, + unicode.Range16{Lo: 0x1075, Hi: 0x1081, Stride: 0x1}, + unicode.Range16{Lo: 0x108e, Hi: 0x108e, Stride: 0x1}, + unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1}, + unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1}, + unicode.Range16{Lo: 0x10fd, Hi: 0x1248, Stride: 0x1}, + unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1}, + unicode.Range16{Lo: 0x1250, Hi: 0x1256, Stride: 0x1}, + unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1}, + unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1}, + unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1}, + unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1}, + unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1}, + unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1}, + unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1}, + unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1}, + unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1}, + unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1}, + unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1}, + unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1}, + unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1}, + unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1}, + unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1}, + unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1}, + unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1}, + unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1}, + unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1}, + unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1700, Hi: 0x170c, Stride: 0x1}, + unicode.Range16{Lo: 0x170e, Hi: 0x1711, Stride: 0x1}, + unicode.Range16{Lo: 0x1720, Hi: 0x1731, Stride: 0x1}, + unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1}, + unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1}, + unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1}, + unicode.Range16{Lo: 0x1780, Hi: 0x17b3, Stride: 0x1}, + unicode.Range16{Lo: 0x17d7, Hi: 0x17d7, Stride: 0x1}, + unicode.Range16{Lo: 0x17dc, Hi: 0x17dc, Stride: 0x1}, + unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1}, + unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1}, + unicode.Range16{Lo: 0x1844, Hi: 0x1877, Stride: 0x1}, + unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1}, + unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1}, + unicode.Range16{Lo: 0x18aa, 
Hi: 0x18aa, Stride: 0x1}, + unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1}, + unicode.Range16{Lo: 0x1950, Hi: 0x196d, Stride: 0x1}, + unicode.Range16{Lo: 0x1970, Hi: 0x1974, Stride: 0x1}, + unicode.Range16{Lo: 0x1980, Hi: 0x19ab, Stride: 0x1}, + unicode.Range16{Lo: 0x19b0, Hi: 0x19c9, Stride: 0x1}, + unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1}, + unicode.Range16{Lo: 0x1a20, Hi: 0x1a54, Stride: 0x1}, + unicode.Range16{Lo: 0x1aa7, Hi: 0x1aa7, Stride: 0x1}, + unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1}, + unicode.Range16{Lo: 0x1b45, Hi: 0x1b4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1}, + unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1}, + unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1}, + unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1}, + unicode.Range16{Lo: 0x1cee, Hi: 0x1cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1}, + unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1}, + unicode.Range16{Lo: 0x2180, Hi: 0x2182, Stride: 0x1}, + unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1}, + unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1}, + unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1}, + unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1}, + unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1}, + unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1}, + unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1}, + unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1}, + unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1}, + unicode.Range16{Lo: 0x3006, Hi: 0x3006, Stride: 0x1}, + unicode.Range16{Lo: 0x3007, Hi: 0x3007, Stride: 0x1}, + unicode.Range16{Lo: 0x3021, Hi: 0x3029, Stride: 0x1}, + unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1}, + unicode.Range16{Lo: 0x3038, Hi: 0x303a, Stride: 0x1}, + unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1}, + unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1}, + unicode.Range16{Lo: 0x3041, Hi: 0x3096, Stride: 0x1}, + unicode.Range16{Lo: 0x309d, Hi: 0x309e, Stride: 0x1}, + unicode.Range16{Lo: 0x309f, Hi: 0x309f, Stride: 0x1}, + unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1}, + unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1}, + unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1}, + unicode.Range16{Lo: 0x3105, Hi: 0x312d, Stride: 0x1}, + unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1}, + unicode.Range16{Lo: 0x31a0, Hi: 0x31ba, Stride: 0x1}, + unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1}, + unicode.Range16{Lo: 0x3400, Hi: 0x4db5, Stride: 0x1}, + unicode.Range16{Lo: 0x4e00, Hi: 0x9fd5, Stride: 0x1}, + unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1}, + unicode.Range16{Lo: 0xa015, Hi: 0xa015, Stride: 0x1}, + unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1}, + unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1}, + unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1}, 
+ unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1}, + unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1}, + unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1}, + unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1}, + unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1}, + unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1}, + unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1}, + unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1}, + unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1}, + unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1}, + unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1}, + unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1}, + unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1}, + unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e0, Hi: 0xa9e4, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e6, Hi: 0xa9e6, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e7, Hi: 0xa9ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa9fa, Hi: 0xa9fe, Stride: 0x1}, + unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1}, + unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1}, + unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa60, Hi: 0xaa6f, Stride: 0x1}, + unicode.Range16{Lo: 0xaa70, Hi: 0xaa70, Stride: 0x1}, + unicode.Range16{Lo: 0xaa71, Hi: 0xaa76, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7a, Hi: 0xaa7a, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7e, Hi: 0xaaaf, Stride: 0x1}, + unicode.Range16{Lo: 0xaab1, Hi: 0xaab1, Stride: 0x1}, + unicode.Range16{Lo: 0xaab5, Hi: 0xaab6, Stride: 0x1}, + unicode.Range16{Lo: 0xaab9, Hi: 0xaabd, Stride: 0x1}, + unicode.Range16{Lo: 0xaac0, Hi: 0xaac0, Stride: 0x1}, + unicode.Range16{Lo: 0xaac2, Hi: 0xaac2, Stride: 0x1}, + unicode.Range16{Lo: 0xaadb, Hi: 0xaadc, Stride: 0x1}, + unicode.Range16{Lo: 0xaadd, Hi: 0xaadd, Stride: 0x1}, + unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1}, + unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1}, + unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1}, + unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1}, + unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1}, + unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1}, + unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1}, + unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1}, + unicode.Range16{Lo: 0xf900, Hi: 0xfa6d, Stride: 0x1}, + unicode.Range16{Lo: 0xfa70, Hi: 0xfad9, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1}, + unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1}, + unicode.Range16{Lo: 0xfb38, Hi: 0xfb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1}, + unicode.Range16{Lo: 
0xfb43, Hi: 0xfb44, Stride: 0x1}, + unicode.Range16{Lo: 0xfb46, Hi: 0xfbb1, Stride: 0x1}, + unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1}, + unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1}, + unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1}, + unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1}, + unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1}, + unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1}, + unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1}, + unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1}, + unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1}, + unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1}, + unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1}, + unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1}, + unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1}, + unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1}, + unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1}, + unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1}, + unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1}, + unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1}, + unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1}, + unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1}, + unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1}, + unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1}, + unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1}, + unicode.Range32{Lo: 0x10330, Hi: 0x10340, Stride: 0x1}, + unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1}, + unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1}, + unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1}, + unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1}, + unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1}, + unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1}, + unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1}, + unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1}, + unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1}, + unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1}, + unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1}, + unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1}, + unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1}, + unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1}, + unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1}, + unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1}, + unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1}, + unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1}, + unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1}, + unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1}, + unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1}, + unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1}, + unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1}, + unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1}, + unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1}, + unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1}, + unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1}, + unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1}, + unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1}, + unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1}, + unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1}, + unicode.Range32{Lo: 0x10a19, Hi: 0x10a33, Stride: 0x1}, + unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1}, + unicode.Range32{Lo: 0x10a80, Hi: 
0x10a9c, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1}, + unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1}, + unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1}, + unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1}, + unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1}, + unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1}, + unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1}, + unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1}, + unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1}, + unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1}, + unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1}, + unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1}, + unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1}, + unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1}, + unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1}, + unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1}, + unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1}, + unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1}, + unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1}, + unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1}, + unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1}, + unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1}, + unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1}, + unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1}, + unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1}, + unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1}, + unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1}, + unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1}, + unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1}, + unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1}, + unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1}, + unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1}, + unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1}, + unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1}, + unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1}, + unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1}, + unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1}, + unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1}, + unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1}, + unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1}, + unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1}, + unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1}, + unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1}, + unicode.Range32{Lo: 0x11700, Hi: 0x11719, Stride: 0x1}, + unicode.Range32{Lo: 0x118ff, Hi: 0x118ff, Stride: 0x1}, + unicode.Range32{Lo: 0x11ac0, Hi: 0x11af8, Stride: 0x1}, + unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1}, + unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1}, + unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1}, + unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1}, + unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1}, + unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1}, + unicode.Range32{Lo: 0x13000, Hi: 0x1342e, Stride: 0x1}, + unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1}, + unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1}, + unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1}, + unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1}, + unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1}, + unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 
0x1},
+	unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1},
+	unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1},
+	unicode.Range32{Lo: 0x16f00, Hi: 0x16f44, Stride: 0x1},
+	unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1},
+	unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1},
+	unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe0, Stride: 0x1},
+	unicode.Range32{Lo: 0x17000, Hi: 0x187ec, Stride: 0x1},
+	unicode.Range32{Lo: 0x18800, Hi: 0x18af2, Stride: 0x1},
+	unicode.Range32{Lo: 0x1b000, Hi: 0x1b001, Stride: 0x1},
+	unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1},
+	unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1},
+	unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1},
+	unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1},
+	unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1},
+	unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1},
+	unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1},
+	unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1},
+	unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1},
+	unicode.Range32{Lo: 0x20000, Hi: 0x2a6d6, Stride: 0x1},
+	unicode.Range32{Lo: 0x2a700, Hi: 0x2b734, Stride: 0x1},
+	unicode.Range32{Lo: 0x2b740, Hi: 0x2b81d, Stride: 0x1},
+	unicode.Range32{Lo: 0x2b820, Hi: 0x2cea1, Stride: 0x1},
+	unicode.Range32{Lo: 0x2f800, Hi: 0x2fa1d, Stride: 0x1},
+	},
+	LatinOffset: 0,
+}
+
+var _SentenceSContinue = &unicode.RangeTable{
+	R16: []unicode.Range16{
+	unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1},
+	unicode.Range16{Lo: 0x2d, Hi: 0x2d, Stride: 0x1},
+	unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1},
+	unicode.Range16{Lo: 0x55d, Hi: 0x55d, Stride: 0x1},
+	unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1},
+	unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1},
+	unicode.Range16{Lo: 0x1802, Hi: 0x1802, Stride: 0x1},
+	unicode.Range16{Lo: 0x1808, Hi: 0x1808, Stride: 0x1},
+	unicode.Range16{Lo: 0x2013, Hi: 0x2014, Stride: 0x1},
+	unicode.Range16{Lo: 0x3001, Hi: 0x3001, Stride: 0x1},
+	unicode.Range16{Lo: 0xfe10, Hi: 0xfe11, Stride: 0x1},
+	unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1},
+	unicode.Range16{Lo: 0xfe31, Hi: 0xfe32, Stride: 0x1},
+	unicode.Range16{Lo: 0xfe50, Hi: 0xfe51, Stride: 0x1},
+	unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1},
+	unicode.Range16{Lo: 0xfe58, Hi: 0xfe58, Stride: 0x1},
+	unicode.Range16{Lo: 0xfe63, Hi: 0xfe63, Stride: 0x1},
+	unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1},
+	unicode.Range16{Lo: 0xff0d, Hi: 0xff0d, Stride: 0x1},
+	unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1},
+	unicode.Range16{Lo: 0xff64, Hi: 0xff64, Stride: 0x1},
+	},
+	LatinOffset: 3,
+}
+
+var _SentenceSTerm = &unicode.RangeTable{
+	R16: []unicode.Range16{
+	unicode.Range16{Lo: 0x21, Hi: 0x21, Stride: 0x1},
+	unicode.Range16{Lo: 0x3f, Hi: 0x3f, Stride: 0x1},
+	unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1},
+	unicode.Range16{Lo: 0x61f, Hi: 0x61f, Stride: 0x1},
+	unicode.Range16{Lo: 0x6d4, Hi: 0x6d4, Stride: 0x1},
+	unicode.Range16{Lo: 0x700, Hi: 0x702, Stride: 0x1},
+	unicode.Range16{Lo: 0x7f9, Hi: 0x7f9, Stride: 0x1},
+	unicode.Range16{Lo: 0x964, Hi: 0x965, Stride: 0x1},
+	unicode.Range16{Lo: 0x104a, Hi: 0x104b, Stride: 0x1},
+	unicode.Range16{Lo: 0x1362, Hi: 0x1362, Stride: 0x1},
+	unicode.Range16{Lo: 0x1367, Hi: 0x1368, Stride: 0x1},
+	unicode.Range16{Lo: 0x166e, Hi: 0x166e, Stride: 0x1},
+	unicode.Range16{Lo: 0x1735, Hi: 0x1736, Stride: 0x1},
+	unicode.Range16{Lo: 0x1803, Hi: 0x1803, Stride: 0x1},
+	unicode.Range16{Lo: 0x1809, Hi: 0x1809, Stride: 0x1},
+	unicode.Range16{Lo: 0x1944, Hi: 0x1945, Stride: 0x1},
+	unicode.Range16{Lo: 0x1aa8, Hi: 0x1aab, Stride: 0x1},
+	unicode.Range16{Lo: 0x1b5a, Hi: 0x1b5b, Stride: 0x1},
+	unicode.Range16{Lo: 0x1b5e, Hi: 0x1b5f, Stride: 0x1},
+	unicode.Range16{Lo: 0x1c3b, Hi: 0x1c3c, Stride: 0x1},
+	unicode.Range16{Lo: 0x1c7e, Hi: 0x1c7f, Stride: 0x1},
+	unicode.Range16{Lo: 0x203c, Hi: 0x203d, Stride: 0x1},
+	unicode.Range16{Lo: 0x2047, Hi: 0x2049, Stride: 0x1},
+	unicode.Range16{Lo: 0x2e2e, Hi: 0x2e2e, Stride: 0x1},
+	unicode.Range16{Lo: 0x2e3c, Hi: 0x2e3c, Stride: 0x1},
+	unicode.Range16{Lo: 0x3002, Hi: 0x3002, Stride: 0x1},
+	unicode.Range16{Lo: 0xa4ff, Hi: 0xa4ff, Stride: 0x1},
+	unicode.Range16{Lo: 0xa60e, Hi: 0xa60f, Stride: 0x1},
+	unicode.Range16{Lo: 0xa6f3, Hi: 0xa6f3, Stride: 0x1},
+	unicode.Range16{Lo: 0xa6f7, Hi: 0xa6f7, Stride: 0x1},
+	unicode.Range16{Lo: 0xa876, Hi: 0xa877, Stride: 0x1},
+	unicode.Range16{Lo: 0xa8ce, Hi: 0xa8cf, Stride: 0x1},
+	unicode.Range16{Lo: 0xa92f, Hi: 0xa92f, Stride: 0x1},
+	unicode.Range16{Lo: 0xa9c8, Hi: 0xa9c9, Stride: 0x1},
+	unicode.Range16{Lo: 0xaa5d, Hi: 0xaa5f, Stride: 0x1},
+	unicode.Range16{Lo: 0xaaf0, Hi: 0xaaf1, Stride: 0x1},
+	unicode.Range16{Lo: 0xabeb, Hi: 0xabeb, Stride: 0x1},
+	unicode.Range16{Lo: 0xfe56, Hi: 0xfe57, Stride: 0x1},
+	unicode.Range16{Lo: 0xff01, Hi: 0xff01, Stride: 0x1},
+	unicode.Range16{Lo: 0xff1f, Hi: 0xff1f, Stride: 0x1},
+	unicode.Range16{Lo: 0xff61, Hi: 0xff61, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+	unicode.Range32{Lo: 0x10a56, Hi: 0x10a57, Stride: 0x1},
+	unicode.Range32{Lo: 0x11047, Hi: 0x11048, Stride: 0x1},
+	unicode.Range32{Lo: 0x110be, Hi: 0x110c1, Stride: 0x1},
+	unicode.Range32{Lo: 0x11141, Hi: 0x11143, Stride: 0x1},
+	unicode.Range32{Lo: 0x111c5, Hi: 0x111c6, Stride: 0x1},
+	unicode.Range32{Lo: 0x111cd, Hi: 0x111cd, Stride: 0x1},
+	unicode.Range32{Lo: 0x111de, Hi: 0x111df, Stride: 0x1},
+	unicode.Range32{Lo: 0x11238, Hi: 0x11239, Stride: 0x1},
+	unicode.Range32{Lo: 0x1123b, Hi: 0x1123c, Stride: 0x1},
+	unicode.Range32{Lo: 0x112a9, Hi: 0x112a9, Stride: 0x1},
+	unicode.Range32{Lo: 0x1144b, Hi: 0x1144c, Stride: 0x1},
+	unicode.Range32{Lo: 0x115c2, Hi: 0x115c3, Stride: 0x1},
+	unicode.Range32{Lo: 0x115c9, Hi: 0x115d7, Stride: 0x1},
+	unicode.Range32{Lo: 0x11641, Hi: 0x11642, Stride: 0x1},
+	unicode.Range32{Lo: 0x1173c, Hi: 0x1173e, Stride: 0x1},
+	unicode.Range32{Lo: 0x11c41, Hi: 0x11c42, Stride: 0x1},
+	unicode.Range32{Lo: 0x16a6e, Hi: 0x16a6f, Stride: 0x1},
+	unicode.Range32{Lo: 0x16af5, Hi: 0x16af5, Stride: 0x1},
+	unicode.Range32{Lo: 0x16b37, Hi: 0x16b38, Stride: 0x1},
+	unicode.Range32{Lo: 0x16b44, Hi: 0x16b44, Stride: 0x1},
+	unicode.Range32{Lo: 0x1bc9f, Hi: 0x1bc9f, Stride: 0x1},
+	unicode.Range32{Lo: 0x1da88, Hi: 0x1da88, Stride: 0x1},
+	},
+	LatinOffset: 2,
+}
+
+var _SentenceSep = &unicode.RangeTable{
+	R16: []unicode.Range16{
+	unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1},
+	unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
+	unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _SentenceSp = &unicode.RangeTable{
+	R16: []unicode.Range16{
+	unicode.Range16{Lo: 0x9, Hi: 0x9, Stride: 0x1},
+	unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
+	unicode.Range16{Lo: 0x20, Hi: 0x20, Stride: 0x1},
+	unicode.Range16{Lo: 0xa0, Hi: 0xa0, Stride: 0x1},
+	unicode.Range16{Lo: 0x1680, Hi: 0x1680, Stride: 0x1},
+	unicode.Range16{Lo: 0x2000, Hi: 0x200a, Stride: 0x1},
+	unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1},
+	unicode.Range16{Lo: 0x205f, Hi: 0x205f, Stride: 0x1},
+	unicode.Range16{Lo: 0x3000, Hi: 0x3000, Stride: 0x1},
+	},
+	LatinOffset: 4,
+}
+
+var _SentenceUpper = &unicode.RangeTable{
+	R16: []unicode.Range16{
+	unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1},
+	unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1},
+	unicode.Range16{Lo: 0xd8, Hi: 0xde, Stride: 0x1},
+	unicode.Range16{Lo: 0x100, Hi: 0x100, Stride: 0x1},
+	unicode.Range16{Lo: 0x102, Hi: 0x102, Stride: 0x1},
+	unicode.Range16{Lo: 0x104, Hi: 0x104, Stride: 0x1},
+	unicode.Range16{Lo: 0x106, Hi: 0x106, Stride: 0x1},
+	unicode.Range16{Lo: 0x108, Hi: 0x108, Stride: 0x1},
+	unicode.Range16{Lo: 0x10a, Hi: 0x10a, Stride: 0x1},
+	unicode.Range16{Lo: 0x10c, Hi: 0x10c, Stride: 0x1},
+	unicode.Range16{Lo: 0x10e, Hi: 0x10e, Stride: 0x1},
+	unicode.Range16{Lo: 0x110, Hi: 0x110, Stride: 0x1},
+	unicode.Range16{Lo: 0x112, Hi: 0x112, Stride: 0x1},
+	unicode.Range16{Lo: 0x114, Hi: 0x114, Stride: 0x1},
+	unicode.Range16{Lo: 0x116, Hi: 0x116, Stride: 0x1},
+	unicode.Range16{Lo: 0x118, Hi: 0x118, Stride: 0x1},
+	unicode.Range16{Lo: 0x11a, Hi: 0x11a, Stride: 0x1},
+	unicode.Range16{Lo: 0x11c, Hi: 0x11c, Stride: 0x1},
+	unicode.Range16{Lo: 0x11e, Hi: 0x11e, Stride: 0x1},
+	unicode.Range16{Lo: 0x120, Hi: 0x120, Stride: 0x1},
+	unicode.Range16{Lo: 0x122, Hi: 0x122, Stride: 0x1},
+	unicode.Range16{Lo: 0x124, Hi: 0x124, Stride: 0x1},
+	unicode.Range16{Lo: 0x126, Hi: 0x126, Stride: 0x1},
+	unicode.Range16{Lo: 0x128, Hi: 0x128, Stride: 0x1},
+	unicode.Range16{Lo: 0x12a, Hi: 0x12a, Stride: 0x1},
+	unicode.Range16{Lo: 0x12c, Hi: 0x12c, Stride: 0x1},
+	unicode.Range16{Lo: 0x12e, Hi: 0x12e, Stride: 0x1},
+	unicode.Range16{Lo: 0x130, Hi: 0x130, Stride: 0x1},
+	unicode.Range16{Lo: 0x132, Hi: 0x132, Stride: 0x1},
+	unicode.Range16{Lo: 0x134, Hi: 0x134, Stride: 0x1},
+	unicode.Range16{Lo: 0x136, Hi: 0x136, Stride: 0x1},
+	unicode.Range16{Lo: 0x139, Hi: 0x139, Stride: 0x1},
+	unicode.Range16{Lo: 0x13b,
Hi: 0x13b, Stride: 0x1}, + unicode.Range16{Lo: 0x13d, Hi: 0x13d, Stride: 0x1}, + unicode.Range16{Lo: 0x13f, Hi: 0x13f, Stride: 0x1}, + unicode.Range16{Lo: 0x141, Hi: 0x141, Stride: 0x1}, + unicode.Range16{Lo: 0x143, Hi: 0x143, Stride: 0x1}, + unicode.Range16{Lo: 0x145, Hi: 0x145, Stride: 0x1}, + unicode.Range16{Lo: 0x147, Hi: 0x147, Stride: 0x1}, + unicode.Range16{Lo: 0x14a, Hi: 0x14a, Stride: 0x1}, + unicode.Range16{Lo: 0x14c, Hi: 0x14c, Stride: 0x1}, + unicode.Range16{Lo: 0x14e, Hi: 0x14e, Stride: 0x1}, + unicode.Range16{Lo: 0x150, Hi: 0x150, Stride: 0x1}, + unicode.Range16{Lo: 0x152, Hi: 0x152, Stride: 0x1}, + unicode.Range16{Lo: 0x154, Hi: 0x154, Stride: 0x1}, + unicode.Range16{Lo: 0x156, Hi: 0x156, Stride: 0x1}, + unicode.Range16{Lo: 0x158, Hi: 0x158, Stride: 0x1}, + unicode.Range16{Lo: 0x15a, Hi: 0x15a, Stride: 0x1}, + unicode.Range16{Lo: 0x15c, Hi: 0x15c, Stride: 0x1}, + unicode.Range16{Lo: 0x15e, Hi: 0x15e, Stride: 0x1}, + unicode.Range16{Lo: 0x160, Hi: 0x160, Stride: 0x1}, + unicode.Range16{Lo: 0x162, Hi: 0x162, Stride: 0x1}, + unicode.Range16{Lo: 0x164, Hi: 0x164, Stride: 0x1}, + unicode.Range16{Lo: 0x166, Hi: 0x166, Stride: 0x1}, + unicode.Range16{Lo: 0x168, Hi: 0x168, Stride: 0x1}, + unicode.Range16{Lo: 0x16a, Hi: 0x16a, Stride: 0x1}, + unicode.Range16{Lo: 0x16c, Hi: 0x16c, Stride: 0x1}, + unicode.Range16{Lo: 0x16e, Hi: 0x16e, Stride: 0x1}, + unicode.Range16{Lo: 0x170, Hi: 0x170, Stride: 0x1}, + unicode.Range16{Lo: 0x172, Hi: 0x172, Stride: 0x1}, + unicode.Range16{Lo: 0x174, Hi: 0x174, Stride: 0x1}, + unicode.Range16{Lo: 0x176, Hi: 0x176, Stride: 0x1}, + unicode.Range16{Lo: 0x178, Hi: 0x179, Stride: 0x1}, + unicode.Range16{Lo: 0x17b, Hi: 0x17b, Stride: 0x1}, + unicode.Range16{Lo: 0x17d, Hi: 0x17d, Stride: 0x1}, + unicode.Range16{Lo: 0x181, Hi: 0x182, Stride: 0x1}, + unicode.Range16{Lo: 0x184, Hi: 0x184, Stride: 0x1}, + unicode.Range16{Lo: 0x186, Hi: 0x187, Stride: 0x1}, + unicode.Range16{Lo: 0x189, Hi: 0x18b, Stride: 0x1}, + unicode.Range16{Lo: 0x18e, Hi: 0x191, Stride: 0x1}, + unicode.Range16{Lo: 0x193, Hi: 0x194, Stride: 0x1}, + unicode.Range16{Lo: 0x196, Hi: 0x198, Stride: 0x1}, + unicode.Range16{Lo: 0x19c, Hi: 0x19d, Stride: 0x1}, + unicode.Range16{Lo: 0x19f, Hi: 0x1a0, Stride: 0x1}, + unicode.Range16{Lo: 0x1a2, Hi: 0x1a2, Stride: 0x1}, + unicode.Range16{Lo: 0x1a4, Hi: 0x1a4, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6, Hi: 0x1a7, Stride: 0x1}, + unicode.Range16{Lo: 0x1a9, Hi: 0x1a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ac, Hi: 0x1ac, Stride: 0x1}, + unicode.Range16{Lo: 0x1ae, Hi: 0x1af, Stride: 0x1}, + unicode.Range16{Lo: 0x1b1, Hi: 0x1b3, Stride: 0x1}, + unicode.Range16{Lo: 0x1b5, Hi: 0x1b5, Stride: 0x1}, + unicode.Range16{Lo: 0x1b7, Hi: 0x1b8, Stride: 0x1}, + unicode.Range16{Lo: 0x1bc, Hi: 0x1bc, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4, Hi: 0x1c5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c7, Hi: 0x1c8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ca, Hi: 0x1cb, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd, Hi: 0x1cd, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf, Hi: 0x1cf, Stride: 0x1}, + unicode.Range16{Lo: 0x1d1, Hi: 0x1d1, Stride: 0x1}, + unicode.Range16{Lo: 0x1d3, Hi: 0x1d3, Stride: 0x1}, + unicode.Range16{Lo: 0x1d5, Hi: 0x1d5, Stride: 0x1}, + unicode.Range16{Lo: 0x1d7, Hi: 0x1d7, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9, Hi: 0x1d9, Stride: 0x1}, + unicode.Range16{Lo: 0x1db, Hi: 0x1db, Stride: 0x1}, + unicode.Range16{Lo: 0x1de, Hi: 0x1de, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0, Hi: 0x1e0, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2, Hi: 0x1e2, Stride: 0x1}, + 
unicode.Range16{Lo: 0x1e4, Hi: 0x1e4, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6, Hi: 0x1e6, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8, Hi: 0x1e8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea, Hi: 0x1ea, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec, Hi: 0x1ec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee, Hi: 0x1ee, Stride: 0x1}, + unicode.Range16{Lo: 0x1f1, Hi: 0x1f2, Stride: 0x1}, + unicode.Range16{Lo: 0x1f4, Hi: 0x1f4, Stride: 0x1}, + unicode.Range16{Lo: 0x1f6, Hi: 0x1f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa, Hi: 0x1fa, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc, Hi: 0x1fc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe, Hi: 0x1fe, Stride: 0x1}, + unicode.Range16{Lo: 0x200, Hi: 0x200, Stride: 0x1}, + unicode.Range16{Lo: 0x202, Hi: 0x202, Stride: 0x1}, + unicode.Range16{Lo: 0x204, Hi: 0x204, Stride: 0x1}, + unicode.Range16{Lo: 0x206, Hi: 0x206, Stride: 0x1}, + unicode.Range16{Lo: 0x208, Hi: 0x208, Stride: 0x1}, + unicode.Range16{Lo: 0x20a, Hi: 0x20a, Stride: 0x1}, + unicode.Range16{Lo: 0x20c, Hi: 0x20c, Stride: 0x1}, + unicode.Range16{Lo: 0x20e, Hi: 0x20e, Stride: 0x1}, + unicode.Range16{Lo: 0x210, Hi: 0x210, Stride: 0x1}, + unicode.Range16{Lo: 0x212, Hi: 0x212, Stride: 0x1}, + unicode.Range16{Lo: 0x214, Hi: 0x214, Stride: 0x1}, + unicode.Range16{Lo: 0x216, Hi: 0x216, Stride: 0x1}, + unicode.Range16{Lo: 0x218, Hi: 0x218, Stride: 0x1}, + unicode.Range16{Lo: 0x21a, Hi: 0x21a, Stride: 0x1}, + unicode.Range16{Lo: 0x21c, Hi: 0x21c, Stride: 0x1}, + unicode.Range16{Lo: 0x21e, Hi: 0x21e, Stride: 0x1}, + unicode.Range16{Lo: 0x220, Hi: 0x220, Stride: 0x1}, + unicode.Range16{Lo: 0x222, Hi: 0x222, Stride: 0x1}, + unicode.Range16{Lo: 0x224, Hi: 0x224, Stride: 0x1}, + unicode.Range16{Lo: 0x226, Hi: 0x226, Stride: 0x1}, + unicode.Range16{Lo: 0x228, Hi: 0x228, Stride: 0x1}, + unicode.Range16{Lo: 0x22a, Hi: 0x22a, Stride: 0x1}, + unicode.Range16{Lo: 0x22c, Hi: 0x22c, Stride: 0x1}, + unicode.Range16{Lo: 0x22e, Hi: 0x22e, Stride: 0x1}, + unicode.Range16{Lo: 0x230, Hi: 0x230, Stride: 0x1}, + unicode.Range16{Lo: 0x232, Hi: 0x232, Stride: 0x1}, + unicode.Range16{Lo: 0x23a, Hi: 0x23b, Stride: 0x1}, + unicode.Range16{Lo: 0x23d, Hi: 0x23e, Stride: 0x1}, + unicode.Range16{Lo: 0x241, Hi: 0x241, Stride: 0x1}, + unicode.Range16{Lo: 0x243, Hi: 0x246, Stride: 0x1}, + unicode.Range16{Lo: 0x248, Hi: 0x248, Stride: 0x1}, + unicode.Range16{Lo: 0x24a, Hi: 0x24a, Stride: 0x1}, + unicode.Range16{Lo: 0x24c, Hi: 0x24c, Stride: 0x1}, + unicode.Range16{Lo: 0x24e, Hi: 0x24e, Stride: 0x1}, + unicode.Range16{Lo: 0x370, Hi: 0x370, Stride: 0x1}, + unicode.Range16{Lo: 0x372, Hi: 0x372, Stride: 0x1}, + unicode.Range16{Lo: 0x376, Hi: 0x376, Stride: 0x1}, + unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1}, + unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1}, + unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1}, + unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1}, + unicode.Range16{Lo: 0x38e, Hi: 0x38f, Stride: 0x1}, + unicode.Range16{Lo: 0x391, Hi: 0x3a1, Stride: 0x1}, + unicode.Range16{Lo: 0x3a3, Hi: 0x3ab, Stride: 0x1}, + unicode.Range16{Lo: 0x3cf, Hi: 0x3cf, Stride: 0x1}, + unicode.Range16{Lo: 0x3d2, Hi: 0x3d4, Stride: 0x1}, + unicode.Range16{Lo: 0x3d8, Hi: 0x3d8, Stride: 0x1}, + unicode.Range16{Lo: 0x3da, Hi: 0x3da, Stride: 0x1}, + unicode.Range16{Lo: 0x3dc, Hi: 0x3dc, Stride: 0x1}, + unicode.Range16{Lo: 0x3de, Hi: 0x3de, Stride: 0x1}, + unicode.Range16{Lo: 0x3e0, Hi: 0x3e0, Stride: 0x1}, + unicode.Range16{Lo: 0x3e2, Hi: 0x3e2, Stride: 0x1}, + unicode.Range16{Lo: 0x3e4, Hi: 0x3e4, Stride: 0x1}, + unicode.Range16{Lo: 0x3e6, Hi: 0x3e6, 
Stride: 0x1}, + unicode.Range16{Lo: 0x3e8, Hi: 0x3e8, Stride: 0x1}, + unicode.Range16{Lo: 0x3ea, Hi: 0x3ea, Stride: 0x1}, + unicode.Range16{Lo: 0x3ec, Hi: 0x3ec, Stride: 0x1}, + unicode.Range16{Lo: 0x3ee, Hi: 0x3ee, Stride: 0x1}, + unicode.Range16{Lo: 0x3f4, Hi: 0x3f4, Stride: 0x1}, + unicode.Range16{Lo: 0x3f7, Hi: 0x3f7, Stride: 0x1}, + unicode.Range16{Lo: 0x3f9, Hi: 0x3fa, Stride: 0x1}, + unicode.Range16{Lo: 0x3fd, Hi: 0x42f, Stride: 0x1}, + unicode.Range16{Lo: 0x460, Hi: 0x460, Stride: 0x1}, + unicode.Range16{Lo: 0x462, Hi: 0x462, Stride: 0x1}, + unicode.Range16{Lo: 0x464, Hi: 0x464, Stride: 0x1}, + unicode.Range16{Lo: 0x466, Hi: 0x466, Stride: 0x1}, + unicode.Range16{Lo: 0x468, Hi: 0x468, Stride: 0x1}, + unicode.Range16{Lo: 0x46a, Hi: 0x46a, Stride: 0x1}, + unicode.Range16{Lo: 0x46c, Hi: 0x46c, Stride: 0x1}, + unicode.Range16{Lo: 0x46e, Hi: 0x46e, Stride: 0x1}, + unicode.Range16{Lo: 0x470, Hi: 0x470, Stride: 0x1}, + unicode.Range16{Lo: 0x472, Hi: 0x472, Stride: 0x1}, + unicode.Range16{Lo: 0x474, Hi: 0x474, Stride: 0x1}, + unicode.Range16{Lo: 0x476, Hi: 0x476, Stride: 0x1}, + unicode.Range16{Lo: 0x478, Hi: 0x478, Stride: 0x1}, + unicode.Range16{Lo: 0x47a, Hi: 0x47a, Stride: 0x1}, + unicode.Range16{Lo: 0x47c, Hi: 0x47c, Stride: 0x1}, + unicode.Range16{Lo: 0x47e, Hi: 0x47e, Stride: 0x1}, + unicode.Range16{Lo: 0x480, Hi: 0x480, Stride: 0x1}, + unicode.Range16{Lo: 0x48a, Hi: 0x48a, Stride: 0x1}, + unicode.Range16{Lo: 0x48c, Hi: 0x48c, Stride: 0x1}, + unicode.Range16{Lo: 0x48e, Hi: 0x48e, Stride: 0x1}, + unicode.Range16{Lo: 0x490, Hi: 0x490, Stride: 0x1}, + unicode.Range16{Lo: 0x492, Hi: 0x492, Stride: 0x1}, + unicode.Range16{Lo: 0x494, Hi: 0x494, Stride: 0x1}, + unicode.Range16{Lo: 0x496, Hi: 0x496, Stride: 0x1}, + unicode.Range16{Lo: 0x498, Hi: 0x498, Stride: 0x1}, + unicode.Range16{Lo: 0x49a, Hi: 0x49a, Stride: 0x1}, + unicode.Range16{Lo: 0x49c, Hi: 0x49c, Stride: 0x1}, + unicode.Range16{Lo: 0x49e, Hi: 0x49e, Stride: 0x1}, + unicode.Range16{Lo: 0x4a0, Hi: 0x4a0, Stride: 0x1}, + unicode.Range16{Lo: 0x4a2, Hi: 0x4a2, Stride: 0x1}, + unicode.Range16{Lo: 0x4a4, Hi: 0x4a4, Stride: 0x1}, + unicode.Range16{Lo: 0x4a6, Hi: 0x4a6, Stride: 0x1}, + unicode.Range16{Lo: 0x4a8, Hi: 0x4a8, Stride: 0x1}, + unicode.Range16{Lo: 0x4aa, Hi: 0x4aa, Stride: 0x1}, + unicode.Range16{Lo: 0x4ac, Hi: 0x4ac, Stride: 0x1}, + unicode.Range16{Lo: 0x4ae, Hi: 0x4ae, Stride: 0x1}, + unicode.Range16{Lo: 0x4b0, Hi: 0x4b0, Stride: 0x1}, + unicode.Range16{Lo: 0x4b2, Hi: 0x4b2, Stride: 0x1}, + unicode.Range16{Lo: 0x4b4, Hi: 0x4b4, Stride: 0x1}, + unicode.Range16{Lo: 0x4b6, Hi: 0x4b6, Stride: 0x1}, + unicode.Range16{Lo: 0x4b8, Hi: 0x4b8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ba, Hi: 0x4ba, Stride: 0x1}, + unicode.Range16{Lo: 0x4bc, Hi: 0x4bc, Stride: 0x1}, + unicode.Range16{Lo: 0x4be, Hi: 0x4be, Stride: 0x1}, + unicode.Range16{Lo: 0x4c0, Hi: 0x4c1, Stride: 0x1}, + unicode.Range16{Lo: 0x4c3, Hi: 0x4c3, Stride: 0x1}, + unicode.Range16{Lo: 0x4c5, Hi: 0x4c5, Stride: 0x1}, + unicode.Range16{Lo: 0x4c7, Hi: 0x4c7, Stride: 0x1}, + unicode.Range16{Lo: 0x4c9, Hi: 0x4c9, Stride: 0x1}, + unicode.Range16{Lo: 0x4cb, Hi: 0x4cb, Stride: 0x1}, + unicode.Range16{Lo: 0x4cd, Hi: 0x4cd, Stride: 0x1}, + unicode.Range16{Lo: 0x4d0, Hi: 0x4d0, Stride: 0x1}, + unicode.Range16{Lo: 0x4d2, Hi: 0x4d2, Stride: 0x1}, + unicode.Range16{Lo: 0x4d4, Hi: 0x4d4, Stride: 0x1}, + unicode.Range16{Lo: 0x4d6, Hi: 0x4d6, Stride: 0x1}, + unicode.Range16{Lo: 0x4d8, Hi: 0x4d8, Stride: 0x1}, + unicode.Range16{Lo: 0x4da, Hi: 0x4da, Stride: 0x1}, + unicode.Range16{Lo: 0x4dc, 
Hi: 0x4dc, Stride: 0x1}, + unicode.Range16{Lo: 0x4de, Hi: 0x4de, Stride: 0x1}, + unicode.Range16{Lo: 0x4e0, Hi: 0x4e0, Stride: 0x1}, + unicode.Range16{Lo: 0x4e2, Hi: 0x4e2, Stride: 0x1}, + unicode.Range16{Lo: 0x4e4, Hi: 0x4e4, Stride: 0x1}, + unicode.Range16{Lo: 0x4e6, Hi: 0x4e6, Stride: 0x1}, + unicode.Range16{Lo: 0x4e8, Hi: 0x4e8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ea, Hi: 0x4ea, Stride: 0x1}, + unicode.Range16{Lo: 0x4ec, Hi: 0x4ec, Stride: 0x1}, + unicode.Range16{Lo: 0x4ee, Hi: 0x4ee, Stride: 0x1}, + unicode.Range16{Lo: 0x4f0, Hi: 0x4f0, Stride: 0x1}, + unicode.Range16{Lo: 0x4f2, Hi: 0x4f2, Stride: 0x1}, + unicode.Range16{Lo: 0x4f4, Hi: 0x4f4, Stride: 0x1}, + unicode.Range16{Lo: 0x4f6, Hi: 0x4f6, Stride: 0x1}, + unicode.Range16{Lo: 0x4f8, Hi: 0x4f8, Stride: 0x1}, + unicode.Range16{Lo: 0x4fa, Hi: 0x4fa, Stride: 0x1}, + unicode.Range16{Lo: 0x4fc, Hi: 0x4fc, Stride: 0x1}, + unicode.Range16{Lo: 0x4fe, Hi: 0x4fe, Stride: 0x1}, + unicode.Range16{Lo: 0x500, Hi: 0x500, Stride: 0x1}, + unicode.Range16{Lo: 0x502, Hi: 0x502, Stride: 0x1}, + unicode.Range16{Lo: 0x504, Hi: 0x504, Stride: 0x1}, + unicode.Range16{Lo: 0x506, Hi: 0x506, Stride: 0x1}, + unicode.Range16{Lo: 0x508, Hi: 0x508, Stride: 0x1}, + unicode.Range16{Lo: 0x50a, Hi: 0x50a, Stride: 0x1}, + unicode.Range16{Lo: 0x50c, Hi: 0x50c, Stride: 0x1}, + unicode.Range16{Lo: 0x50e, Hi: 0x50e, Stride: 0x1}, + unicode.Range16{Lo: 0x510, Hi: 0x510, Stride: 0x1}, + unicode.Range16{Lo: 0x512, Hi: 0x512, Stride: 0x1}, + unicode.Range16{Lo: 0x514, Hi: 0x514, Stride: 0x1}, + unicode.Range16{Lo: 0x516, Hi: 0x516, Stride: 0x1}, + unicode.Range16{Lo: 0x518, Hi: 0x518, Stride: 0x1}, + unicode.Range16{Lo: 0x51a, Hi: 0x51a, Stride: 0x1}, + unicode.Range16{Lo: 0x51c, Hi: 0x51c, Stride: 0x1}, + unicode.Range16{Lo: 0x51e, Hi: 0x51e, Stride: 0x1}, + unicode.Range16{Lo: 0x520, Hi: 0x520, Stride: 0x1}, + unicode.Range16{Lo: 0x522, Hi: 0x522, Stride: 0x1}, + unicode.Range16{Lo: 0x524, Hi: 0x524, Stride: 0x1}, + unicode.Range16{Lo: 0x526, Hi: 0x526, Stride: 0x1}, + unicode.Range16{Lo: 0x528, Hi: 0x528, Stride: 0x1}, + unicode.Range16{Lo: 0x52a, Hi: 0x52a, Stride: 0x1}, + unicode.Range16{Lo: 0x52c, Hi: 0x52c, Stride: 0x1}, + unicode.Range16{Lo: 0x52e, Hi: 0x52e, Stride: 0x1}, + unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1}, + unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1}, + unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1}, + unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1}, + unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1e00, Hi: 0x1e00, Stride: 0x1}, + unicode.Range16{Lo: 0x1e02, Hi: 0x1e02, Stride: 0x1}, + unicode.Range16{Lo: 0x1e04, Hi: 0x1e04, Stride: 0x1}, + unicode.Range16{Lo: 0x1e06, Hi: 0x1e06, Stride: 0x1}, + unicode.Range16{Lo: 0x1e08, Hi: 0x1e08, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0a, Hi: 0x1e0a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0c, Hi: 0x1e0c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0e, Hi: 0x1e0e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e10, Hi: 0x1e10, Stride: 0x1}, + unicode.Range16{Lo: 0x1e12, Hi: 0x1e12, Stride: 0x1}, + unicode.Range16{Lo: 0x1e14, Hi: 0x1e14, Stride: 0x1}, + unicode.Range16{Lo: 0x1e16, Hi: 0x1e16, Stride: 0x1}, + unicode.Range16{Lo: 0x1e18, Hi: 0x1e18, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1a, Hi: 0x1e1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1c, Hi: 0x1e1c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1e, Hi: 0x1e1e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e20, Hi: 0x1e20, Stride: 0x1}, + unicode.Range16{Lo: 0x1e22, Hi: 0x1e22, Stride: 0x1}, + unicode.Range16{Lo: 0x1e24, 
Hi: 0x1e24, Stride: 0x1}, + unicode.Range16{Lo: 0x1e26, Hi: 0x1e26, Stride: 0x1}, + unicode.Range16{Lo: 0x1e28, Hi: 0x1e28, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2a, Hi: 0x1e2a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2c, Hi: 0x1e2c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2e, Hi: 0x1e2e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e30, Hi: 0x1e30, Stride: 0x1}, + unicode.Range16{Lo: 0x1e32, Hi: 0x1e32, Stride: 0x1}, + unicode.Range16{Lo: 0x1e34, Hi: 0x1e34, Stride: 0x1}, + unicode.Range16{Lo: 0x1e36, Hi: 0x1e36, Stride: 0x1}, + unicode.Range16{Lo: 0x1e38, Hi: 0x1e38, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3a, Hi: 0x1e3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3c, Hi: 0x1e3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3e, Hi: 0x1e3e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e40, Hi: 0x1e40, Stride: 0x1}, + unicode.Range16{Lo: 0x1e42, Hi: 0x1e42, Stride: 0x1}, + unicode.Range16{Lo: 0x1e44, Hi: 0x1e44, Stride: 0x1}, + unicode.Range16{Lo: 0x1e46, Hi: 0x1e46, Stride: 0x1}, + unicode.Range16{Lo: 0x1e48, Hi: 0x1e48, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4a, Hi: 0x1e4a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4c, Hi: 0x1e4c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4e, Hi: 0x1e4e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e50, Hi: 0x1e50, Stride: 0x1}, + unicode.Range16{Lo: 0x1e52, Hi: 0x1e52, Stride: 0x1}, + unicode.Range16{Lo: 0x1e54, Hi: 0x1e54, Stride: 0x1}, + unicode.Range16{Lo: 0x1e56, Hi: 0x1e56, Stride: 0x1}, + unicode.Range16{Lo: 0x1e58, Hi: 0x1e58, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5a, Hi: 0x1e5a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5c, Hi: 0x1e5c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5e, Hi: 0x1e5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e60, Hi: 0x1e60, Stride: 0x1}, + unicode.Range16{Lo: 0x1e62, Hi: 0x1e62, Stride: 0x1}, + unicode.Range16{Lo: 0x1e64, Hi: 0x1e64, Stride: 0x1}, + unicode.Range16{Lo: 0x1e66, Hi: 0x1e66, Stride: 0x1}, + unicode.Range16{Lo: 0x1e68, Hi: 0x1e68, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6a, Hi: 0x1e6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6c, Hi: 0x1e6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6e, Hi: 0x1e6e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e70, Hi: 0x1e70, Stride: 0x1}, + unicode.Range16{Lo: 0x1e72, Hi: 0x1e72, Stride: 0x1}, + unicode.Range16{Lo: 0x1e74, Hi: 0x1e74, Stride: 0x1}, + unicode.Range16{Lo: 0x1e76, Hi: 0x1e76, Stride: 0x1}, + unicode.Range16{Lo: 0x1e78, Hi: 0x1e78, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7a, Hi: 0x1e7a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7c, Hi: 0x1e7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7e, Hi: 0x1e7e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e80, Hi: 0x1e80, Stride: 0x1}, + unicode.Range16{Lo: 0x1e82, Hi: 0x1e82, Stride: 0x1}, + unicode.Range16{Lo: 0x1e84, Hi: 0x1e84, Stride: 0x1}, + unicode.Range16{Lo: 0x1e86, Hi: 0x1e86, Stride: 0x1}, + unicode.Range16{Lo: 0x1e88, Hi: 0x1e88, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8a, Hi: 0x1e8a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8c, Hi: 0x1e8c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8e, Hi: 0x1e8e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e90, Hi: 0x1e90, Stride: 0x1}, + unicode.Range16{Lo: 0x1e92, Hi: 0x1e92, Stride: 0x1}, + unicode.Range16{Lo: 0x1e94, Hi: 0x1e94, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9e, Hi: 0x1e9e, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea0, Hi: 0x1ea0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea2, Hi: 0x1ea2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea4, Hi: 0x1ea4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea6, Hi: 0x1ea6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea8, Hi: 0x1ea8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eaa, Hi: 0x1eaa, Stride: 0x1}, 
+ unicode.Range16{Lo: 0x1eac, Hi: 0x1eac, Stride: 0x1}, + unicode.Range16{Lo: 0x1eae, Hi: 0x1eae, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb0, Hi: 0x1eb0, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb2, Hi: 0x1eb2, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb4, Hi: 0x1eb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb6, Hi: 0x1eb6, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb8, Hi: 0x1eb8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eba, Hi: 0x1eba, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebc, Hi: 0x1ebc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebe, Hi: 0x1ebe, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec0, Hi: 0x1ec0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec2, Hi: 0x1ec2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec4, Hi: 0x1ec4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec6, Hi: 0x1ec6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec8, Hi: 0x1ec8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eca, Hi: 0x1eca, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecc, Hi: 0x1ecc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ece, Hi: 0x1ece, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed0, Hi: 0x1ed0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed2, Hi: 0x1ed2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed4, Hi: 0x1ed4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed6, Hi: 0x1ed6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed8, Hi: 0x1ed8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eda, Hi: 0x1eda, Stride: 0x1}, + unicode.Range16{Lo: 0x1edc, Hi: 0x1edc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ede, Hi: 0x1ede, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee0, Hi: 0x1ee0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee2, Hi: 0x1ee2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee4, Hi: 0x1ee4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee6, Hi: 0x1ee6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee8, Hi: 0x1ee8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eea, Hi: 0x1eea, Stride: 0x1}, + unicode.Range16{Lo: 0x1eec, Hi: 0x1eec, Stride: 0x1}, + unicode.Range16{Lo: 0x1eee, Hi: 0x1eee, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef0, Hi: 0x1ef0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef2, Hi: 0x1ef2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef4, Hi: 0x1ef4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef6, Hi: 0x1ef6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef8, Hi: 0x1ef8, Stride: 0x1}, + unicode.Range16{Lo: 0x1efa, Hi: 0x1efa, Stride: 0x1}, + unicode.Range16{Lo: 0x1efc, Hi: 0x1efc, Stride: 0x1}, + unicode.Range16{Lo: 0x1efe, Hi: 0x1efe, Stride: 0x1}, + unicode.Range16{Lo: 0x1f08, Hi: 0x1f0f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f28, Hi: 0x1f2f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f38, Hi: 0x1f3f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5f, Hi: 0x1f5f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f68, Hi: 0x1f6f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f88, Hi: 0x1f8f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f98, Hi: 0x1f9f, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa8, Hi: 0x1faf, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb8, Hi: 0x1fbc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc8, Hi: 0x1fcc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd8, Hi: 0x1fdb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe8, Hi: 0x1fec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff8, Hi: 0x1ffc, Stride: 0x1}, + unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1}, + unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1}, + unicode.Range16{Lo: 0x210b, Hi: 0x210d, Stride: 0x1}, + unicode.Range16{Lo: 
0x2110, Hi: 0x2112, Stride: 0x1}, + unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1}, + unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1}, + unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1}, + unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1}, + unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1}, + unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1}, + unicode.Range16{Lo: 0x2130, Hi: 0x2133, Stride: 0x1}, + unicode.Range16{Lo: 0x213e, Hi: 0x213f, Stride: 0x1}, + unicode.Range16{Lo: 0x2145, Hi: 0x2145, Stride: 0x1}, + unicode.Range16{Lo: 0x2160, Hi: 0x216f, Stride: 0x1}, + unicode.Range16{Lo: 0x2183, Hi: 0x2183, Stride: 0x1}, + unicode.Range16{Lo: 0x24b6, Hi: 0x24cf, Stride: 0x1}, + unicode.Range16{Lo: 0x2c00, Hi: 0x2c2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c60, Hi: 0x2c60, Stride: 0x1}, + unicode.Range16{Lo: 0x2c62, Hi: 0x2c64, Stride: 0x1}, + unicode.Range16{Lo: 0x2c67, Hi: 0x2c67, Stride: 0x1}, + unicode.Range16{Lo: 0x2c69, Hi: 0x2c69, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6b, Hi: 0x2c6b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6d, Hi: 0x2c70, Stride: 0x1}, + unicode.Range16{Lo: 0x2c72, Hi: 0x2c72, Stride: 0x1}, + unicode.Range16{Lo: 0x2c75, Hi: 0x2c75, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7e, Hi: 0x2c80, Stride: 0x1}, + unicode.Range16{Lo: 0x2c82, Hi: 0x2c82, Stride: 0x1}, + unicode.Range16{Lo: 0x2c84, Hi: 0x2c84, Stride: 0x1}, + unicode.Range16{Lo: 0x2c86, Hi: 0x2c86, Stride: 0x1}, + unicode.Range16{Lo: 0x2c88, Hi: 0x2c88, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8a, Hi: 0x2c8a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8c, Hi: 0x2c8c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8e, Hi: 0x2c8e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c90, Hi: 0x2c90, Stride: 0x1}, + unicode.Range16{Lo: 0x2c92, Hi: 0x2c92, Stride: 0x1}, + unicode.Range16{Lo: 0x2c94, Hi: 0x2c94, Stride: 0x1}, + unicode.Range16{Lo: 0x2c96, Hi: 0x2c96, Stride: 0x1}, + unicode.Range16{Lo: 0x2c98, Hi: 0x2c98, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9a, Hi: 0x2c9a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9c, Hi: 0x2c9c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9e, Hi: 0x2c9e, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca0, Hi: 0x2ca0, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca2, Hi: 0x2ca2, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca4, Hi: 0x2ca4, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca6, Hi: 0x2ca6, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca8, Hi: 0x2ca8, Stride: 0x1}, + unicode.Range16{Lo: 0x2caa, Hi: 0x2caa, Stride: 0x1}, + unicode.Range16{Lo: 0x2cac, Hi: 0x2cac, Stride: 0x1}, + unicode.Range16{Lo: 0x2cae, Hi: 0x2cae, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb0, Hi: 0x2cb0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb2, Hi: 0x2cb2, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb4, Hi: 0x2cb4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb6, Hi: 0x2cb6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb8, Hi: 0x2cb8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cba, Hi: 0x2cba, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbc, Hi: 0x2cbc, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbe, Hi: 0x2cbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc0, Hi: 0x2cc0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc2, Hi: 0x2cc2, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc4, Hi: 0x2cc4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc6, Hi: 0x2cc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc8, Hi: 0x2cc8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cca, Hi: 0x2cca, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccc, Hi: 0x2ccc, Stride: 0x1}, + unicode.Range16{Lo: 0x2cce, Hi: 0x2cce, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd0, Hi: 0x2cd0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd2, Hi: 0x2cd2, 
Stride: 0x1}, + unicode.Range16{Lo: 0x2cd4, Hi: 0x2cd4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd6, Hi: 0x2cd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd8, Hi: 0x2cd8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cda, Hi: 0x2cda, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdc, Hi: 0x2cdc, Stride: 0x1}, + unicode.Range16{Lo: 0x2cde, Hi: 0x2cde, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce0, Hi: 0x2ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce2, Hi: 0x2ce2, Stride: 0x1}, + unicode.Range16{Lo: 0x2ceb, Hi: 0x2ceb, Stride: 0x1}, + unicode.Range16{Lo: 0x2ced, Hi: 0x2ced, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf2, Stride: 0x1}, + unicode.Range16{Lo: 0xa640, Hi: 0xa640, Stride: 0x1}, + unicode.Range16{Lo: 0xa642, Hi: 0xa642, Stride: 0x1}, + unicode.Range16{Lo: 0xa644, Hi: 0xa644, Stride: 0x1}, + unicode.Range16{Lo: 0xa646, Hi: 0xa646, Stride: 0x1}, + unicode.Range16{Lo: 0xa648, Hi: 0xa648, Stride: 0x1}, + unicode.Range16{Lo: 0xa64a, Hi: 0xa64a, Stride: 0x1}, + unicode.Range16{Lo: 0xa64c, Hi: 0xa64c, Stride: 0x1}, + unicode.Range16{Lo: 0xa64e, Hi: 0xa64e, Stride: 0x1}, + unicode.Range16{Lo: 0xa650, Hi: 0xa650, Stride: 0x1}, + unicode.Range16{Lo: 0xa652, Hi: 0xa652, Stride: 0x1}, + unicode.Range16{Lo: 0xa654, Hi: 0xa654, Stride: 0x1}, + unicode.Range16{Lo: 0xa656, Hi: 0xa656, Stride: 0x1}, + unicode.Range16{Lo: 0xa658, Hi: 0xa658, Stride: 0x1}, + unicode.Range16{Lo: 0xa65a, Hi: 0xa65a, Stride: 0x1}, + unicode.Range16{Lo: 0xa65c, Hi: 0xa65c, Stride: 0x1}, + unicode.Range16{Lo: 0xa65e, Hi: 0xa65e, Stride: 0x1}, + unicode.Range16{Lo: 0xa660, Hi: 0xa660, Stride: 0x1}, + unicode.Range16{Lo: 0xa662, Hi: 0xa662, Stride: 0x1}, + unicode.Range16{Lo: 0xa664, Hi: 0xa664, Stride: 0x1}, + unicode.Range16{Lo: 0xa666, Hi: 0xa666, Stride: 0x1}, + unicode.Range16{Lo: 0xa668, Hi: 0xa668, Stride: 0x1}, + unicode.Range16{Lo: 0xa66a, Hi: 0xa66a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66c, Hi: 0xa66c, Stride: 0x1}, + unicode.Range16{Lo: 0xa680, Hi: 0xa680, Stride: 0x1}, + unicode.Range16{Lo: 0xa682, Hi: 0xa682, Stride: 0x1}, + unicode.Range16{Lo: 0xa684, Hi: 0xa684, Stride: 0x1}, + unicode.Range16{Lo: 0xa686, Hi: 0xa686, Stride: 0x1}, + unicode.Range16{Lo: 0xa688, Hi: 0xa688, Stride: 0x1}, + unicode.Range16{Lo: 0xa68a, Hi: 0xa68a, Stride: 0x1}, + unicode.Range16{Lo: 0xa68c, Hi: 0xa68c, Stride: 0x1}, + unicode.Range16{Lo: 0xa68e, Hi: 0xa68e, Stride: 0x1}, + unicode.Range16{Lo: 0xa690, Hi: 0xa690, Stride: 0x1}, + unicode.Range16{Lo: 0xa692, Hi: 0xa692, Stride: 0x1}, + unicode.Range16{Lo: 0xa694, Hi: 0xa694, Stride: 0x1}, + unicode.Range16{Lo: 0xa696, Hi: 0xa696, Stride: 0x1}, + unicode.Range16{Lo: 0xa698, Hi: 0xa698, Stride: 0x1}, + unicode.Range16{Lo: 0xa69a, Hi: 0xa69a, Stride: 0x1}, + unicode.Range16{Lo: 0xa722, Hi: 0xa722, Stride: 0x1}, + unicode.Range16{Lo: 0xa724, Hi: 0xa724, Stride: 0x1}, + unicode.Range16{Lo: 0xa726, Hi: 0xa726, Stride: 0x1}, + unicode.Range16{Lo: 0xa728, Hi: 0xa728, Stride: 0x1}, + unicode.Range16{Lo: 0xa72a, Hi: 0xa72a, Stride: 0x1}, + unicode.Range16{Lo: 0xa72c, Hi: 0xa72c, Stride: 0x1}, + unicode.Range16{Lo: 0xa72e, Hi: 0xa72e, Stride: 0x1}, + unicode.Range16{Lo: 0xa732, Hi: 0xa732, Stride: 0x1}, + unicode.Range16{Lo: 0xa734, Hi: 0xa734, Stride: 0x1}, + unicode.Range16{Lo: 0xa736, Hi: 0xa736, Stride: 0x1}, + unicode.Range16{Lo: 0xa738, Hi: 0xa738, Stride: 0x1}, + unicode.Range16{Lo: 0xa73a, Hi: 0xa73a, Stride: 0x1}, + unicode.Range16{Lo: 0xa73c, Hi: 0xa73c, Stride: 0x1}, + unicode.Range16{Lo: 0xa73e, Hi: 0xa73e, Stride: 0x1}, + unicode.Range16{Lo: 0xa740, Hi: 0xa740, Stride: 0x1}, + 
unicode.Range16{Lo: 0xa742, Hi: 0xa742, Stride: 0x1}, + unicode.Range16{Lo: 0xa744, Hi: 0xa744, Stride: 0x1}, + unicode.Range16{Lo: 0xa746, Hi: 0xa746, Stride: 0x1}, + unicode.Range16{Lo: 0xa748, Hi: 0xa748, Stride: 0x1}, + unicode.Range16{Lo: 0xa74a, Hi: 0xa74a, Stride: 0x1}, + unicode.Range16{Lo: 0xa74c, Hi: 0xa74c, Stride: 0x1}, + unicode.Range16{Lo: 0xa74e, Hi: 0xa74e, Stride: 0x1}, + unicode.Range16{Lo: 0xa750, Hi: 0xa750, Stride: 0x1}, + unicode.Range16{Lo: 0xa752, Hi: 0xa752, Stride: 0x1}, + unicode.Range16{Lo: 0xa754, Hi: 0xa754, Stride: 0x1}, + unicode.Range16{Lo: 0xa756, Hi: 0xa756, Stride: 0x1}, + unicode.Range16{Lo: 0xa758, Hi: 0xa758, Stride: 0x1}, + unicode.Range16{Lo: 0xa75a, Hi: 0xa75a, Stride: 0x1}, + unicode.Range16{Lo: 0xa75c, Hi: 0xa75c, Stride: 0x1}, + unicode.Range16{Lo: 0xa75e, Hi: 0xa75e, Stride: 0x1}, + unicode.Range16{Lo: 0xa760, Hi: 0xa760, Stride: 0x1}, + unicode.Range16{Lo: 0xa762, Hi: 0xa762, Stride: 0x1}, + unicode.Range16{Lo: 0xa764, Hi: 0xa764, Stride: 0x1}, + unicode.Range16{Lo: 0xa766, Hi: 0xa766, Stride: 0x1}, + unicode.Range16{Lo: 0xa768, Hi: 0xa768, Stride: 0x1}, + unicode.Range16{Lo: 0xa76a, Hi: 0xa76a, Stride: 0x1}, + unicode.Range16{Lo: 0xa76c, Hi: 0xa76c, Stride: 0x1}, + unicode.Range16{Lo: 0xa76e, Hi: 0xa76e, Stride: 0x1}, + unicode.Range16{Lo: 0xa779, Hi: 0xa779, Stride: 0x1}, + unicode.Range16{Lo: 0xa77b, Hi: 0xa77b, Stride: 0x1}, + unicode.Range16{Lo: 0xa77d, Hi: 0xa77e, Stride: 0x1}, + unicode.Range16{Lo: 0xa780, Hi: 0xa780, Stride: 0x1}, + unicode.Range16{Lo: 0xa782, Hi: 0xa782, Stride: 0x1}, + unicode.Range16{Lo: 0xa784, Hi: 0xa784, Stride: 0x1}, + unicode.Range16{Lo: 0xa786, Hi: 0xa786, Stride: 0x1}, + unicode.Range16{Lo: 0xa78b, Hi: 0xa78b, Stride: 0x1}, + unicode.Range16{Lo: 0xa78d, Hi: 0xa78d, Stride: 0x1}, + unicode.Range16{Lo: 0xa790, Hi: 0xa790, Stride: 0x1}, + unicode.Range16{Lo: 0xa792, Hi: 0xa792, Stride: 0x1}, + unicode.Range16{Lo: 0xa796, Hi: 0xa796, Stride: 0x1}, + unicode.Range16{Lo: 0xa798, Hi: 0xa798, Stride: 0x1}, + unicode.Range16{Lo: 0xa79a, Hi: 0xa79a, Stride: 0x1}, + unicode.Range16{Lo: 0xa79c, Hi: 0xa79c, Stride: 0x1}, + unicode.Range16{Lo: 0xa79e, Hi: 0xa79e, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a0, Hi: 0xa7a0, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a2, Hi: 0xa7a2, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a4, Hi: 0xa7a4, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a6, Hi: 0xa7a6, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a8, Hi: 0xa7a8, Stride: 0x1}, + unicode.Range16{Lo: 0xa7aa, Hi: 0xa7ae, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b4, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b6, Hi: 0xa7b6, Stride: 0x1}, + unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10400, Hi: 0x10427, Stride: 0x1}, + unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1}, + unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1}, + unicode.Range32{Lo: 0x118a0, Hi: 0x118bf, Stride: 0x1}, + unicode.Range32{Lo: 0x1d400, Hi: 0x1d419, Stride: 0x1}, + unicode.Range32{Lo: 0x1d434, Hi: 0x1d44d, Stride: 0x1}, + unicode.Range32{Lo: 0x1d468, Hi: 0x1d481, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49c, Hi: 0x1d49c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b5, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4d0, Hi: 0x1d4e9, Stride: 0x1}, + unicode.Range32{Lo: 
0x1d504, Hi: 0x1d505, Stride: 0x1}, + unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1}, + unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1}, + unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d538, Hi: 0x1d539, Stride: 0x1}, + unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1}, + unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1}, + unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1}, + unicode.Range32{Lo: 0x1d56c, Hi: 0x1d585, Stride: 0x1}, + unicode.Range32{Lo: 0x1d5a0, Hi: 0x1d5b9, Stride: 0x1}, + unicode.Range32{Lo: 0x1d5d4, Hi: 0x1d5ed, Stride: 0x1}, + unicode.Range32{Lo: 0x1d608, Hi: 0x1d621, Stride: 0x1}, + unicode.Range32{Lo: 0x1d63c, Hi: 0x1d655, Stride: 0x1}, + unicode.Range32{Lo: 0x1d670, Hi: 0x1d689, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6e2, Hi: 0x1d6fa, Stride: 0x1}, + unicode.Range32{Lo: 0x1d71c, Hi: 0x1d734, Stride: 0x1}, + unicode.Range32{Lo: 0x1d756, Hi: 0x1d76e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d790, Hi: 0x1d7a8, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7ca, Hi: 0x1d7ca, Stride: 0x1}, + unicode.Range32{Lo: 0x1e900, Hi: 0x1e921, Stride: 0x1}, + unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1}, + unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1}, + unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1}, + }, + LatinOffset: 3, +} + +type _SentenceRuneRange unicode.RangeTable + +func _SentenceRuneType(r rune) *_SentenceRuneRange { + switch { + case unicode.Is(_SentenceATerm, r): + return (*_SentenceRuneRange)(_SentenceATerm) + case unicode.Is(_SentenceCR, r): + return (*_SentenceRuneRange)(_SentenceCR) + case unicode.Is(_SentenceClose, r): + return (*_SentenceRuneRange)(_SentenceClose) + case unicode.Is(_SentenceExtend, r): + return (*_SentenceRuneRange)(_SentenceExtend) + case unicode.Is(_SentenceFormat, r): + return (*_SentenceRuneRange)(_SentenceFormat) + case unicode.Is(_SentenceLF, r): + return (*_SentenceRuneRange)(_SentenceLF) + case unicode.Is(_SentenceLower, r): + return (*_SentenceRuneRange)(_SentenceLower) + case unicode.Is(_SentenceNumeric, r): + return (*_SentenceRuneRange)(_SentenceNumeric) + case unicode.Is(_SentenceOLetter, r): + return (*_SentenceRuneRange)(_SentenceOLetter) + case unicode.Is(_SentenceSContinue, r): + return (*_SentenceRuneRange)(_SentenceSContinue) + case unicode.Is(_SentenceSTerm, r): + return (*_SentenceRuneRange)(_SentenceSTerm) + case unicode.Is(_SentenceSep, r): + return (*_SentenceRuneRange)(_SentenceSep) + case unicode.Is(_SentenceSp, r): + return (*_SentenceRuneRange)(_SentenceSp) + case unicode.Is(_SentenceUpper, r): + return (*_SentenceRuneRange)(_SentenceUpper) + default: + return nil + } +} +func (rng *_SentenceRuneRange) String() string { + switch (*unicode.RangeTable)(rng) { + case _SentenceATerm: + return "ATerm" + case _SentenceCR: + return "CR" + case _SentenceClose: + return "Close" + case _SentenceExtend: + return "Extend" + case _SentenceFormat: + return "Format" + case _SentenceLF: + return "LF" + case _SentenceLower: + return "Lower" + case _SentenceNumeric: + return "Numeric" + case _SentenceOLetter: + return "OLetter" + case _SentenceSContinue: + return "SContinue" + case _SentenceSTerm: + return "STerm" + case _SentenceSep: + return "Sep" + case _SentenceSp: + return "Sp" + case _SentenceUpper: + return "Upper" + default: + return "Other" + } +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb 
b/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb new file mode 100644 index 0000000..422e4e5 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb @@ -0,0 +1,335 @@ +#!/usr/bin/env ruby +# +# This script has been updated to accept more command-line arguments: +# +# -u, --url URL to process +# -m, --machine Machine name +# -p, --properties Properties to add to the machine +# -o, --output Write output to file +# +# Updated by: Marty Schoch +# +# This script uses the unicode spec to generate a Ragel state machine +# that recognizes unicode alphanumeric characters. It generates 5 +# character classes: uupper, ulower, ualpha, udigit, and ualnum. +# Currently supported encodings are UTF-8 [default] and UCS-4. +# +# Usage: unicode2ragel.rb [options] +# -e, --encoding [ucs4 | utf8] Data encoding +# -h, --help Show this message +# +# This script was originally written as part of the Ferret search +# engine library. +# +# Author: Rakan El-Khalil + +require 'optparse' +require 'open-uri' + +ENCODINGS = [ :utf8, :ucs4 ] +ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } +DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" +DEFAULT_MACHINE_NAME= "WChar" + +### +# Display vars & default option + +TOTAL_WIDTH = 80 +RANGE_WIDTH = 23 +@encoding = :utf8 +@chart_url = DEFAULT_CHART_URL +machine_name = DEFAULT_MACHINE_NAME +properties = [] +@output = $stdout + +### +# Option parsing + +cli_opts = OptionParser.new do |opts| + opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| + @encoding = o.downcase.to_sym + end + opts.on("-h", "--help", "Show this message") do + puts opts + exit + end + opts.on("-u", "--url URL", "URL to process") do |o| + @chart_url = o + end + opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| + machine_name = o + end + opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| + properties = o + end + opts.on("-o", "--output FILE", "output file") do |o| + @output = File.new(o, "w+") + end +end + +cli_opts.parse(ARGV) +unless ENCODINGS.member? @encoding + puts "Invalid encoding: #{@encoding}" + puts cli_opts + exit +end + +## + # Downloads the document at url and yields every alpha line's hex +# range and description. + +def each_alpha( url, property ) + open( url ) do |file| + file.each_line do |line| + next if line =~ /^#/; + next if line !~ /; #{property} #/; + + range, description = line.split(/;/) + range.strip! + description.gsub!(/.*#/, '').strip! + + if range =~ /\.\./ + start, stop = range.split '..' + else start = stop = range + end + + yield start.hex .. stop.hex, description + end + end +end + +### + # Formats to hex at minimum width + +def to_hex( n ) + r = "%0X" % n + r = "0#{r}" unless (r.length % 2).zero? + r +end + +### +# UCS4 is just a straight hex conversion of the unicode codepoint.
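The UCD data lines each_alpha consumes look like "0041..005A ; Alphabetic # L& [26] LATIN CAPITAL LETTER A..Z". As a rough Go analogue of that parsing step (the function name parseUCDLine and the error handling are mine, not the script's), it could be sketched as:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUCDLine extracts the codepoint range and trailing description from
// one data line of a Unicode property file, mirroring what each_alpha does
// with regexes in Ruby. Comment-only lines are skipped.
func parseUCDLine(line string) (lo, hi rune, desc string, ok bool) {
	if strings.HasPrefix(line, "#") || !strings.Contains(line, ";") {
		return 0, 0, "", false
	}
	fields := strings.SplitN(line, ";", 2)
	rng := strings.TrimSpace(fields[0])
	// The description is everything after the "#" comment marker.
	if i := strings.Index(fields[1], "#"); i >= 0 {
		desc = strings.TrimSpace(fields[1][i+1:])
	}
	parts := strings.SplitN(rng, "..", 2)
	l, err := strconv.ParseUint(parts[0], 16, 32)
	if err != nil {
		return 0, 0, "", false
	}
	h := l
	if len(parts) == 2 {
		if h, err = strconv.ParseUint(parts[1], 16, 32); err != nil {
			return 0, 0, "", false
		}
	}
	return rune(l), rune(h), desc, true
}

func main() {
	lo, hi, desc, ok := parseUCDLine("0041..005A    ; Alphabetic # L&  [26] LATIN CAPITAL LETTER A..Z")
	fmt.Println(lo, hi, desc, ok) // 65 90 "L&  [26] LATIN CAPITAL LETTER A..Z" true
}

As in the Ruby start.hex .. stop.hex range, both ends are inclusive, and a single codepoint (no "..") yields lo == hi.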
+ +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. + +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! +end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. 
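The bit layout in the comment above is exactly what to_utf8_enc packs by hand. A minimal Go cross-check of that arithmetic against the standard library (the helper name encodeUTF8 is mine; surrogate codepoints are not handled, matching the sketch's assumptions):

package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeUTF8 packs a codepoint into UTF-8 bytes using the same boundary
// table the script uses (0x7f, 0x7ff, 0xffff, 0x10ffff).
func encodeUTF8(n rune) []byte {
	switch {
	case n <= 0x7f:
		return []byte{byte(n)}
	case n <= 0x7ff:
		return []byte{0xc0 | byte(n>>6), 0x80 | byte(n&0x3f)}
	case n <= 0xffff:
		return []byte{0xe0 | byte(n>>12), 0x80 | byte(n>>6&0x3f), 0x80 | byte(n&0x3f)}
	default:
		return []byte{0xf0 | byte(n>>18), 0x80 | byte(n>>12&0x3f), 0x80 | byte(n>>6&0x3f), 0x80 | byte(n&0x3f)}
	}
}

func main() {
	for _, r := range []rune{'A', 0x3a9, 0x4e16, 0x1f600} {
		buf := make([]byte, 4)
		n := utf8.EncodeRune(buf, r)
		// Both columns should agree for valid (non-surrogate) scalars.
		fmt.Printf("U+%04X manual=% x stdlib=% x\n", r, encodeUTF8(r), buf[:n])
	}
}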
+ +def count_codepoints( code ) + code.split(' ').inject(1) do |acc, elt| + if elt =~ /0x(.+)\.\.0x(.+)/ + if @encoding == :utf8 + acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) + else + acc * ($2.hex - $1.hex + 1) + end + else + acc + end + end +end + +def is_valid?( range, desc, codes ) + spec_count = 1 + spec_count = $1.to_i if desc =~ /\[(\d+)\]/ + range_count = range.end - range.begin + 1 + + sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } + sum == spec_count and sum == range_count +end + +## +# Generate the state machine to stdout + +def generate_machine( name, property ) + pipe = " " + @output.puts " #{name} = " + each_alpha( @chart_url, property ) do |range, desc| + + codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) + + #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless + # is_valid? range, desc, codes + + range_width = codes.map { |a| a.size }.max + range_width = RANGE_WIDTH if range_width < RANGE_WIDTH + + desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 + desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH + + if desc.size > desc_width + desc = desc[0..desc_width - 4] + "..." + end + + codes.each_with_index do |r, idx| + desc = "" unless idx.zero? + code = "%-#{range_width}s" % r + @output.puts " #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts " ;" + @output.puts "" +end + +@output.puts <= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +func (n *node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration. +type Tree struct { + root *node + size int +} + +// New returns an empty Tree +func New() *Tree { + return NewFromMap(nil) +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]interface{}) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert(k, v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// Insert is used to add a new entry or update +// an existing entry. Returns if updated.
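Before the implementation below, a brief usage sketch of this insert-or-update contract; the import path matches this vendored package, and the keys and values are illustrative:

package main

import (
	"fmt"

	"github.com/armon/go-radix"
)

func main() {
	t := radix.New()

	// First insert of a key: no previous value, updated == false.
	old, updated := t.Insert("foo", 1)
	fmt.Println(old, updated) // <nil> false

	// Re-inserting the same key replaces the value and reports the old one.
	old, updated = t.Insert("foo", 2)
	fmt.Println(old, updated) // 1 true

	fmt.Println(t.Len()) // 1
}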
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { + var parent *node + n := t.root + search := s + for { + // Handle key exhaustion + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + n.leaf.val = v + return old, true + } + + n.leaf = &leafNode{ + key: s, + val: v, + } + t.size++ + return nil, false + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + e := edge{ + label: search[0], + node: &node{ + leaf: &leafNode{ + key: s, + val: v, + }, + prefix: search, + }, + } + parent.addEdge(e) + t.size++ + return nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: search[:commonPrefix], + } + parent.replaceEdge(edge{ + label: search[0], + node: child, + }) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + key: s, + val: v, + } + + // If the new key is a subset, add it to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + return nil, false + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: search, + }, + }) + return nil, false + } +} + +// Delete is used to delete a key, returning the previous +// value and if it was deleted +func (t *Tree) Delete(s string) (interface{}, bool) { + var parent *node + var label byte + n := t.root + search := s + for { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + break + } + goto DELETE + } + + // Look for an edge + parent = n + label = search[0] + n = n.getEdge(label) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false + +DELETE: + // Delete the leaf + leaf := n.leaf + n.leaf = nil + t.size-- + + // Check if we should delete this node from the parent + if parent != nil && len(n.edges) == 0 { + parent.delEdge(label) + } + + // Check if we should merge this node + if n != t.root && len(n.edges) == 1 { + n.mergeChild() + } + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + + return leaf.val, true +} + +func (n *node) mergeChild() { + e := n.edges[0] + child := e.node + n.prefix = n.prefix + child.prefix + n.leaf = child.leaf + n.edges = child.edges +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match.
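A quick sketch of this longest-prefix behavior, together with WalkPrefix (defined further below); the keys and values are illustrative:

package main

import (
	"fmt"

	"github.com/armon/go-radix"
)

func main() {
	t := radix.NewFromMap(map[string]interface{}{
		"foo":         1,
		"foo/bar":     2,
		"foo/bar/baz": 3,
	})

	// LongestPrefix finds the longest stored key that prefixes the query.
	k, v, ok := t.LongestPrefix("foo/bar/qux")
	fmt.Println(k, v, ok) // foo/bar 2 true

	// WalkPrefix visits every entry stored under the given prefix, in order.
	t.WalkPrefix("foo/bar", func(k string, v interface{}) bool {
		fmt.Println(k, v) // foo/bar 2, then foo/bar/baz 3
		return false      // keep walking
	})
}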
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { + var last *leafNode + n := t.root + search := s + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaustion + if len(search) == 0 { + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return "", nil, false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() (string, interface{}, bool) { + n := t.root + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return "", nil, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() (string, interface{}, bool) { + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + break + } + return "", nil, false +} + +// Walk is used to walk the tree +func (t *Tree) Walk(fn WalkFn) { + recursiveWalk(t.root, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { + n := t.root + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if strings.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } + +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (t *Tree) WalkPath(path string, fn WalkFn) { + n := t.root + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaustion + if len(search) == 0 { + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 788fe6e..212fe25 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -15,6 +15,12 @@ type Config struct { Endpoint string SigningRegion string SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signing name can be overridden based on metadata the + // service has. + SigningNameDerived bool } // ConfigProvider provides a generic way for a service client to receive @@ -85,6 +91,6 @@ func (c *Client) AddDebugHandlers() { return } - c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) - c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 1313478..a397b0d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,11 +1,11 @@ package client import ( - "math/rand" - "sync" + "strconv" "time" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" ) // DefaultRetryer implements basic retry logic using exponential backoff for @@ -15,11 +15,11 @@ import ( // the MaxRetries method: // // type retryer struct { -// service.DefaultRetryer +// client.DefaultRetryer // } // // // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -30,25 +30,27 @@ func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } -var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes minTime := 30 throttle := d.shouldThrottle(r) if throttle { + if delay, ok := getRetryDelay(r); ok { + return delay + } + minTime = 500 } retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && retryCount > 8 { + if throttle && retryCount > 8 { retryCount = 8 + } else if retryCount > 13 { + retryCount = 13 } - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) return time.Duration(delay) * time.Millisecond } @@ -60,7 +62,7 @@ func
(d DefaultRetryer) ShouldRetry(r *request.Request) bool { return *r.Retryable } - if r.HTTPResponse.StatusCode >= 500 { + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } return r.IsErrorRetryable() || d.shouldThrottle(r) @@ -68,29 +70,47 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { // ShouldThrottle returns true if the request should be throttled. func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() } - return r.IsErrorThrottle() -} -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source + return true } -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true } -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 1f39c91..ce9fb89 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -44,22 +44,57 @@ func (reader *teeReaderCloser) Close() error { return reader.Source.Close() } +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + func logRequest(r *request.Request) { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) return } if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's // Body as a NoOpCloser and will not be reset after read by the HTTP // client reader. 
r.ResetBody() } - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) } const logRespMsg = `DEBUG: Response %s/%s Details: @@ -72,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s: %s -----------------------------------------------------` +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + func logResponse(r *request.Request) { lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } } handlerFn := func(req *request.Request) { - body, err := httputil.DumpResponse(req.HTTPResponse, false) + b, err := httputil.DumpResponse(req.HTTPResponse, false) if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) return } - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) - if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + lw.Logger.Log(string(b)) } } @@ -106,3 +158,27 @@ func logResponse(r *request.Request) { Name: handlerName, Fn: handlerFn, }) } + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
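The request and response handlers above only dump HTTP bodies when the request's log level matches LogDebugWithHTTPBody; a hedged wiring sketch (the bucket name is a placeholder, and the call will fail at runtime without valid credentials, which is fine for observing the log output), ahead of the header-only response handler defined next:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// LogDebugWithHTTPBody makes logRequest/logResponse include payloads.
	sess := session.Must(session.NewSession(&aws.Config{
		LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
	}))

	svc := s3.New(sess)
	_, _ = svc.ListObjects(&s3.ListObjectsInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket name
	})
}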
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 4778056..920e9fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -3,6 +3,7 @@ package metadata // ClientInfo wraps immutable data from the client.Client structure. type ClientInfo struct { ServiceName string + ServiceID string APIVersion string Endpoint string SigningName string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index d1f31f1..5421b5d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -95,7 +95,7 @@ type Config struct { // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. + // the client.DefaultRetryer will be used. // // When both Retryer and MaxRetries are non-nil, the former is used and // the latter ignored. @@ -151,6 +151,15 @@ type Config struct { // with accelerate. S3UseAccelerate *bool + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This option is only @@ -168,7 +177,7 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool - // Instructs the endpiont to be generated for a service client to + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. // @@ -336,6 +345,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config { func (c *Config) WithS3UseAccelerate(enable bool) *Config { c.S3UseAccelerate = &enable return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining.
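As with the SDK's other With* setters, the new option chains onto aws.Config. A short sketch using the setter defined just below (the region value is illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Each With* setter mutates the config and returns it for chaining.
	cfg := aws.NewConfig().
		WithRegion("us-east-1").
		WithS3DisableContentMD5Validation(true)

	fmt.Println(aws.BoolValue(cfg.S3DisableContentMD5Validation)) // true
}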
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + + } // WithUseDualStack sets a config UseDualStack value returning a Config @@ -435,6 +453,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3UseAccelerate = other.S3UseAccelerate } + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go index e8cf93d..8fdda53 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go @@ -4,9 +4,9 @@ package aws import "time" -// An emptyCtx is a copy of the the Go 1.7 context.emptyCtx type. This -// is copied to provide a 1.6 and 1.5 safe version of context that is compatible -// with Go 1.7's Context. +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. // // An emptyCtx is never canceled, has no values, and has no deadline. It is not // struct{}, since vars of this type must have distinct addresses. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index 3b73a7d..ff5d58e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time { return time.Time{} } +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds since Epoch or time.Time{} if the pointer is nil. +func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + return time.Time{} +} + // TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". // The result is undefined if the Unix time cannot be represented by an int64. // This includes calling TimeUnixMilli on a zero Time.
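A usage note on the new epoch helpers above: as written, SecondsTimeValue divides its input by 1000 before calling time.Unix, so both helpers effectively take millisecond-resolution inputs. A small sketch showing exactly what the code does (the timestamps are illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ms := aws.Int64(1550000000123) // milliseconds since epoch, illustrative

	// MillisecondsTimeValue: time.Unix(0, ms*1000000).
	fmt.Println(aws.MillisecondsTimeValue(ms).UnixNano() / 1000000) // 1550000000123

	// SecondsTimeValue divides by 1000 first: time.Unix(ms/1000, 0).
	fmt.Println(aws.SecondsTimeValue(ms).Unix()) // 1550000000
}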
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 495e3ef..cfcddf3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -3,12 +3,10 @@ package corehandlers import ( "bytes" "fmt" - "io" "io/ioutil" "net/http" "net/url" "regexp" - "runtime" "strconv" "time" @@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { length, _ = strconv.ParseInt(slength, 10, 64) } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } } } @@ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen } }} -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. -var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - var reStatusCode = regexp.MustCompile(`^(\d{3})`) // ValidateReqSigHandler is a request handler to ensure that the request's diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 0000000..a15f496 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec_env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. 
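A hedged sketch of attaching this handler (defined just below) to a session's build phase by hand; depending on SDK version the default handler list may already wire it up, and the environment value here is illustrative:

package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// With AWS_EXECUTION_ENV set, the handler appends "exec_env/<value>"
	// to the User-Agent of every request built by this session.
	os.Setenv("AWS_EXECUTION_ENV", "AWS_Lambda_go1.x") // illustrative value

	sess := session.Must(session.NewSession())
	sess.Handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
}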
+var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 42416fc..ed08699 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -178,7 +178,8 @@ func (e *Expiry) IsExpired() bool { type Credentials struct { creds Value forceRefresh bool - m sync.Mutex + + m sync.RWMutex provider Provider } @@ -201,6 +202,17 @@ func NewCredentials(provider Provider) *Credentials { // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired, so retrieve the credentials while taking the + // full lock. c.m.Lock() defer c.m.Unlock() @@ -234,8 +246,8 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.Lock() - defer c.m.Unlock() + c.m.RLock() + defer c.m.RUnlock() return c.isExpired() } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index c397495..0ed791b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -4,7 +4,6 @@ import ( "bufio" "encoding/json" "fmt" - "path" "strings" "time" @@ -12,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // ProviderName provides a name of EC2Role provider @@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct { Message string } -const iamSecurityCredsPath = "/iam/security-credentials" +const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. // If there are no credentials, or there is an error making or receiving the request @@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // If the credentials cannot be found, or there is an error reading the response // and error will be returned. func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 0000000..152d785 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,46 @@ +// Package csm provides Client Side Monitoring (CSM) which enables sending metrics +// via UDP connection.
Using the Start function will enable the reporting of +// metrics on a given port. If Start is called again with different parameters, +// a panic will occur. +// +// Pause can be called to pause any metrics publishing on a given port. Sessions +// that have had their handlers modified via InjectHandlers may still be used. +// However, the handlers will act as a no-op, meaning no metrics will be published. +// +// Example: +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// r.InjectHandlers(&sess.Handlers) +// +// client := s3.New(sess) +// resp, err := client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Resume monitoring +// r.Continue() +// +// Start returns a Reporter that is used to enable or disable monitoring. If +// access to the Reporter is required later, calling Get will return the Reporter +// singleton. +// +// Example: +// r := csm.Get() +// r.Continue() +package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go new file mode 100644 index 0000000..2f0c6ea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -0,0 +1,67 @@ +package csm + +import ( + "fmt" + "sync" +) + +var ( + lock sync.Mutex +) + +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + +// Start will start a long running goroutine to capture +// client side metrics. Calling Start multiple times will only +// start the metric listener once and will panic if a different +// client ID or port is passed in. +// +// Example: +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a Reporter if one exists; if one does not exist, nil will +// be returned.
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 0000000..4b0d630 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,51 @@ +package csm + +import ( + "strconv" + "time" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 0000000..514fc37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,54 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(&ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(&ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(&ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 0000000..11082e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,231 @@ +package csm + +import ( + 
"encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // DefaultPort is used when no port is specified + DefaultPort = "31000" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + setError(&m, awserr) + } + } + + rep.metricsCh.Push(m) +} + +func setError(m *metric, err awserr.Error) { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + "SerializationError", + request.CanceledErrorCode: + m.SDKException = &code + m.SDKExceptionMessage = &msg + default: + m.AWSException = &code + m.AWSExceptionMessage = &msg + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + } + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? Probably should just log + b, err := json.Marshal(m) + if err != nil { + continue + } + + rep.conn.Write(b) + } + } +} + +// Pause will pause the metric channel preventing any new metrics from +// being added. 
+func (rep *Reporter) Pause() { + lock.Lock() + defer lock.Unlock() + + if rep == nil { + return + } + + rep.close() +} + +// Continue will reopen the metric channel and allow for monitoring +// to be resumed. +func (rep *Reporter) Continue() { + lock.Lock() + defer lock.Unlock() + if rep == nil { + return + } + + if !rep.metricsCh.IsPaused() { + return + } + + rep.metricsCh.Continue() +} + +// InjectHandlers will enable client side metrics and inject the proper +// handlers to handle how metrics are sent. +// +// Example: +// // Start must be called in order to inject the correct handlers +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// +// sess := session.NewSession() +// r.InjectHandlers(&sess.Handlers) +// +// // create a new service client with our client side metric session +// svc := s3.New(sess) +func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { + if rep == nil { + return + } + + apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric} + apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric} + + handlers.Complete.PushFrontNamed(apiCallHandler) + handlers.Complete.PushFrontNamed(apiCallAttemptHandler) + + handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 07afe3b..5040a2f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -9,6 +9,7 @@ package defaults import ( "fmt" + "net" "net/http" "net/url" "os" @@ -72,6 +73,7 @@ func Handlers() request.Handlers { handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) handlers.Validate.AfterEachFn = request.HandlerListStopOnError handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) handlers.Build.AfterEachFn = request.HandlerListStopOnError handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) @@ -90,14 +92,25 @@ func Handlers() request.Handlers { func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { return credentials.NewCredentials(&credentials.ChainProvider{ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - }, + Providers: CredProviders(cfg, handlers), }) } +// CredProviders returns the slice of providers used in +// the default credential chain. +// +// For applications that need to use some other provider (for example, use +// different environment variables for legacy reasons) but still fall back on +// the default chain of providers, this allows the default chain to be updated +// automatically. +func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { + return []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + } +} + const ( httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" @@ -118,14 +131,43 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P return ec2RoleProvider(cfg, handlers) } +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { var errMsg string parsed, err := url.Parse(u) if err != nil { errMsg = fmt.Sprintf("invalid URL, %v", err) - } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { - errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } } if len(errMsg) > 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 984407a..c215cd3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -4,12 +4,12 @@ import ( "encoding/json" "fmt" "net/http" - "path" "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // GetMetadata uses the path provided to request information from the EC2 @@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), + HTTPPath: sdkuri.PathJoin("/meta-data", p), } output := &metadataOutput{} @@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "user-data"), + HTTPPath: "/user-data", } output := &metadataOutput{} @@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), + HTTPPath: sdkuri.PathJoin("/dynamic", p), } output := &metadataOutput{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 5b4379d..ef5f732 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -1,5 +1,10 @@ // Package ec2metadata provides the client for
making API calls to the // EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". Setting this environment variable +// to true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true (case insensitive). package ec2metadata import ( @@ -7,17 +12,21 @@ import ( "errors" "io" "net/http" + "os" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/request" ) // ServiceName is the name of the service. const ServiceName = "ec2metadata" +const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" // A EC2Metadata is an EC2 Metadata service Client. type EC2Metadata struct { @@ -75,6 +84,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) + // Disable the EC2 Metadata service if the environment variable is set. + // This short-circuits the service's functionality to always fail to send + // requests. + if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + // Add additional options to the service config for _, option := range opts { option(svc.Client) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index d6d87e4..8e823be 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -24,6 +24,7 @@ const ( EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -33,7 +34,8 @@ const ( // AWS China partition's regions. const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). ) // AWS GovCloud (US) partition's regions. @@ -43,18 +45,26 @@ const ( // Service identifiers const ( + A4bServiceID = "a4b" // A4b. AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. ApigatewayServiceID = "apigateway" // Apigateway. ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. Appstream2ServiceID = "appstream2" // Appstream2. AthenaServiceID = "athena" // Athena. AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. BatchServiceID = "batch" // Batch. BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + Cloud9ServiceID = "cloud9" // Cloud9. ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
CloudformationServiceID = "cloudformation" // Cloudformation. CloudfrontServiceID = "cloudfront" // Cloudfront. CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. CloudsearchServiceID = "cloudsearch" // Cloudsearch. CloudtrailServiceID = "cloudtrail" // Cloudtrail. CodebuildServiceID = "codebuild" // Codebuild. @@ -65,9 +75,11 @@ const ( CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. CognitoIdpServiceID = "cognito-idp" // CognitoIdp. CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. ConfigServiceID = "config" // Config. CurServiceID = "cur" // Cur. DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. DevicefarmServiceID = "devicefarm" // Devicefarm. DirectconnectServiceID = "directconnect" // Directconnect. DiscoveryServiceID = "discovery" // Discovery. @@ -89,8 +101,12 @@ const ( EsServiceID = "es" // Es. EventsServiceID = "events" // Events. FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. GameliftServiceID = "gamelift" // Gamelift. GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. HealthServiceID = "health" // Health. IamServiceID = "iam" // Iam. ImportexportServiceID = "importexport" // Importexport. @@ -98,17 +114,24 @@ const ( IotServiceID = "iot" // Iot. KinesisServiceID = "kinesis" // Kinesis. KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. KmsServiceID = "kms" // Kms. LambdaServiceID = "lambda" // Lambda. LightsailServiceID = "lightsail" // Lightsail. LogsServiceID = "logs" // Logs. MachinelearningServiceID = "machinelearning" // Machinelearning. MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. ModelsLexServiceID = "models.lex" // ModelsLex. MonitoringServiceID = "monitoring" // Monitoring. MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. OpsworksServiceID = "opsworks" // Opsworks. OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. OrganizationsServiceID = "organizations" // Organizations. @@ -117,12 +140,18 @@ const ( RdsServiceID = "rds" // Rds. RedshiftServiceID = "redshift" // Redshift. RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. Route53ServiceID = "route53" // Route53. Route53domainsServiceID = "route53domains" // Route53domains. RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. S3ServiceID = "s3" // S3. + SagemakerServiceID = "sagemaker" // Sagemaker. SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. 
ShieldServiceID = "shield" // Shield. SmsServiceID = "sms" // Sms. SnowballServiceID = "snowball" // Snowball. @@ -136,9 +165,11 @@ const ( SupportServiceID = "support" // Support. SwfServiceID = "swf" // Swf. TaggingServiceID = "tagging" // Tagging. + TranslateServiceID = "translate" // Translate. WafServiceID = "waf" // Waf. WafRegionalServiceID = "waf-regional" // WafRegional. WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. WorkspacesServiceID = "workspaces" // Workspaces. XrayServiceID = "xray" // Xray. ) @@ -216,6 +247,9 @@ var awsPartition = partition{ "eu-west-2": region{ Description: "EU (London)", }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -233,6 +267,12 @@ var awsPartition = partition{ }, }, Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ @@ -245,6 +285,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -252,6 +293,43 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "apigateway": service{ Endpoints: endpoints{ @@ -260,9 +338,12 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -287,6 +368,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -311,9 +393,14 @@ var awsPartition = partition{ "athena": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "autoscaling": service{ @@ -330,6 +417,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -337,12 +425,38 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: 
"autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "budgets": service{ @@ -358,11 +472,35 @@ var awsPartition = partition{ }, }, }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "clouddirectory": service{ Endpoints: endpoints{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -382,6 +520,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -418,6 +557,27 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cloudsearch": service{ Endpoints: endpoints{ @@ -445,6 +605,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -456,25 +617,63 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: 
"codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "codecommit": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -490,6 +689,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -501,23 +701,37 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "codestar": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-identity": service{ @@ -526,6 +740,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, @@ -541,6 +756,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, @@ -556,6 +772,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, @@ -565,6 +782,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -577,6 +805,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -600,6 +829,21 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "devicefarm": service{ Endpoints: endpoints{ @@ -618,6 +862,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -643,6 +888,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -655,14 +901,17 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -680,6 +929,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -708,6 +958,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -730,12 +981,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -746,12 +1001,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -770,6 +1029,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -789,6 +1049,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -799,16 +1060,19 @@ var awsPartition = partition{ "elasticfilesystem": service{ Endpoints: endpoints{ + "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, 
@@ -820,6 +1084,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -844,6 +1109,7 @@ var awsPartition = partition{ }, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", @@ -896,6 +1162,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -915,6 +1182,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -924,6 +1192,24 @@ var awsPartition = partition{ }, "firehose": service{ + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "eu-west-1": endpoint{}, "us-east-1": endpoint{}, @@ -937,10 +1223,15 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -952,25 +1243,79 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "health": service{ + "glue": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "iam": service{ - PartitionEndpoint: "aws-global", + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": 
endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, Endpoints: endpoints{ @@ -1004,8 +1349,11 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1018,6 +1366,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -1040,6 +1389,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1055,6 +1405,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ @@ -1067,6 +1427,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1082,9 +1443,12 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1095,12 +1459,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1118,6 +1485,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1138,6 +1506,65 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + 
"us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -1154,6 +1581,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1161,6 +1589,12 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, "mobileanalytics": service{ Endpoints: endpoints{ @@ -1174,7 +1608,9 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "monitoring": service{ @@ -1191,6 +1627,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1208,6 +1645,35 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "opsworks": service{ Endpoints: endpoints{ @@ -1216,9 +1682,11 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1229,9 +1697,15 @@ var awsPartition = partition{ "opsworks-cm": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "organizations": service{ @@ -1260,10 +1734,21 @@ var awsPartition = partition{ "polly": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "rds": service{ @@ -1278,6 +1763,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{dnsSuffix}", @@ -1299,6 +1785,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1309,9 +1796,31 @@ var awsPartition = partition{ "rekognition": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "route53": service{ @@ -1340,7 +1849,19 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "s3": service{ @@ -1355,26 +1876,27 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{ - Hostname: "s3-ap-northeast-1.amazonaws.com", + Hostname: "s3.ap-northeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{ - Hostname: "s3-ap-southeast-1.amazonaws.com", + Hostname: "s3.ap-southeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ap-southeast-2": endpoint{ - Hostname: "s3-ap-southeast-2.amazonaws.com", + Hostname: "s3.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{ - Hostname: "s3-eu-west-1.amazonaws.com", + Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -1383,7 +1905,7 @@ var awsPartition = partition{ }, }, "sa-east-1": endpoint{ - Hostname: "s3-sa-east-1.amazonaws.com", + Hostname: "s3.sa-east-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "us-east-1": endpoint{ @@ -1392,15 +1914,26 @@ var awsPartition = partition{ }, "us-east-2": endpoint{}, "us-west-1": endpoint{ - Hostname: "s3-us-west-1.amazonaws.com", + Hostname: "s3.us-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "us-west-2": endpoint{ - Hostname: "s3-us-west-2.amazonaws.com", + Hostname: 
"s3.us-west-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, }, }, + "sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "sdb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1419,21 +1952,104 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, "servicecatalog": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ @@ -1447,19 +2063,36 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, 
"us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1480,6 +2113,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1502,7 +2136,32 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -1523,6 +2182,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1534,10 +2194,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1553,6 +2219,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1577,6 +2244,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1615,6 +2283,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -1664,6 +2333,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1683,6 +2353,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1690,6 +2361,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -1707,8 +2389,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": 
endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1723,14 +2409,28 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1781,30 +2481,63 @@ var awscnPartition = partition{ "cn-north-1": region{ Description: "China (Beijing)", }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, }, Services: services{ + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cloudformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cloudtrail": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "codedeploy": service{ + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -1812,13 +2545,22 @@ var awscnPartition = partition{ "config": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "directconnect": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "dynamodb": service{ @@ -1826,7 +2568,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ec2": service{ @@ -1834,7 +2577,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ec2metadata": service{ @@ -1848,24 +2592,41 @@ var awscnPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticbeanstalk": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": 
endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticmapreduce": service{ @@ -1873,13 +2634,21 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, }, }, "events": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "glacier": service{ @@ -1887,7 +2656,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "iam": service{ @@ -1903,16 +2673,35 @@ var awscnPartition = partition{ }, }, }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "logs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "monitoring": service{ @@ -1920,19 +2709,22 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "rds": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "redshift": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "s3": service{ @@ -1940,6 +2732,20 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -1949,7 +2755,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "sqs": service{ @@ -1958,7 +2765,15 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "storagegateway": service{ @@ -1975,25 +2790,29 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "sts": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "swf": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, }, }, "tagging": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, }, @@ -2025,6 +2844,18 @@ var awsusgovPartition = partition{ }, }, Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "autoscaling": service{ Endpoints: endpoints{ @@ -2045,6 +2876,16 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "cloudtrail": service{ Endpoints: endpoints{ @@ -2069,10 +2910,22 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "dynamodb": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "ec2": service{ @@ -2092,12 +2945,30 @@ var awsusgovPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "elasticloadbalancing": service{ Endpoints: endpoints{ @@ -2114,6 +2985,12 @@ var awsusgovPartition = partition{ }, }, }, + "es": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "events": service{ Endpoints: endpoints{ @@ -2141,6 +3018,12 @@ var awsusgovPartition = partition{ }, }, }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ -2165,12 +3048,28 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "monitoring": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -2183,6 +3082,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3", "s3v4"}, @@ -2195,11 +3100,17 @@ var awsusgovPartition = partition{ }, }, "us-gov-west-1": endpoint{ - Hostname: "s3-us-gov-west-1.amazonaws.com", + Hostname: "s3.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, }, }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ @@ -2223,6 +3134,18 @@ var awsusgovPartition = partition{ }, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ 
-2231,6 +3154,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "sts": service{ @@ -2245,5 +3174,19 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index a0e9bc4..84316b9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -21,12 +21,12 @@ // partitions := resolver.(endpoints.EnumPartitions).Partitions() // // for _, p := range partitions { -// fmt.Println("Regions for", p.Name) +// fmt.Println("Regions for", p.ID()) // for id, _ := range p.Regions() { // fmt.Println("*", id) // } // -// fmt.Println("Services for", p.Name) +// fmt.Println("Services for", p.ID()) // for id, _ := range p.Services() { // fmt.Println("*", id) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 9c3eedb..e29c095 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -206,10 +206,11 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( // enumerating over the regions in a partition. func (p Partition) Regions() map[string]Region { rs := map[string]Region{} - for id := range p.p.Regions { + for id, r := range p.p.Regions { rs[id] = Region{ - id: id, - p: p.p, + id: id, + desc: r.Description, + p: p.p, } } @@ -240,6 +241,10 @@ type Region struct { // ID returns the region's identifier. func (r Region) ID() string { return r.id } +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + // ResolveEndpoint resolves an endpoint from the context of the region given // a service. See Partition.EndpointFor for usage and errors that can be returned. func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -284,10 +289,11 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve func (s Service) Regions() map[string]Region { rs := map[string]Region{} for id := range s.p.Services[s.id].Endpoints { - if _, ok := s.p.Regions[id]; ok { + if r, ok := s.p.Regions[id]; ok { rs[id] = Region{ - id: id, - p: s.p, + id: id, + desc: r.Description, + p: s.p, } } } @@ -347,6 +353,10 @@ type ResolvedEndpoint struct { // The service name that should be used for signing requests. SigningName string + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + // The signing method that should be used for signing requests. 
SigningMethod string } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 13d968a..ff6f76d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -226,16 +226,20 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op if len(signingRegion) == 0 { signingRegion = region } + signingName := e.CredentialScope.Service + var signingNameDerived bool if len(signingName) == 0 { signingName = service + signingNameDerived = true } return ResolvedEndpoint{ - URL: u, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index db87188..6ed15b2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType { // Matches returns true if the v LogLevel is enabled by this LogLevel. Should be // used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. +// LogLevel is nil, will default to LogOff comparison. func (l *LogLevelType) Matches(v LogLevelType) bool { c := l.Value() return c&v == v } // AtLeast returns true if this LogLevel is at least high enough to satisfy v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default // to LogOff comparison. func (l *LogLevelType) AtLeast(v LogLevelType) bool { c := l.Value() @@ -71,6 +71,12 @@ const ( // LogDebugWithRequestErrors states the SDK should log when service requests fail // to build, send, validate, or unmarshal. LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodies. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK. Will also enable LogDebug. + LogDebugWithEventStreamBody ) // A Logger is a minimalistic interface for the SDK to log messages to.
Should diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 802ac88..605a72d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -14,6 +14,7 @@ type Handlers struct { Send HandlerList ValidateResponse HandlerList Unmarshal HandlerList + UnmarshalStream HandlerList UnmarshalMeta HandlerList UnmarshalError HandlerList Retry HandlerList @@ -30,6 +31,7 @@ func (h *Handlers) Copy() Handlers { Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), UnmarshalError: h.UnmarshalError.copy(), UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), @@ -45,6 +47,7 @@ func (h *Handlers) Clear() { h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() + h.UnmarshalStream.Clear() h.UnmarshalMeta.Clear() h.UnmarshalError.Clear() h.ValidateResponse.Clear() @@ -172,6 +175,21 @@ func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { return swapped } +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + // SetBackNamed will replace the named handler if it exists in the handler list. // If the handler does not exist the handler will be added to the end of the list. func (l *HandlerList) SetBackNamed(n NamedHandler) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index 02f07f4..b0c2ef4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -3,6 +3,8 @@ package request import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // offsetReader is a thread-safe io.ReadCloser to prevent racing @@ -15,7 +17,7 @@ type offsetReader struct { func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { reader := &offsetReader{} - buf.Seek(offset, 0) + buf.Seek(offset, sdkio.SeekStart) reader.buf = buf return reader diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 299dc37..75f0fe0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -24,10 +25,14 @@ const ( // ErrCodeRead is an error that is returned during HTTP reads. ErrCodeRead = "ReadError" - // ErrCodeResponseTimeout is the connection timeout error that is recieved + // ErrCodeResponseTimeout is the connection timeout error that is received // during body reads. ErrCodeResponseTimeout = "ResponseTimeout" + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + // CanceledErrorCode is the error code that will be returned by an // API request that was canceled. 
Requests given a aws.Context may // return this error when canceled. @@ -41,8 +46,8 @@ type Request struct { Handlers Handlers Retryer + AttemptTime time.Time Time time.Time - ExpireTime time.Duration Operation *Operation HTTPRequest *http.Request HTTPResponse *http.Response @@ -60,6 +65,11 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // A value greater than 0 instructs the request to be signed as a Presigned URL. + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + context aws.Context built bool @@ -104,12 +114,15 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } + SanitizeHostForHeader(httpReq) + r := &Request{ Config: cfg, ClientInfo: clientInfo, Handlers: handlers.Copy(), Retryer: retryer, + AttemptTime: time.Now(), Time: time.Now(), ExpireTime: 0, Operation: operation, @@ -214,6 +227,9 @@ func (r *Request) SetContext(ctx aws.Context) { // WillRetry returns if the request can be retried. func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } @@ -245,39 +261,70 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader + r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset. r.ResetBody() } // Presign returns the request's signed URL. Error will be returned // if the signing fails. -func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime +// +// It is invalid to create a presigned URL with an expire duration 0 or less. An +// error is returned if the expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this, making the presigned URL + // useless. r.NotHoist = false + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like Presign, with the addition of returning a +// set of headers that were signed. +// +// It is invalid to create a presigned URL with an expire duration 0 or less. An +// error is returned if the expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API URL.
+func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + if r.Operation.BeforePresignFn != nil { - r = r.copy() - err := r.Operation.BeforePresignFn(r) - if err != nil { - return "", err + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err } } - r.Sign() - if r.Error != nil { - return "", r.Error + if err := r.Sign(); err != nil { + return "", nil, err } - return r.HTTPRequest.URL.String(), nil -} -// PresignRequest behaves just like presign, but hoists all headers and signs them. -// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } @@ -297,7 +344,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) { // Build will build the request's object so it can be signed and sent // to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run +// Any additional build Handlers set on this request will be run // in the order they were set. // // The request will only be built once. Multiple calls to build will have @@ -323,9 +370,9 @@ func (r *Request) Build() error { return r.Error } -// Sign will sign the request returning error if errors are encountered. +// Sign will sign the request, returning error if errors are encountered. // -// Send will build the request prior to signing. All Sign Handlers will +// Sign will build the request prior to signing. All Sign Handlers will // be executed in the order they were set. func (r *Request) Sign() error { r.Build() @@ -358,7 +405,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // of the SDK if they used that field. // // Related golang/go#18257 - l, err := computeBodyLength(r.Body) + l, err := aws.SeekerLen(r.Body) if err != nil { return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) } @@ -376,7 +423,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Transfer-Encoding: chunked bodies for these methods. // // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker. + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. switch r.Operation.HTTPMethod { case "GET", "HEAD", "DELETE": body = NoBody @@ -388,49 +436,13 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { return body, nil } -// Attempts to compute the length of the body of the reader using the -// io.Seeker interface. If the value is not seekable because of being -// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. -// If no error occurs the length of the body will be returned. -func computeBodyLength(r io.ReadSeeker) (int64, error) { - seekable := true - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. 
- switch v := r.(type) { - case aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - case *aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - } - if !seekable { - return -1, nil - } - - curOffset, err := r.Seek(0, 1) - if err != nil { - return 0, err - } - - endOffset, err := r.Seek(0, 2) - if err != nil { - return 0, err - } - - _, err = r.Seek(curOffset, 0) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - // GetBody will return an io.ReadSeeker of the Request's underlying // input body with a concurrency safe wrapper. func (r *Request) GetBody() io.ReadSeeker { return r.safeBody } -// Send will send the request returning error if errors are encountered. +// Send will send the request, returning error if errors are encountered. // // Send will sign the request prior to sending. All Send Handlers will // be executed in the order they were set. @@ -451,6 +463,7 @@ func (r *Request) Send() error { }() for { + r.AttemptTime = time.Now() if aws.BoolValue(r.Retryable) { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", @@ -573,3 +586,72 @@ func shouldRetryCancel(r *Request) bool { errStr != "net/http: request canceled while waiting for connection") } + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. 
port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go index 869b97a..e36e468 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -21,7 +21,7 @@ func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } var NoBody = noBody{} // ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior +// sets the HTTP Request body reference. When the body is read prior // to being sent in the HTTP request it will need to be rewound. // // ResetBody will automatically be called by the SDK's build handler, but if diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index c32fc69..7c6a800 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -11,7 +11,7 @@ import ( var NoBody = http.NoBody // ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior +// sets the HTTP Request body reference. When the body is read prior // to being sent in the HTTP request it will need to be rewound. // // ResetBody will automatically be called by the SDK's build handler, but if diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 59de673..a633ed5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -35,8 +35,12 @@ type Pagination struct { // NewRequest should always be built from the same API operations. It is // undefined if different API operations are returned on subsequent calls. NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // tokens that are the same as its previous tokens. + EndPageOnSameToken bool started bool + prevTokens []interface{} nextTokens []interface{} err error @@ -49,7 +53,15 @@ type Pagination struct { // // Will always return true if Next has not been called yet. func (p *Pagination) HasNextPage() bool { - return !(p.started && len(p.nextTokens) == 0) + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage } // Err returns the error Pagination encountered when retrieving the next page.
@@ -96,6 +108,7 @@ func (p *Pagination) Next() bool { return false } + p.prevTokens = p.nextTokens p.nextTokens = req.nextPageTokens() p.curPage = req.Data @@ -142,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} { tokens := []interface{}{} tokenAdded := false for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } } + + tokenAdded = true + tokens = append(tokens, v) } if !tokenAdded { return nil diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 8d369c1..7d52702 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -8,7 +8,7 @@ import ( ) // Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer +// The default implementation used by most services is the client.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. type Retryer interface { RetryRules(*Request) time.Duration @@ -70,8 +70,8 @@ func isCodeExpiredCreds(code string) bool { } var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: struct{}{}, - ErrCodeRead: struct{}{}, + ErrCodeSerialization: {}, + ErrCodeRead: {}, } type temporaryError interface { @@ -97,7 +97,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { } if t, ok := err.(temporaryError); ok { - return t.Temporary() + return t.Temporary() || isErrConnectionReset(err) } return isErrConnectionReset(err) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go index 2520286..4012462 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -220,7 +220,7 @@ type ErrParamMinLen struct { func NewErrParamMinLen(field string, min int) *ErrParamMinLen { return &ErrParamMinLen{ errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, + code: ParamMinLenErrCode, field: field, msg: fmt.Sprintf("minimum field size of %v", min), }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go index 22d2f80..4601f88 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -79,8 +79,9 @@ type Waiter struct { MaxAttempts int Delay WaiterDelay - RequestOptions []Option - NewRequest func([]Option) (*Request, error) + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error } // ApplyOptions updates the waiter with the list of waiter options provided. 
@@ -195,8 +196,15 @@ func (w Waiter) WaitWithContext(ctx aws.Context) error { if sleepFn := req.Config.SleepDelay; sleepFn != nil { // Support SleepDelay for backwards compatibility and testing sleepFn(delay) - } else if err := aws.SleepWithContext(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index ea7b886..98d420f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). Credentials are the values the SDK should use for authenticating requests with -AWS Services. They arfrom a configuration file will need to include both +AWS Services. For credentials from a configuration file, both aws_access_key_id and aws_secret_access_key must be provided together in the same file to be considered valid. The values will be ignored if not a complete group. aws_session_token is an optional field that can be provided if both of diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 7357e54..82e04d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -5,8 +5,12 @@ import ( "strconv" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" ) +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + // envConfig is a collection of environment values the SDK will read // setup config from. All environment values are optional. But some values // such as credentials require multiple values to be complete or the values @@ -76,7 +80,7 @@ type envConfig struct { SharedConfigFile string // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the the system's root CA bundle. + // that the SDK will use instead of the system's root CA bundle. // Only use this if you want to configure the SDK to use a custom set // of CAs.
// @@ -92,9 +96,23 @@ type envConfig struct { // // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle CustomCABundle string + + csmEnabled string + CSMEnabled bool + CSMPort string + CSMClientID string } var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } credAccessEnvKey = []string{ "AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", @@ -153,11 +171,17 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + // Require logical grouping of credentials if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys @@ -173,6 +197,13 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") return cfg diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 9f75d5a..51f3055 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" @@ -26,7 +27,7 @@ import ( // Sessions are safe to create service clients concurrently, but it is not safe // to mutate the Session concurrently. // -// The Session satisfies the service client's client.ClientConfigProvider. +// The Session satisfies the service client's client.ConfigProvider. type Session struct { Config *aws.Config Handlers request.Handlers @@ -58,7 +59,12 @@ func New(cfgs ...*aws.Config) *Session { envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { - s, err := newSession(Options{}, envCfg, cfgs...) + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. This @@ -76,10 +82,16 @@ func New(cfgs ...*aws.Config) *Session { r.Error = err }) } + return s } - return deprecatedNewSession(cfgs...) + s := deprecatedNewSession(cfgs...) 
+ if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } + + return s } // NewSession returns a new Session created from SDK defaults, config files, @@ -243,13 +255,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg.EnableSharedConfig = true } - if len(envCfg.SharedCredentialsFile) == 0 { - envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(envCfg.SharedConfigFile) == 0 { - envCfg.SharedConfigFile = defaults.SharedConfigFilename() - } - // Only use AWS_CA_BUNDLE if session option is not provided. if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { f, err := os.Open(envCfg.CustomCABundle) @@ -302,10 +307,22 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { } initHandlers(s) - return s } +func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { + logger.Log("Enabling CSM") + if len(port) == 0 { + port = csm.DefaultPort + } + + r, err := csm.Start(clientID, "127.0.0.1:"+port) + if err != nil { + return + } + r.InjectHandlers(handlers) +} + func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() handlers := defaults.Handlers() @@ -345,6 +362,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } // Setup HTTP client with custom cert bundle if enabled if opts.CustomCABundle != nil { @@ -573,11 +593,12 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, }, err } @@ -597,10 +618,11 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 434ac87..8aa0681 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -45,7 +45,7 @@ // If signing a request intended for HTTP2 server, and you're using Go 1.6.2 // through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the // request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP // message. URL.Opaque generally will force Go to make requests with absolute URL. // URL.RawPath does not do this, but RawPath must be a valid escaping of Path // or url.EscapedPath will ignore the RawPath escaping. 
@@ -55,7 +55,6 @@ package v4 import ( - "bytes" "crypto/hmac" "crypto/sha256" "encoding/hex" @@ -72,6 +71,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" "github.com/aws/aws-sdk-go/private/protocol/rest" ) @@ -98,25 +98,25 @@ var ignoredHeaders = rules{ var requiredSignedHeaders = rules{ whitelist{ mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -135,6 +135,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, }, }, patterns{"X-Amz-Meta-"}, @@ -269,7 +270,7 @@ type signingCtx struct { // "X-Amz-Content-Sha256" header with a precomputed value. The signer will // only compute the hash if the request header value is empty. func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, signTime) + return v4.signWithBody(r, body, service, region, 0, false, signTime) } // Presign signs AWS v4 requests with the provided body, service name, region @@ -303,10 +304,10 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin // presigned request's signature you can set the "X-Amz-Content-Sha256" // HTTP header and that will be included in the request's signature. 
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, signTime) + return v4.signWithBody(r, body, service, region, exp, true, signTime) } -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { currentTimeFn := v4.currentTimeFn if currentTimeFn == nil { currentTimeFn = time.Now @@ -318,7 +319,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi Query: r.URL.Query(), Time: signTime, ExpireTime: exp, - isPresign: exp != 0, + isPresign: isPresign, ServiceName: service, Region: region, DisableURIPathEscaping: v4.DisableURIPathEscaping, @@ -340,8 +341,11 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return http.Header{}, err } + ctx.sanitizeHostForHeader() ctx.assignAmzQueryValues() - ctx.build(v4.DisableHeaderHoisting) + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } // If the request is not presigned the body should be attached to it. This // prevents the confusion of wanting to send a signed request without @@ -364,6 +368,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return ctx.SignedHeaderVals, nil } +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + func (ctx *signingCtx) handlePresignRemoval() { if !ctx.isPresign { return @@ -402,7 +410,7 @@ var SignRequestHandler = request.NamedHandler{ } // SignSDKRequest signs an AWS request with the V4 signature. This -// request handler is bested used only with the SDK's built in service client's +// request handler should only be used with the SDK's built in service client's // API operation requests. 
// // This function should not be used on its own, but in conjunction with @@ -468,7 +476,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time } signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, signingTime, + name, region, req.ExpireTime, req.ExpireTime > 0, signingTime, ) if err != nil { req.Error = err @@ -499,10 +507,14 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) { v4.Logger.Log(msg) } -func (ctx *signingCtx) build(disableHeaderHoisting bool) { +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildTime() // no depends ctx.buildCredentialString() // no depends + if err := ctx.buildBodyDigest(); err != nil { + return err + } + unsignedHeaders := ctx.Request.Header if ctx.isPresign { if !disableHeaderHoisting { @@ -514,7 +526,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } } - ctx.buildBodyDigest() ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) ctx.buildCanonicalString() // depends on canon headers / signed headers ctx.buildStringToSign() // depends on canon string @@ -530,6 +541,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) } + + return nil } func (ctx *signingCtx) buildTime() { @@ -604,14 +617,18 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { headerValues := make([]string, len(headers)) for i, k := range headers { if k == "host" { - headerValues[i] = "host:" + ctx.Request.URL.Host + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } } else { headerValues[i] = k + ":" + strings.Join(ctx.SignedHeaderVals[k], ",") } } - - ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") } func (ctx *signingCtx) buildCanonicalString() { @@ -652,21 +669,34 @@ func (ctx *signingCtx) buildSignature() { ctx.signature = hex.EncodeToString(signature) } -func (ctx *signingCtx) buildBodyDigest() { +func (ctx *signingCtx) buildBodyDigest() error { hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { - if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign } else if ctx.Body == nil { hash = emptyStringSHA256 } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } - if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + + if includeSHA256Header { ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) } } ctx.bodyDigest = hash + + return nil } // isRequestSigned returns if the request is currently signed or presigned @@ -706,56 +736,53 @@ func makeSha256(data []byte) []byte { func makeSha256Reader(reader io.ReadSeeker) []byte { hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) + start, _ := reader.Seek(0, sdkio.SeekCurrent) + defer reader.Seek(start, sdkio.SeekStart) io.Copy(hash, reader) return hash.Sum(nil) } -const doubleSpaces = " " -
-var doubleSpaceBytes = []byte(doubleSpaces) +const doubleSpace = " " -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - // Trim leading and trailing spaces - trimmed := strings.TrimSpace(str) +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } - idx := strings.Index(trimmed, doubleSpaces) - var buf []byte - for idx > -1 { - // Multiple adjacent spaces found - if buf == nil { - // first time create the buffer - buf = []byte(trimmed) - } + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] - stripToIdx := -1 - for j := idx + 1; j < len(buf); j++ { - if buf[j] != ' ' { - buf = append(buf[:idx+1], buf[j:]...) - stripToIdx = j - break - } - } + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } - if stripToIdx >= 0 { - idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) - if idx >= 0 { - idx += stripToIdx + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ } + spaces++ } else { - idx = -1 + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ } } - if buf != nil { - vals[i] = string(buf) - } else { - vals[i] = trimmed - } + vals[i] = string(buf[:m]) } - return vals } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 0e2d864..8b6f234 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -3,6 +3,8 @@ package aws import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should @@ -22,6 +24,22 @@ type ReaderSeekerCloser struct { r io.Reader } +// IsReaderSeekable returns if the underlying reader type can be seeked. An +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // @@ -56,6 +74,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool { return ok } +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position.
Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + // Close closes the ReaderSeekerCloser. // // If the ReaderSeekerCloser is not an io.Closer nothing will be done. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index bbf4c2f..c4d155c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.8.34" +const SDKVersion = "1.14.31" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 0000000..5aa9137 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 io package's Seeker constants. +const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 0000000..e5f0056 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 0000000..0c9802d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 0000000..38ea61a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// 
PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) + if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 0000000..ecc7bf8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + (*hs) = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + 
case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 0000000..4b972b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,199 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. +func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +// UseLogger specifies the Logger that the decoder should use to log the +// message decode to.
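//
// A hedged usage sketch, not part of this patch: r is assumed to be an
// io.Reader carrying raw event stream bytes.
//
//	dec := eventstream.NewDecoder(r)
//	for {
//		// Passing nil lets Decode allocate the payload buffer; a reusable
//		// buffer can be supplied to reduce allocations.
//		msg, err := dec.Decode(nil)
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Println(len(msg.Headers), len(msg.Payload))
//	}
//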
+func (d *Decoder) UseLogger(logger aws.Logger) { + d.logger = logger +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 0000000..150a609 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,114 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash" + "hash/crc32" + "io" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. 
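//
// A hedged sketch of encoding one message, not part of this patch; buf is
// assumed to be a bytes.Buffer:
//
//	enc := eventstream.NewEncoder(&buf)
//	err := enc.Encode(eventstream.Message{
//		Headers: eventstream.Headers{
//			{Name: ":event-type", Value: eventstream.StringValue("Records")},
//		},
//		Payload: []byte(`{"hello":"world"}`),
//	})
//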
+func (e *Encoder) Encode(msg Message) error { + e.headersBuf.Reset() + + err := encodeHeaders(e.headersBuf, msg.Headers) + if err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(e.w, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err := hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(e.w, binary.BigEndian, msgCRC) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +func encodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 0000000..5481ef3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go new file mode 100644 index 0000000..97937c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go @@ -0,0 +1,196 @@ +package eventstreamapi + +import ( + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Unmarshaler provides the interface for unmarshaling a EventStream +// message into a SDK type. +type Unmarshaler interface { + UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error +} + +// EventStream headers with specific meaning to async API functionality. +const ( + MessageTypeHeader = `:message-type` // Identifies type of message. 
+ EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". + + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) + +// EventReader provides reading from the EventStream of a reader. +type EventReader struct { + reader io.ReadCloser + decoder *eventstream.Decoder + + unmarshalerForEventType func(string) (Unmarshaler, error) + payloadUnmarshaler protocol.PayloadUnmarshaler + + payloadBuf []byte +} + +// NewEventReader returns an EventReader built from the reader and unmarshaler +// provided. Use the ReadEvent method to start reading from the EventStream. +func NewEventReader( + reader io.ReadCloser, + payloadUnmarshaler protocol.PayloadUnmarshaler, + unmarshalerForEventType func(string) (Unmarshaler, error), +) *EventReader { + return &EventReader{ + reader: reader, + decoder: eventstream.NewDecoder(reader), + payloadUnmarshaler: payloadUnmarshaler, + unmarshalerForEventType: unmarshalerForEventType, + payloadBuf: make([]byte, 10*1024), + } +} + +// UseLogger instructs the EventReader to use the logger and log level +// specified. +func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) { + if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) { + r.decoder.UseLogger(logger) + } +} + +// ReadEvent attempts to read a message from the EventStream and return the +// unmarshaled event value that the message is for. +// +// For EventStream API errors check if the returned error satisfies the +// awserr.Error interface to get the error's Code and Message components. +// +// EventUnmarshalers called with EventStream messages must take copies of the +// message's Payload. The payload is reused between events read. +func (r *EventReader) ReadEvent() (event interface{}, err error) { + msg, err := r.decoder.Decode(r.payloadBuf) + if err != nil { + return nil, err + } + defer func() { + // Reclaim payload buffer for next message read.
+ r.payloadBuf = msg.Payload[0:0] + }() + + typ, err := GetHeaderString(msg, MessageTypeHeader) + if err != nil { + return nil, err + } + + switch typ { + case EventMessageType: + return r.unmarshalEventMessage(msg) + case ExceptionMessageType: + err = r.unmarshalEventException(msg) + return nil, err + case ErrorMessageType: + return nil, r.unmarshalErrorMessage(msg) + default: + return nil, fmt.Errorf("unknown eventstream message type, %v", typ) + } +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// Close closes the EventReader's EventStream reader. +func (r *EventReader) Close() error { + return r.reader.Close() +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. 
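//
// A hedged sketch, not part of this patch: msg is assumed to be a decoded
// eventstream.Message.
//
//	typ, err := eventstreamapi.GetHeaderString(msg, eventstreamapi.MessageTypeHeader)
//	if err == nil && typ == eventstreamapi.EventMessageType {
//		// dispatch on the :event-type header next
//	}
//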
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 0000000..5ea5a98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,24 @@ +package eventstreamapi + +import "fmt" + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 0000000..3b44dde --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,166 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
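//
// A hedged sketch of Headers manipulation, not part of this patch:
//
//	var hs eventstream.Headers
//	hs.Set(":event-type", eventstream.StringValue("Records"))
//	v := hs.Get(":event-type") // StringValue("Records")
//	hs.Del(":event-type")
//	v = hs.Get(":event-type") // nil once deleted
//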
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 0000000..e3fc076 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,501 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 0000000..2dc012a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,103 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := encodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil } + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 0000000..776d110 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be used for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it.
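//
// A hedged round-trip sketch, not part of this patch:
//
//	s, err := protocol.EncodeJSONValue(aws.JSONValue{"n": 1}, protocol.Base64Escape)
//	// s is the base64 encoding of {"n":1}
//	v, err := protocol.DecodeJSONValue(s, protocol.Base64Escape)
//	// v["n"] unmarshals as float64(1), since plain json.Unmarshal is used
//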
+// +// Will panic if the escape mode is unknown. +func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + switch escape { + case NoEscape: + return string(b), nil + case Base64Escape: + return base64.StdEncoding.EncodeToString(b), nil + case QuotedEscape: + return strconv.Quote(string(b)), nil + } + + panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) +} + +// DecodeJSONValue will attempt to decode the string input as a JSONValue. +// Optionally base64 decoding the value first before JSON unmarshaling. +// +// Will panic if the escape mode is unknown. +func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go new file mode 100644 index 0000000..e21614a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -0,0 +1,81 @@ +package protocol + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// PayloadUnmarshaler provides the interface for unmarshaling a payload's +// reader into a SDK shape. +type PayloadUnmarshaler interface { + UnmarshalPayload(io.Reader, interface{}) error +} + +// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a +// HandlerList. This provides the support for unmarshaling a payload reader to +// a shape without needing a SDK request first. +type HandlerPayloadUnmarshal struct { + Unmarshalers request.HandlerList +} + +// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using +// the Unmarshalers HandlerList provided. Returns an error if +// unmarshaling fails. +func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { + req := &request.Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(r), + }, + Data: v, + } + + h.Unmarshalers.Run(req) + + return req.Error +} + +// PayloadMarshaler provides the interface for marshaling a SDK shape into an +// io.Writer. +type PayloadMarshaler interface { + MarshalPayload(io.Writer, interface{}) error +} + +// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. +// This provides support for marshaling a SDK shape into an io.Writer without +// needing a SDK request first. +type HandlerPayloadMarshal struct { + Marshalers request.HandlerList +} + +// MarshalPayload marshals the SDK shape into the io.Writer using the +// Marshalers HandlerList provided. Returns an error if marshaling +// fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "GET"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 18169f0..60e5b09 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -25,7 +25,7 @@ func Build(r *request.Request) { return } - if r.ExpireTime == 0 { + if !r.IsPresigned() { r.HTTPRequest.Method = "POST" r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") r.SetBufferBody([]byte(body.Encode())) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 524ca95..75866d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string return nil } + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + // check for unflattened list member if !q.isEC2 && tag.Get("flattened") == "" { if listName := tag.Get("locationNameList"); listName == "" { @@ -229,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) case time.Time: const ISO8601UTC = "2006-01-02T15:04:05Z" - v.Set(name, value.UTC().Format(ISO8601UTC)) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 7161835..b34f525 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -4,7 +4,6 @@ package rest import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "io" "net/http" @@ -18,11 +17,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool @@ -252,13 +249,12 @@ func EscapePath(path string, encodeSep bool) string { return buf.String() } -func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { v = reflect.Indirect(v) if !v.IsValid() { return "", errValueNotSet } - var str string switch value := v.Interface().(type) { case string: str = value @@ -271,19 +267,28 @@ func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { case float64: str = 
strconv.FormatFloat(value, 'f', -1, 64) case time.Time: - str = value.UTC().Format(RFC822) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) case aws.JSONValue: - b, err := json.Marshal(value) - if err != nil { - return "", err + if len(value) == 0 { + return "", errValueNotSet } + escaping := protocol.NoEscape if tag.Get("location") == "header" { - str = base64.StdEncoding.EncodeToString(b) - } else { - str = string(b) + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) } default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err } return str, nil diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 7a779ee..33fd53b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -3,7 +3,6 @@ package rest import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "io" "io/ioutil" @@ -16,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalHandler is a named request handler for unmarshaling rest protocol requests @@ -198,23 +198,21 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro } v.Set(reflect.ValueOf(&f)) case *time.Time: - t, err := time.Parse(RFC822, header) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) if err != nil { return err } v.Set(reflect.ValueOf(&t)) case aws.JSONValue: - b := []byte(header) - var err error + escaping := protocol.NoEscape if tag.Get("location") == "header" { - b, err = base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } + escaping = protocol.Base64Escape } - - m := aws.JSONValue{} - err = json.Unmarshal(b, &m) + m, err := protocol.DecodeJSONValue(header, escaping) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 0000000..b7ed6c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,72 @@ +package protocol + +import ( + "strconv" + "time" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +const ( + // RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // RFC3339 a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns if the timestamp format name +// is known to the SDK's protocols.
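//
// A hedged sketch of the format round trip, not part of this patch:
//
//	s := protocol.FormatTime(protocol.ISO8601TimeFormatName, time.Unix(1398796238, 0))
//	// s == "2014-04-29T18:30:38Z"
//	t, err := protocol.ParseTime(protocol.ISO8601TimeFormatName, s)
//	// t is the same instant in UTC; err is nil for well-formed input
//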
+func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822TimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601TimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the time if it was able to be parsed, and fails otherwise. +func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), 0), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 7091b45..07764c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -13,9 +13,13 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) -// BuildXML will serialize params into an xml.Encoder. -// Error will be returned if the serialization of any of the params or nested values fails. +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. 
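//
// A hedged sketch, not part of this patch: input is assumed to be an API
// input struct carrying the SDK's xml locationName tags.
//
//	var buf bytes.Buffer
//	enc := xml.NewEncoder(&buf)
//	if err := xmlutil.BuildXML(input, enc); err != nil {
//		return err
//	}
//	enc.Flush() // flush any buffered tokens; harmless if already flushed
//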
func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { b := xmlBuilder{encoder: e, namespaces: map[string]string{}} root := NewXMLElement(xml.Name{}) if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { @@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error { } for _, c := range root.Children { for _, v := range c { - return StructToXML(e, v, false) + return StructToXML(e, v, sorted) } } return nil @@ -278,8 +282,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case float32: str = strconv.FormatFloat(float64(converted), 'f', -1, 32) case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", tag.Get("locationName"), value.Interface(), value.Type().Name()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 8758462..ff1ef68 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalXML deserializes an xml.Decoder into the container v. V @@ -52,9 +54,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { if t == "" { switch rtype.Kind() { case reflect.Struct: - t = "structure" + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } case reflect.Slice: - t = "list" + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } case reflect.Map: t = "map" } @@ -247,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 3e970b6..515ce15 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode { // AddChild adds child to the XMLNode. 
 func (n *XMLNode) AddChild(child *XMLNode) {
+	child.parent = n
 	if _, ok := n.Children[child.Name.Local]; !ok {
 		n.Children[child.Name.Local] = []*XMLNode{}
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 52ac02c..0e999ca 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -3,14 +3,22 @@ package s3

 import (
+	"bytes"
 	"fmt"
 	"io"
+	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/client"
 	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
 	"github.com/aws/aws-sdk-go/private/protocol/restxml"
 )

@@ -18,19 +26,18 @@ const opAbortMultipartUpload = "AbortMultipartUpload"

 // AbortMultipartUploadRequest generates a "aws/request.Request" representing the
 // client's request for the AbortMultipartUpload operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See AbortMultipartUpload for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AbortMultipartUpload method directly
-// instead.
+// See AbortMultipartUpload for more information on using the AbortMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the AbortMultipartUploadRequest method.
 //    req, resp := client.AbortMultipartUploadRequest(params)
@@ -40,7 +47,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
 func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
 	op := &request.Operation{
 		Name:       opAbortMultipartUpload,
@@ -76,7 +83,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req
 //   * ErrCodeNoSuchUpload "NoSuchUpload"
 //   The specified multipart upload does not exist.
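The rewritten doc comments steer callers toward the Request-returning constructors when they need to customize the request lifecycle. A rough sketch of that pattern, assuming the SDK's usual session setup (the bucket, key, upload ID, and header values are invented for illustration):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        client := s3.New(session.Must(session.NewSession()))

        // Build the request without sending it.
        req, resp := client.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
            Bucket:   aws.String("my-bucket"),    // invented example values
            Key:      aws.String("my-key"),
            UploadId: aws.String("my-upload-id"),
        })

        // Inject a custom header before the request is signed and sent.
        req.HTTPRequest.Header.Set("X-Example-Header", "example")

        if err := req.Send(); err == nil { // resp is valid only after Send succeeds
            fmt.Println(resp)
        }
    }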
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
 func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
 	req, out := c.AbortMultipartUploadRequest(input)
 	return out, req.Send()
 }
@@ -102,19 +109,18 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"

 // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
 // client's request for the CompleteMultipartUpload operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See CompleteMultipartUpload for usage and error information.
+// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the CompleteMultipartUpload method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the CompleteMultipartUploadRequest method.
 //    req, resp := client.CompleteMultipartUploadRequest(params)
@@ -124,7 +130,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
 func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
 	op := &request.Operation{
 		Name:       opCompleteMultipartUpload,
@@ -151,7 +157,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation CompleteMultipartUpload for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
 func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
 	req, out := c.CompleteMultipartUploadRequest(input)
 	return out, req.Send()
 }
@@ -177,19 +183,18 @@ const opCopyObject = "CopyObject"

 // CopyObjectRequest generates a "aws/request.Request" representing the
 // client's request for the CopyObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See CopyObject for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CopyObject method directly -// instead. +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CopyObjectRequest method. // req, resp := client.CopyObjectRequest(params) @@ -199,7 +204,7 @@ const opCopyObject = "CopyObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { op := &request.Operation{ Name: opCopyObject, @@ -232,7 +237,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // The source object of the COPY operation is not in the active tier and is // only stored in Amazon Glacier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { req, out := c.CopyObjectRequest(input) return out, req.Send() @@ -258,19 +263,18 @@ const opCreateBucket = "CreateBucket" // CreateBucketRequest generates a "aws/request.Request" representing the // client's request for the CreateBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateBucket for usage and error information. +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateBucket method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateBucketRequest method. 
 //    req, resp := client.CreateBucketRequest(params)
@@ -280,7 +284,7 @@ const opCreateBucket = "CreateBucket"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
 func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
 	op := &request.Operation{
 		Name:       opCreateBucket,
@@ -315,7 +319,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
 //
 //   * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
 func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
 	req, out := c.CreateBucketRequest(input)
 	return out, req.Send()
 }
@@ -341,19 +345,18 @@ const opCreateMultipartUpload = "CreateMultipartUpload"

 // CreateMultipartUploadRequest generates a "aws/request.Request" representing the
 // client's request for the CreateMultipartUpload operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See CreateMultipartUpload for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the CreateMultipartUpload method directly
-// instead.
+// See CreateMultipartUpload for more information on using the CreateMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the CreateMultipartUploadRequest method.
 //    req, resp := client.CreateMultipartUploadRequest(params)
@@ -363,7 +366,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
 func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
 	op := &request.Operation{
 		Name:       opCreateMultipartUpload,
@@ -396,7 +399,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation CreateMultipartUpload for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
 func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
 	req, out := c.CreateMultipartUploadRequest(input)
 	return out, req.Send()
 }
@@ -422,19 +425,18 @@ const opDeleteBucket = "DeleteBucket"

 // DeleteBucketRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucket operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See DeleteBucket for usage and error information.
+// See DeleteBucket for more information on using the DeleteBucket
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucket method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketRequest method.
 //    req, resp := client.DeleteBucketRequest(params)
@@ -444,7 +446,7 @@ const opDeleteBucket = "DeleteBucket"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
 func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucket,
@@ -474,7 +476,7 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucket for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
 func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
 	req, out := c.DeleteBucketRequest(input)
 	return out, req.Send()
 }
@@ -500,19 +502,18 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration

 // DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketAnalyticsConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly
-// instead.
+// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method.
 //    req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params)
@@ -522,7 +523,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
 func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucketAnalyticsConfiguration,
@@ -552,7 +553,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucketAnalyticsConfiguration for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
 func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) {
 	req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
 	return out, req.Send()
 }
@@ -578,19 +579,18 @@ const opDeleteBucketCors = "DeleteBucketCors"

 // DeleteBucketCorsRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketCors operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See DeleteBucketCors for usage and error information.
+// See DeleteBucketCors for more information on using the DeleteBucketCors
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketCors method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketCorsRequest method.
 //    req, resp := client.DeleteBucketCorsRequest(params)
@@ -600,7 +600,7 @@ const opDeleteBucketCors = "DeleteBucketCors"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
 func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucketCors,
@@ -629,7 +629,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucketCors for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
 func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
 	req, out := c.DeleteBucketCorsRequest(input)
 	return out, req.Send()
 }
@@ -651,23 +651,98 @@ func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCor
 	return out, req.Send()
 }

+const opDeleteBucketEncryption = "DeleteBucketEncryption"
+
+// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketEncryption operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteBucketEncryptionRequest method.
+//    req, resp := client.DeleteBucketEncryptionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption
+func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) {
+	op := &request.Operation{
+		Name:       opDeleteBucketEncryption,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/{Bucket}?encryption",
+	}
+
+	if input == nil {
+		input = &DeleteBucketEncryptionInput{}
+	}
+
+	output = &DeleteBucketEncryptionOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteBucketEncryption API operation for Amazon Simple Storage Service.
+//
+// Deletes the server-side encryption configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketEncryption for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption
+func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) {
+	req, out := c.DeleteBucketEncryptionRequest(input)
+	return out, req.Send()
+}
+
+// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketEncryption for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) {
+	req, out := c.DeleteBucketEncryptionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
 const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration"

 // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See DeleteBucketInventoryConfiguration for usage and error information.
+// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketInventoryConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketInventoryConfigurationRequest method.
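A minimal usage sketch for the new encryption operation (the bucket name is invented, and the session setup is the SDK's usual pattern rather than part of this change):

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        // Bound the call with a context; the WithContext variant panics
        // if the context is nil.
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        _, err := svc.DeleteBucketEncryptionWithContext(ctx, &s3.DeleteBucketEncryptionInput{
            Bucket: aws.String("my-bucket"), // invented example value
        })
        if err != nil {
            fmt.Println("delete bucket encryption failed:", err)
        }
    }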
 //    req, resp := client.DeleteBucketInventoryConfigurationRequest(params)
@@ -677,7 +752,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
 func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucketInventoryConfiguration,
@@ -707,7 +782,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucketInventoryConfiguration for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
 func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) {
 	req, out := c.DeleteBucketInventoryConfigurationRequest(input)
 	return out, req.Send()
 }
@@ -733,19 +808,18 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"

 // DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketLifecycle operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketLifecycle for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketLifecycle method directly
-// instead.
+// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketLifecycleRequest method.
 //    req, resp := client.DeleteBucketLifecycleRequest(params)
@@ -755,7 +829,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
 func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucketLifecycle,
@@ -784,7 +858,7 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucketLifecycle for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
 func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
 	req, out := c.DeleteBucketLifecycleRequest(input)
 	return out, req.Send()
 }
@@ -810,19 +884,18 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"

 // DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See DeleteBucketMetricsConfiguration for usage and error information.
+// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketMetricsConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
 //    req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
@@ -832,7 +905,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
 func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucketMetricsConfiguration,
@@ -862,7 +935,7 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucketMetricsConfiguration for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
 func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
 	req, out := c.DeleteBucketMetricsConfigurationRequest(input)
 	return out, req.Send()
 }
@@ -888,19 +961,18 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"

 // DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketPolicy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketPolicy for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketPolicy method directly
-// instead.
+// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketPolicyRequest method.
 //    req, resp := client.DeleteBucketPolicyRequest(params)
@@ -910,7 +982,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
 func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucketPolicy,
@@ -939,7 +1011,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucketPolicy for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
 func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
 	req, out := c.DeleteBucketPolicyRequest(input)
 	return out, req.Send()
 }
@@ -965,19 +1037,18 @@ const opDeleteBucketReplication = "DeleteBucketReplication"

 // DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketReplication operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See DeleteBucketReplication for usage and error information.
+// See DeleteBucketReplication for more information on using the DeleteBucketReplication
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketReplication method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketReplicationRequest method.
 //    req, resp := client.DeleteBucketReplicationRequest(params)
@@ -987,7 +1058,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
 func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucketReplication,
@@ -1016,7 +1087,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucketReplication for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
 func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
 	req, out := c.DeleteBucketReplicationRequest(input)
 	return out, req.Send()
 }
@@ -1042,19 +1113,18 @@ const opDeleteBucketTagging = "DeleteBucketTagging"

 // DeleteBucketTaggingRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketTagging for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketTagging method directly
-// instead.
+// See DeleteBucketTagging for more information on using the DeleteBucketTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketTaggingRequest method.
 //    req, resp := client.DeleteBucketTaggingRequest(params)
@@ -1064,7 +1134,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
 func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) {
 	op := &request.Operation{
 		Name:       opDeleteBucketTagging,
@@ -1093,7 +1163,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteBucketTagging for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
 func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
 	req, out := c.DeleteBucketTaggingRequest(input)
 	return out, req.Send()
 }
@@ -1119,19 +1189,18 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite"

 // DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketWebsite operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketWebsite for usage and error information. +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketWebsite method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketWebsiteRequest method. // req, resp := client.DeleteBucketWebsiteRequest(params) @@ -1141,7 +1210,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { op := &request.Operation{ Name: opDeleteBucketWebsite, @@ -1170,7 +1239,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { req, out := c.DeleteBucketWebsiteRequest(input) return out, req.Send() @@ -1196,19 +1265,18 @@ const opDeleteObject = "DeleteObject" // DeleteObjectRequest generates a "aws/request.Request" representing the // client's request for the DeleteObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. // -// See DeleteObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObject method directly -// instead. +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteObjectRequest method. 
 //    req, resp := client.DeleteObjectRequest(params)
@@ -1218,7 +1286,7 @@ const opDeleteObject = "DeleteObject"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
 func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
 	op := &request.Operation{
 		Name:       opDeleteObject,
@@ -1247,7 +1315,7 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteObject for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
 func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
 	req, out := c.DeleteObjectRequest(input)
 	return out, req.Send()
 }
@@ -1273,19 +1341,18 @@ const opDeleteObjectTagging = "DeleteObjectTagging"

 // DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteObjectTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See DeleteObjectTagging for usage and error information.
+// See DeleteObjectTagging for more information on using the DeleteObjectTagging
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteObjectTagging method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteObjectTaggingRequest method.
 //    req, resp := client.DeleteObjectTaggingRequest(params)
@@ -1295,7 +1362,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
 func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
 	op := &request.Operation{
 		Name:       opDeleteObjectTagging,
@@ -1322,7 +1389,7 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteObjectTagging for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
 func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
 	req, out := c.DeleteObjectTaggingRequest(input)
 	return out, req.Send()
 }
@@ -1348,19 +1415,18 @@ const opDeleteObjects = "DeleteObjects"

 // DeleteObjectsRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteObjects operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteObjects for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteObjects method directly
-// instead.
+// See DeleteObjects for more information on using the DeleteObjects
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteObjectsRequest method.
 //    req, resp := client.DeleteObjectsRequest(params)
@@ -1370,7 +1436,7 @@ const opDeleteObjects = "DeleteObjects"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
 func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
 	op := &request.Operation{
 		Name:       opDeleteObjects,
@@ -1398,7 +1464,7 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation DeleteObjects for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
 func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
 	req, out := c.DeleteObjectsRequest(input)
 	return out, req.Send()
 }
@@ -1424,19 +1490,18 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"

 // GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the GetBucketAccelerateConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See GetBucketAccelerateConfiguration for usage and error information.
+// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketAccelerateConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetBucketAccelerateConfigurationRequest method.
 //    req, resp := client.GetBucketAccelerateConfigurationRequest(params)
@@ -1446,7 +1511,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
 func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) {
 	op := &request.Operation{
 		Name:       opGetBucketAccelerateConfiguration,
@@ -1473,7 +1538,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation GetBucketAccelerateConfiguration for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
 func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
 	req, out := c.GetBucketAccelerateConfigurationRequest(input)
 	return out, req.Send()
 }
@@ -1499,19 +1564,18 @@ const opGetBucketAcl = "GetBucketAcl"

 // GetBucketAclRequest generates a "aws/request.Request" representing the
 // client's request for the GetBucketAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See GetBucketAcl for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketAcl method directly
-// instead.
+// See GetBucketAcl for more information on using the GetBucketAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetBucketAclRequest method.
 //    req, resp := client.GetBucketAclRequest(params)
@@ -1521,7 +1585,7 @@ const opGetBucketAcl = "GetBucketAcl"
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
 func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
 	op := &request.Operation{
 		Name:       opGetBucketAcl,
@@ -1548,7 +1612,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
 //
 // See the AWS API reference guide for Amazon Simple Storage Service's
 // API operation GetBucketAcl for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
 func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
 	req, out := c.GetBucketAclRequest(input)
 	return out, req.Send()
 }
@@ -1574,19 +1638,18 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"

 // GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See GetBucketAnalyticsConfiguration for usage and error information.
+// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketAnalyticsConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) @@ -1596,7 +1659,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAnalyticsConfiguration, @@ -1624,7 +1687,7 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { req, out := c.GetBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -1650,19 +1713,18 @@ const opGetBucketCors = "GetBucketCors" // GetBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the GetBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketCors for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketCors method directly -// instead. +// See GetBucketCors for more information on using the GetBucketCors +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketCorsRequest method. // req, resp := client.GetBucketCorsRequest(params) @@ -1672,7 +1734,7 @@ const opGetBucketCors = "GetBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { op := &request.Operation{ Name: opGetBucketCors, @@ -1699,7 +1761,7 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketCors for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { req, out := c.GetBucketCorsRequest(input) return out, req.Send() @@ -1721,23 +1783,96 @@ func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput return out, req.Send() } +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. +// +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the server-side encryption configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about
// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...)
+ return out, req.Send() +} + const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketInventoryConfiguration for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketInventoryConfiguration method directly -// instead. +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketInventoryConfigurationRequest method. // req, resp := client.GetBucketInventoryConfigurationRequest(params) @@ -1747,7 +1882,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opGetBucketInventoryConfiguration, @@ -1775,7 +1910,7 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { req, out := c.GetBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -1801,19 +1936,18 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // GetBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error.
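The GetBucketEncryption operation and its WithContext variant are new in this update. As a rough, hypothetical usage sketch (reusing the client construction from the earlier sketch, plus the standard "context" and "time" imports), the context variant lets a caller bound the call with a deadline; the bucket name is a placeholder.

    // Assumes: client := s3.New(session.Must(session.NewSession()))
    func printBucketEncryption(client *s3.S3) error {
    	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    	defer cancel()

    	out, err := client.GetBucketEncryptionWithContext(ctx, &s3.GetBucketEncryptionInput{
    		Bucket: aws.String("example-bucket"), // placeholder
    	})
    	if err != nil {
    		return err
    	}
    	// The bucket's server-side encryption configuration, per the doc comment above.
    	fmt.Println(out.ServerSideEncryptionConfiguration)
    	return nil
    }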
// -// See GetBucketLifecycle for usage and error information. +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, including error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycle method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLifecycleRequest method. // req, resp := client.GetBucketLifecycleRequest(params) @@ -1823,7 +1957,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") @@ -1853,7 +1987,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) return out, req.Send() @@ -1879,19 +2013,18 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketLifecycleConfiguration for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycleConfiguration method directly -// instead. +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic.
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLifecycleConfigurationRequest method. // req, resp := client.GetBucketLifecycleConfigurationRequest(params) @@ -1901,7 +2034,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opGetBucketLifecycleConfiguration, @@ -1928,7 +2061,7 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycleConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { req, out := c.GetBucketLifecycleConfigurationRequest(input) return out, req.Send() @@ -1954,19 +2087,18 @@ const opGetBucketLocation = "GetBucketLocation" // GetBucketLocationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLocation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See GetBucketLocation for usage and error information. +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, including error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLocation method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLocationRequest method.
// req, resp := client.GetBucketLocationRequest(params) @@ -1976,7 +2108,7 @@ const opGetBucketLocation = "GetBucketLocation" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ Name: opGetBucketLocation, @@ -2003,7 +2135,7 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLocation for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { req, out := c.GetBucketLocationRequest(input) return out, req.Send() @@ -2029,19 +2161,18 @@ const opGetBucketLogging = "GetBucketLogging" // GetBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketLogging for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLogging method directly -// instead. +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLoggingRequest method. // req, resp := client.GetBucketLoggingRequest(params) @@ -2051,7 +2182,7 @@ const opGetBucketLogging = "GetBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { op := &request.Operation{ Name: opGetBucketLogging, @@ -2079,7 +2210,7 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLogging for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { req, out := c.GetBucketLoggingRequest(input) return out, req.Send() @@ -2105,19 +2236,18 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See GetBucketMetricsConfiguration for usage and error information. +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, including error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketMetricsConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketMetricsConfigurationRequest method. // req, resp := client.GetBucketMetricsConfigurationRequest(params) @@ -2127,7 +2257,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketMetricsConfiguration, @@ -2155,7 +2285,7 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { req, out := c.GetBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -2181,19 +2311,18 @@ const opGetBucketNotification = "GetBucketNotification" // GetBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotification operation.
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketNotification for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotification method directly -// instead. +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketNotificationRequest method. // req, resp := client.GetBucketNotificationRequest(params) @@ -2203,7 +2332,7 @@ const opGetBucketNotification = "GetBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") @@ -2233,7 +2362,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) return out, req.Send() @@ -2259,19 +2388,18 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See GetBucketNotificationConfiguration for usage and error information. +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, including error handling.
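Both notification getters appear in this hunk, and the diff shows the older GetBucketNotification logging a deprecation message through the client's configured Logger. As a hypothetical sketch of the preferred, non-deprecated call, note that its input type really is named *GetBucketNotificationConfigurationRequest, as the signatures above show; the bucket name is a placeholder.

    func printNotificationConfig(client *s3.S3) error {
    	// The input type name below comes straight from this file.
    	out, err := client.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
    		Bucket: aws.String("example-bucket"), // placeholder
    	})
    	if err != nil {
    		return err
    	}
    	fmt.Println(out) // *s3.NotificationConfiguration
    	return nil
    }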
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotificationConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketNotificationConfigurationRequest method. // req, resp := client.GetBucketNotificationConfigurationRequest(params) @@ -2281,7 +2409,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { op := &request.Operation{ Name: opGetBucketNotificationConfiguration, @@ -2308,7 +2436,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotificationConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { req, out := c.GetBucketNotificationConfigurationRequest(input) return out, req.Send() @@ -2334,19 +2462,18 @@ const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketPolicy for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketPolicy method directly -// instead. +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketPolicyRequest method.
// req, resp := client.GetBucketPolicyRequest(params) @@ -2356,7 +2483,7 @@ const opGetBucketPolicy = "GetBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { op := &request.Operation{ Name: opGetBucketPolicy, @@ -2383,7 +2510,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { req, out := c.GetBucketPolicyRequest(input) return out, req.Send() @@ -2409,19 +2536,18 @@ const opGetBucketReplication = "GetBucketReplication" // GetBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See GetBucketReplication for usage and error information. +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, including error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketReplication method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketReplicationRequest method. // req, resp := client.GetBucketReplicationRequest(params) @@ -2431,7 +2557,7 @@ const opGetBucketReplication = "GetBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { op := &request.Operation{ Name: opGetBucketReplication, @@ -2458,7 +2584,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketReplication for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) return out, req.Send() @@ -2484,19 +2610,18 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the GetBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketRequestPayment for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketRequestPayment method directly -// instead. +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketRequestPaymentRequest method. // req, resp := client.GetBucketRequestPaymentRequest(params) @@ -2506,7 +2631,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { op := &request.Operation{ Name: opGetBucketRequestPayment, @@ -2533,7 +2658,7 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { req, out := c.GetBucketRequestPaymentRequest(input) return out, req.Send() @@ -2559,19 +2684,18 @@ const opGetBucketTagging = "GetBucketTagging" // GetBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully.
+// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See GetBucketTagging for usage and error information. +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, including error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketTagging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketTaggingRequest method. // req, resp := client.GetBucketTaggingRequest(params) @@ -2581,7 +2705,7 @@ const opGetBucketTagging = "GetBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { op := &request.Operation{ Name: opGetBucketTagging, @@ -2608,7 +2732,7 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { req, out := c.GetBucketTaggingRequest(input) return out, req.Send() @@ -2634,19 +2758,18 @@ const opGetBucketVersioning = "GetBucketVersioning" // GetBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the GetBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketVersioning for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketVersioning method directly -// instead. +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request.
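As another hypothetical one-shot call in the same style, the following sketch reads a bucket's versioning state; the constant comes from the s3 package's generated enums rather than this hunk, and the bucket name is a placeholder.

    func versioningEnabled(client *s3.S3) (bool, error) {
    	out, err := client.GetBucketVersioning(&s3.GetBucketVersioningInput{
    		Bucket: aws.String("example-bucket"), // placeholder
    	})
    	if err != nil {
    		return false, err
    	}
    	// Status is "Enabled", "Suspended", or unset for never-versioned buckets.
    	return aws.StringValue(out.Status) == s3.BucketVersioningStatusEnabled, nil
    }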
// // // Example sending a request using the GetBucketVersioningRequest method. // req, resp := client.GetBucketVersioningRequest(params) @@ -2656,7 +2779,7 @@ const opGetBucketVersioning = "GetBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { op := &request.Operation{ Name: opGetBucketVersioning, @@ -2683,7 +2806,7 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketVersioning for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { req, out := c.GetBucketVersioningRequest(input) return out, req.Send() @@ -2709,19 +2832,18 @@ const opGetBucketWebsite = "GetBucketWebsite" // GetBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the GetBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See GetBucketWebsite for usage and error information. +// See GetBucketWebsite for more information on using the GetBucketWebsite +// API call, including error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketWebsite method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketWebsiteRequest method. // req, resp := client.GetBucketWebsiteRequest(params) @@ -2731,7 +2853,7 @@ const opGetBucketWebsite = "GetBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { op := &request.Operation{ Name: opGetBucketWebsite, @@ -2758,7 +2880,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketWebsite for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { req, out := c.GetBucketWebsiteRequest(input) return out, req.Send() @@ -2784,19 +2906,18 @@ const opGetObject = "GetObject" // GetObjectRequest generates a "aws/request.Request" representing the // client's request for the GetObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObject for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObject method directly -// instead. +// See GetObject for more information on using the GetObject +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectRequest method. // req, resp := client.GetObjectRequest(params) @@ -2806,7 +2927,7 @@ const opGetObject = "GetObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { op := &request.Operation{ Name: opGetObject, @@ -2838,7 +2959,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) return out, req.Send() @@ -2864,19 +2985,18 @@ const opGetObjectAcl = "GetObjectAcl" // GetObjectAclRequest generates a "aws/request.Request" representing the // client's request for the GetObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See GetObjectAcl for usage and error information. +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, including error handling.
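GetObject's doc comment above lists ErrCodeNoSuchKey as a returnable error code. A hypothetical sketch of handling it via the awserr.Error type assertion the comments describe (additional imports: "io", "os", and "github.com/aws/aws-sdk-go/aws/awserr"; bucket and key are placeholders):

    func catObject(client *s3.S3) error {
    	out, err := client.GetObject(&s3.GetObjectInput{
    		Bucket: aws.String("example-bucket"), // placeholder
    		Key:    aws.String("example-key"),    // placeholder
    	})
    	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
    		return fmt.Errorf("object does not exist: %s", aerr.Message())
    	}
    	if err != nil {
    		return err
    	}
    	defer out.Body.Close()
    	_, err = io.Copy(os.Stdout, out.Body) // stream the object body
    	return err
    }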
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectAcl method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectAclRequest method. // req, resp := client.GetObjectAclRequest(params) @@ -2886,7 +3006,7 @@ const opGetObjectAcl = "GetObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { op := &request.Operation{ Name: opGetObjectAcl, @@ -2918,7 +3038,7 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { req, out := c.GetObjectAclRequest(input) return out, req.Send() @@ -2944,19 +3064,18 @@ const opGetObjectTagging = "GetObjectTagging" // GetObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectTagging for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTagging method directly -// instead. +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectTaggingRequest method.
// req, resp := client.GetObjectTaggingRequest(params) @@ -2966,7 +3085,7 @@ const opGetObjectTagging = "GetObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { op := &request.Operation{ Name: opGetObjectTagging, @@ -2993,7 +3112,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { req, out := c.GetObjectTaggingRequest(input) return out, req.Send() @@ -3019,19 +3138,18 @@ const opGetObjectTorrent = "GetObjectTorrent" // GetObjectTorrentRequest generates a "aws/request.Request" representing the // client's request for the GetObjectTorrent operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See GetObjectTorrent for usage and error information. +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, including error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTorrent method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectTorrentRequest method. // req, resp := client.GetObjectTorrentRequest(params) @@ -3041,7 +3159,7 @@ const opGetObjectTorrent = "GetObjectTorrent" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { op := &request.Operation{ Name: opGetObjectTorrent, @@ -3068,7 +3186,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetObjectTorrent for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { req, out := c.GetObjectTorrentRequest(input) return out, req.Send() @@ -3094,19 +3212,18 @@ const opHeadBucket = "HeadBucket" // HeadBucketRequest generates a "aws/request.Request" representing the // client's request for the HeadBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See HeadBucket for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadBucket method directly -// instead. +// See HeadBucket for more information on using the HeadBucket +// API call, including error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the HeadBucketRequest method. // req, resp := client.HeadBucketRequest(params) @@ -3116,7 +3233,7 @@ const opHeadBucket = "HeadBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { op := &request.Operation{ Name: opHeadBucket, @@ -3151,7 +3268,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +// See also: https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { req, out := c.HeadBucketRequest(input) return out, req.Send() @@ -3177,19 +3294,18 @@ const opHeadObject = "HeadObject" // HeadObjectRequest generates a "aws/request.Request" representing the // client's request for the HeadObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See HeadObject for usage and error information. +// See HeadObject for more information on using the HeadObject +// API call, including error handling.
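HeadBucket's doc comment above lists ErrCodeNoSuchBucket, which makes the operation a natural existence check. A hypothetical sketch in the same style as the GetObject example (same awserr import; bucket name is a placeholder):

    func bucketExists(client *s3.S3) (bool, error) {
    	_, err := client.HeadBucket(&s3.HeadBucketInput{
    		Bucket: aws.String("example-bucket"), // placeholder
    	})
    	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
    		return false, nil // bucket does not exist
    	}
    	if err != nil {
    		return false, err // some other failure, e.g. access denied
    	}
    	return true, nil
    }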
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadObject method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the HeadObjectRequest method. // req, resp := client.HeadObjectRequest(params) @@ -3199,7 +3315,7 @@ const opHeadObject = "HeadObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { op := &request.Operation{ Name: opHeadObject, @@ -3231,7 +3347,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation HeadObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { req, out := c.HeadObjectRequest(input) return out, req.Send() @@ -3257,19 +3373,18 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketAnalyticsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketAnalyticsConfigurations for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketAnalyticsConfigurations method directly -// instead. +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method.
// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) @@ -3279,7 +3394,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketAnalyticsConfigurations, @@ -3306,7 +3421,7 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketAnalyticsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { req, out := c.ListBucketAnalyticsConfigurationsRequest(input) return out, req.Send() @@ -3332,19 +3447,18 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketInventoryConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See ListBucketInventoryConfigurations for usage and error information. +// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketInventoryConfigurations method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
// req, resp := client.ListBucketInventoryConfigurationsRequest(params) @@ -3354,7 +3468,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { op := &request.Operation{ Name: opListBucketInventoryConfigurations, @@ -3381,7 +3495,7 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketInventoryConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { req, out := c.ListBucketInventoryConfigurationsRequest(input) return out, req.Send() @@ -3407,19 +3521,18 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketMetricsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketMetricsConfigurations for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketMetricsConfigurations method directly -// instead. +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
// req, resp := client.ListBucketMetricsConfigurationsRequest(params) @@ -3429,7 +3542,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketMetricsConfigurations, @@ -3456,7 +3569,7 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketMetricsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { req, out := c.ListBucketMetricsConfigurationsRequest(input) return out, req.Send() @@ -3482,19 +3595,18 @@ const opListBuckets = "ListBuckets" // ListBucketsRequest generates a "aws/request.Request" representing the // client's request for the ListBuckets operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See ListBuckets for usage and error information. +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBuckets method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketsRequest method. // req, resp := client.ListBucketsRequest(params) @@ -3504,7 +3616,7 @@ const opListBuckets = "ListBuckets" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { op := &request.Operation{ Name: opListBuckets, @@ -3531,7 +3643,7 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBuckets for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { req, out := c.ListBucketsRequest(input) return out, req.Send() @@ -3557,19 +3669,18 @@ const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the // client's request for the ListMultipartUploads operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListMultipartUploads for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListMultipartUploads method directly -// instead. +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListMultipartUploadsRequest method. // req, resp := client.ListMultipartUploadsRequest(params) @@ -3579,7 +3690,7 @@ const opListMultipartUploads = "ListMultipartUploads" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ Name: opListMultipartUploads, @@ -3612,7 +3723,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListMultipartUploads for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { req, out := c.ListMultipartUploadsRequest(input) return out, req.Send() @@ -3688,19 +3799,18 @@ const opListObjectVersions = "ListObjectVersions" // ListObjectVersionsRequest generates a "aws/request.Request" representing the // client's request for the ListObjectVersions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. // -// See ListObjectVersions for usage and error information. +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectVersions method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectVersionsRequest method. // req, resp := client.ListObjectVersionsRequest(params) @@ -3710,7 +3820,7 @@ const opListObjectVersions = "ListObjectVersions" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { op := &request.Operation{ Name: opListObjectVersions, @@ -3743,7 +3853,7 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListObjectVersions for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { req, out := c.ListObjectVersionsRequest(input) return out, req.Send() @@ -3819,19 +3929,18 @@ const opListObjects = "ListObjects" // ListObjectsRequest generates a "aws/request.Request" representing the // client's request for the ListObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. // -// See ListObjects for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjects method directly -// instead. +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectsRequest method. 
// req, resp := client.ListObjectsRequest(params) @@ -3841,7 +3950,7 @@ const opListObjects = "ListObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { op := &request.Operation{ Name: opListObjects, @@ -3881,7 +3990,7 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { req, out := c.ListObjectsRequest(input) return out, req.Send() @@ -3957,19 +4066,18 @@ const opListObjectsV2 = "ListObjectsV2" // ListObjectsV2Request generates a "aws/request.Request" representing the // client's request for the ListObjectsV2 operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See ListObjectsV2 for usage and error information. +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectsV2 method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectsV2Request method. // req, resp := client.ListObjectsV2Request(params) @@ -3979,7 +4087,7 @@ const opListObjectsV2 = "ListObjectsV2" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { op := &request.Operation{ Name: opListObjectsV2, @@ -4020,7 +4128,7 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { req, out := c.ListObjectsV2Request(input) return out, req.Send() @@ -4096,19 +4204,18 @@ const opListParts = "ListParts" // ListPartsRequest generates a "aws/request.Request" representing the // client's request for the ListParts operation.
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. // -// See ListParts for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListParts method directly -// instead. +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListPartsRequest method. // req, resp := client.ListPartsRequest(params) @@ -4118,7 +4225,7 @@ const opListParts = "ListParts" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ Name: opListParts, @@ -4151,7 +4258,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListParts for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { req, out := c.ListPartsRequest(input) return out, req.Send() @@ -4227,19 +4334,18 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketAccelerateConfiguration for usage and error information. +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAccelerateConfiguration method directly -// instead. 
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAccelerateConfigurationRequest method. // req, resp := client.PutBucketAccelerateConfigurationRequest(params) @@ -4249,7 +4355,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAccelerateConfiguration, @@ -4278,7 +4384,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { req, out := c.PutBucketAccelerateConfigurationRequest(input) return out, req.Send() @@ -4304,19 +4410,18 @@ const opPutBucketAcl = "PutBucketAcl" // PutBucketAclRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketAcl for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAcl method directly -// instead. +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAclRequest method.
// req, resp := client.PutBucketAclRequest(params) @@ -4326,7 +4431,7 @@ const opPutBucketAcl = "PutBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { op := &request.Operation{ Name: opPutBucketAcl, @@ -4355,7 +4460,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAcl for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { req, out := c.PutBucketAclRequest(input) return out, req.Send() @@ -4381,19 +4486,18 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See PutBucketAnalyticsConfiguration for usage and error information. +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAnalyticsConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. // req, resp := client.PutBucketAnalyticsConfigurationRequest(params) @@ -4403,7 +4507,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAnalyticsConfiguration, @@ -4433,7 +4537,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAnalyticsConfiguration for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { req, out := c.PutBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -4459,19 +4563,18 @@ const opPutBucketCors = "PutBucketCors" // PutBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the PutBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketCors for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketCors method directly -// instead. +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketCorsRequest method. // req, resp := client.PutBucketCorsRequest(params) @@ -4481,7 +4584,7 @@ const opPutBucketCors = "PutBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { op := &request.Operation{ Name: opPutBucketCors, @@ -4510,7 +4613,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { req, out := c.PutBucketCorsRequest(input) return out, req.Send() @@ -4532,23 +4635,99 @@ func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput return out, req.Send() } +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error.
+// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. +// +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// Creates a new server-side encryption configuration (or replaces an existing +// one, if present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketInventoryConfiguration for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error.
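A minimal sketch of calling the PutBucketEncryption operation added above, assuming an existing *s3.S3 client; the bucket name is hypothetical. The input types follow the same generated shapes as the rest of the package:

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func enableDefaultEncryption(svc *s3.S3) error {
        // Apply AES256 as the bucket's default server-side encryption.
        _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
            Bucket: aws.String("my-bucket"), // hypothetical bucket
            ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
                Rules: []*s3.ServerSideEncryptionRule{{
                    ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
                        SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
                    },
                }},
            },
        })
        return err // an awserr.Error on service API or SDK failure
    }
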
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketInventoryConfiguration method directly -// instead. +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketInventoryConfigurationRequest method. // req, resp := client.PutBucketInventoryConfigurationRequest(params) @@ -4558,7 +4737,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opPutBucketInventoryConfiguration, @@ -4588,7 +4767,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { req, out := c.PutBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -4614,19 +4793,18 @@ const opPutBucketLifecycle = "PutBucketLifecycle" // PutBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See PutBucketLifecycle for usage and error information. +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycle method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic.
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketLifecycleRequest method. // req, resp := client.PutBucketLifecycleRequest(params) @@ -4636,7 +4814,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") @@ -4668,7 +4846,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { req, out := c.PutBucketLifecycleRequest(input) return out, req.Send() @@ -4694,19 +4872,18 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketLifecycleConfiguration for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycleConfiguration method directly -// instead. +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
// req, resp := client.PutBucketLifecycleConfigurationRequest(params) @@ -4716,7 +4893,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opPutBucketLifecycleConfiguration, @@ -4746,7 +4923,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycleConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { req, out := c.PutBucketLifecycleConfigurationRequest(input) return out, req.Send() @@ -4772,19 +4949,18 @@ const opPutBucketLogging = "PutBucketLogging" // PutBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// See PutBucketLogging for usage and error information. +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLogging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketLoggingRequest method. // req, resp := client.PutBucketLoggingRequest(params) @@ -4794,7 +4970,7 @@ const opPutBucketLogging = "PutBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ Name: opPutBucketLogging, @@ -4825,7 +5001,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLogging for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { req, out := c.PutBucketLoggingRequest(input) return out, req.Send() @@ -4851,19 +5027,18 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketMetricsConfiguration for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketMetricsConfiguration method directly -// instead. +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketMetricsConfigurationRequest method. // req, resp := client.PutBucketMetricsConfigurationRequest(params) @@ -4873,7 +5048,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketMetricsConfiguration, @@ -4903,7 +5078,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { req, out := c.PutBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -4929,19 +5104,18 @@ const opPutBucketNotification = "PutBucketNotification" // PutBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotification operation.
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketNotification for usage and error information. +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketNotification method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketNotificationRequest method. // req, resp := client.PutBucketNotificationRequest(params) @@ -4951,7 +5125,7 @@ const opPutBucketNotification = "PutBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") @@ -4983,7 +5157,7 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { req, out := c.PutBucketNotificationRequest(input) return out, req.Send() @@ -5009,19 +5183,18 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. // -// See PutBucketNotificationConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the PutBucketNotificationConfiguration method directly -// instead. +// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketNotificationConfigurationRequest method. // req, resp := client.PutBucketNotificationConfigurationRequest(params) @@ -5031,7 +5204,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { op := &request.Operation{ Name: opPutBucketNotificationConfiguration, @@ -5060,7 +5233,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotificationConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { req, out := c.PutBucketNotificationConfigurationRequest(input) return out, req.Send() @@ -5086,19 +5259,18 @@ const opPutBucketPolicy = "PutBucketPolicy" // PutBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketPolicy for usage and error information. +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketPolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketPolicyRequest method. 
// req, resp := client.PutBucketPolicyRequest(params) @@ -5108,7 +5280,7 @@ const opPutBucketPolicy = "PutBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { op := &request.Operation{ Name: opPutBucketPolicy, @@ -5138,7 +5310,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { req, out := c.PutBucketPolicyRequest(input) return out, req.Send() @@ -5164,19 +5336,18 @@ const opPutBucketReplication = "PutBucketReplication" // PutBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. // -// See PutBucketReplication for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketReplication method directly -// instead. +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketReplicationRequest method. // req, resp := client.PutBucketReplicationRequest(params) @@ -5186,7 +5357,7 @@ const opPutBucketReplication = "PutBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { op := &request.Operation{ Name: opPutBucketReplication, @@ -5216,7 +5387,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketReplication for usage and error information. 
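The rewritten comments above describe the two-step Request pattern: build the request, optionally customize it, then call Send. A minimal sketch of that pattern, assuming a default session; PutBucketTagging, the bucket name, and the injected header are purely illustrative placeholders:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    // Build the request without sending it; "out" is not valid yet.
    req, out := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{
        Bucket: aws.String("example-bucket"), // placeholder
        Tagging: &s3.Tagging{
            TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
        },
    })

    // Inject custom logic into the request lifecycle before sending,
    // e.g. an extra header on the underlying *http.Request.
    req.HTTPRequest.Header.Set("X-Example-Trace", "demo") // illustrative only

    if err := req.Send(); err == nil {
        fmt.Println(out) // valid only after Send returns without error
    }
}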
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { req, out := c.PutBucketReplicationRequest(input) return out, req.Send() @@ -5242,19 +5413,18 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the PutBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketRequestPayment for usage and error information. +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketRequestPayment method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketRequestPaymentRequest method. // req, resp := client.PutBucketRequestPaymentRequest(params) @@ -5264,7 +5434,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { op := &request.Operation{ Name: opPutBucketRequestPayment, @@ -5297,7 +5467,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { req, out := c.PutBucketRequestPaymentRequest(input) return out, req.Send() @@ -5323,19 +5493,18 @@ const opPutBucketTagging = "PutBucketTagging" // PutBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. 
// -// See PutBucketTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketTagging method directly -// instead. +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketTaggingRequest method. // req, resp := client.PutBucketTaggingRequest(params) @@ -5345,7 +5514,7 @@ const opPutBucketTagging = "PutBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { op := &request.Operation{ Name: opPutBucketTagging, @@ -5374,7 +5543,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { req, out := c.PutBucketTaggingRequest(input) return out, req.Send() @@ -5400,19 +5569,18 @@ const opPutBucketVersioning = "PutBucketVersioning" // PutBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the PutBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketVersioning for usage and error information. +// See PutBucketVersioning for more information on using the PutBucketVersioning +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketVersioning method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the PutBucketVersioningRequest method. // req, resp := client.PutBucketVersioningRequest(params) @@ -5422,7 +5590,7 @@ const opPutBucketVersioning = "PutBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { op := &request.Operation{ Name: opPutBucketVersioning, @@ -5452,7 +5620,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketVersioning for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { req, out := c.PutBucketVersioningRequest(input) return out, req.Send() @@ -5478,19 +5646,18 @@ const opPutBucketWebsite = "PutBucketWebsite" // PutBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the PutBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. // -// See PutBucketWebsite for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketWebsite method directly -// instead. +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketWebsiteRequest method. // req, resp := client.PutBucketWebsiteRequest(params) @@ -5500,7 +5667,7 @@ const opPutBucketWebsite = "PutBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { op := &request.Operation{ Name: opPutBucketWebsite, @@ -5529,7 +5696,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketWebsite for usage and error information. 
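These doc comments also note that every operation returns awserr.Error for service API and SDK errors. A minimal sketch of the runtime type assertion they suggest, using PutBucketWebsite with placeholder values and a default session:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
        Bucket: aws.String("example-bucket"), // placeholder
        WebsiteConfiguration: &s3.WebsiteConfiguration{
            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
        },
    })
    if err != nil {
        // Runtime type assertion on awserr.Error, as the comments describe.
        if aerr, ok := err.(awserr.Error); ok {
            fmt.Println(aerr.Code(), aerr.Message())
        }
    }
}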
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { req, out := c.PutBucketWebsiteRequest(input) return out, req.Send() @@ -5555,19 +5722,18 @@ const opPutObject = "PutObject" // PutObjectRequest generates a "aws/request.Request" representing the // client's request for the PutObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutObject for usage and error information. +// See PutObject for more information on using the PutObject +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObject method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutObjectRequest method. // req, resp := client.PutObjectRequest(params) @@ -5577,7 +5743,7 @@ const opPutObject = "PutObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { op := &request.Operation{ Name: opPutObject, @@ -5604,7 +5770,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { req, out := c.PutObjectRequest(input) return out, req.Send() @@ -5630,19 +5796,18 @@ const opPutObjectAcl = "PutObjectAcl" // PutObjectAclRequest generates a "aws/request.Request" representing the // client's request for the PutObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. // -// See PutObjectAcl for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectAcl method directly -// instead. +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutObjectAclRequest method. // req, resp := client.PutObjectAclRequest(params) @@ -5652,7 +5817,7 @@ const opPutObjectAcl = "PutObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { op := &request.Operation{ Name: opPutObjectAcl, @@ -5685,7 +5850,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { req, out := c.PutObjectAclRequest(input) return out, req.Send() @@ -5711,19 +5876,18 @@ const opPutObjectTagging = "PutObjectTagging" // PutObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the PutObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutObjectTagging for usage and error information. +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectTagging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutObjectTaggingRequest method. 
// req, resp := client.PutObjectTaggingRequest(params) @@ -5733,7 +5897,7 @@ const opPutObjectTagging = "PutObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { op := &request.Operation{ Name: opPutObjectTagging, @@ -5760,7 +5924,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { req, out := c.PutObjectTaggingRequest(input) return out, req.Send() @@ -5786,19 +5950,18 @@ const opRestoreObject = "RestoreObject" // RestoreObjectRequest generates a "aws/request.Request" representing the // client's request for the RestoreObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfuly. // -// See RestoreObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RestoreObject method directly -// instead. +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RestoreObjectRequest method. 
// req, resp := client.RestoreObjectRequest(params) @@ -5808,7 +5971,7 @@ const opRestoreObject = "RestoreObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { op := &request.Operation{ Name: opRestoreObject, @@ -5840,7 +6003,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" // This operation is not allowed against this storage tier // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { req, out := c.RestoreObjectRequest(input) return out, req.Send() @@ -5862,123 +6025,203 @@ func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput return out, req.Send() } -const opUploadPart = "UploadPart" +const opSelectObjectContent = "SelectObjectContent" -// UploadPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadPart operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See UploadPart for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UploadPart method directly -// instead. +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// // Example sending a request using the UploadPartRequest method. -// req, resp := client.UploadPartRequest(params) +// +// // Example sending a request using the SelectObjectContentRequest method.
+// req, resp := client.SelectObjectContentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { op := &request.Operation{ - Name: opUploadPart, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", } if input == nil { - input = &UploadPartInput{} + input = &SelectObjectContentInput{} } - output = &UploadPartOutput{} + output = &SelectObjectContentOutput{} req = c.newRequest(op, input, output) + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) return } -// UploadPart API operation for Amazon Simple Storage Service. -// -// Uploads a part in a multipart upload. +// SelectObjectContent API operation for Amazon Simple Storage Service. // -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. +// This operation filters the contents of an Amazon S3 object based on a simple +// Structured Query Language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON or +// CSV) of the object. Amazon S3 uses this to parse object data into records, +// and returns only records that match the specified SQL expression. You must +// also specify the data serialization format for the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation UploadPart for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) +// API operation SelectObjectContent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) return out, req.Send() } -// UploadPartWithContext is the same as UploadPart with the addition of +// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of // the ability to pass a context and additional request options. // -// See UploadPart for details on how to use this API operation. +// See SelectObjectContent for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) +func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUploadPartCopy = "UploadPartCopy" +const opUploadPart = "UploadPart" -// UploadPartCopyRequest generates a "aws/request.Request" representing the -// client's request for the UploadPartCopy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. // -// See UploadPartCopy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UploadPartCopy method directly -// instead. +// See UploadPart for more information on using the UploadPart +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the UploadPartCopyRequest method. -// req, resp := client.UploadPartCopyRequest(params) +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy -func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { op := &request.Operation{ - Name: opUploadPartCopy, + Name: opUploadPart, HTTPMethod: "PUT", HTTPPath: "/{Bucket}/{Key+}", } if input == nil { - input = &UploadPartCopyInput{} + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. 
Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} } output = &UploadPartCopyOutput{} @@ -5996,7 +6239,7 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation UploadPartCopy for usage and error information. 
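As the UploadPartWithContext comments above explain, each operation also has a *WithContext variant: the context must be non-nil and is used for request cancellation. A minimal sketch with a timeout, assuming a default session and an already-started multipart upload; all identifiers are placeholders:

package main

import (
    "bytes"
    "context"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.Must(session.NewSession()))

    // A non-nil context is required; cancellation aborts the in-flight call.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    _, _ = svc.UploadPartWithContext(ctx, &s3.UploadPartInput{
        Bucket:     aws.String("example-bucket"),    // placeholder
        Key:        aws.String("example-key"),       // placeholder
        UploadId:   aws.String("example-upload-id"), // from CreateMultipartUpload
        PartNumber: aws.Int64(1),
        Body:       bytes.NewReader([]byte("part data")),
    })
}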
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { req, out := c.UploadPartCopyRequest(input) return out, req.Send() @@ -6020,7 +6263,6 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp // Specifies the days since the initiation of an Incomplete Multipart Upload // that Lifecycle will wait before permanently removing all parts of the upload. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` @@ -6045,7 +6287,6 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest type AbortMultipartUploadInput struct { _ struct{} `type:"structure"` @@ -6103,6 +6344,13 @@ func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInp return s } +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { s.Key = &v @@ -6121,7 +6369,6 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -6146,7 +6393,6 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration type AccelerateConfiguration struct { _ struct{} `type:"structure"` @@ -6170,7 +6416,6 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy type AccessControlPolicy struct { _ struct{} `type:"structure"` @@ -6222,7 +6467,45 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator +// Container for information regarding the access control for replicas. +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // The override value for the owner of the replica object. + // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. 
+func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + type AnalyticsAndOperator struct { _ struct{} `type:"structure"` @@ -6275,7 +6558,6 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -6350,7 +6632,6 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -6394,7 +6675,6 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -6457,7 +6737,6 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` @@ -6512,6 +6791,13 @@ func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDes return s } +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetBucketAccountId sets the BucketAccountId field's value. func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { s.BucketAccountId = &v @@ -6530,12 +6816,11 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket type Bucket struct { _ struct{} `type:"structure"` // Date the bucket was created. - CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + CreationDate *time.Time `type:"timestamp"` // The name of the bucket. Name *string `type:"string"` @@ -6563,7 +6848,6 @@ func (s *Bucket) SetName(v string) *Bucket { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -6610,10 +6894,12 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus type BucketLoggingStatus struct { _ struct{} `type:"structure"` + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. 
LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -6648,7 +6934,6 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration type CORSConfiguration struct { _ struct{} `type:"structure"` @@ -6695,7 +6980,6 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule type CORSRule struct { _ struct{} `type:"structure"` @@ -6779,7 +7063,149 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration +// Describes how a CSV-formatted input object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + + // Single character used to indicate a row should be ignored when present at + // the start of a row. + Comments *string `type:"string"` + + // Value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values: None, Ignore, Use. + FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVInput) GoString() string { + return s.String() +} + +// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. +func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { + s.AllowQuotedRecordDelimiter = &v + return s +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. +func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how CSV-formatted results are formatted. +type CSVOutput struct { + _ struct{} `type:"structure"` + + // Value used to separate individual fields in a record. 
+ FieldDelimiter *string `type:"string"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether or not all output fields should be quoted. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // Value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` @@ -6837,7 +7263,6 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix type CommonPrefix struct { _ struct{} `type:"structure"` @@ -6860,7 +7285,6 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest type CompleteMultipartUploadInput struct { _ struct{} `type:"structure" payload:"MultipartUpload"` @@ -6870,7 +7294,7 @@ type CompleteMultipartUploadInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. @@ -6920,6 +7344,13 @@ func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUpl return s } +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. 
func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { s.Key = &v @@ -6944,7 +7375,6 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -6993,6 +7423,13 @@ func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUp return s } +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetETag sets the ETag field's value. func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { s.ETag = &v @@ -7041,7 +7478,6 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload type CompletedMultipartUpload struct { _ struct{} `type:"structure"` @@ -7064,7 +7500,6 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart type CompletedPart struct { _ struct{} `type:"structure"` @@ -7098,7 +7533,6 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition type Condition struct { _ struct{} `type:"structure"` @@ -7141,7 +7575,32 @@ func (s *Condition) SetKeyPrefixEquals(v string) *Condition { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest +type ContinuationEvent struct { + _ struct{} `locationName:"ContinuationEvent" type:"structure"` +} + +// String returns the string representation +func (s ContinuationEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinuationEvent) GoString() string { + return s.String() +} + +// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events. +func (s *ContinuationEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ContinuationEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + type CopyObjectInput struct { _ struct{} `type:"structure"` @@ -7178,14 +7637,14 @@ type CopyObjectInput struct { CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time.
- CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // Specifies the algorithm to use when decrypting the source object (e.g., AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` @@ -7201,7 +7660,7 @@ type CopyObjectInput struct { CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -7318,6 +7777,13 @@ func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { return s } +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { s.CacheControl = &v @@ -7390,6 +7856,13 @@ func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput return s } +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + // SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { s.CopySourceSSECustomerKeyMD5 = &v @@ -7462,6 +7935,13 @@ func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { return s } +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { s.SSECustomerKeyMD5 = &v @@ -7504,7 +7984,6 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` @@ -7605,13 +8084,12 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult type CopyObjectResult struct { _ struct{} `type:"structure"` ETag *string `type:"string"` - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` } // String returns the string representation @@ -7636,7 +8114,6 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult type CopyPartResult struct { _ struct{} `type:"structure"` @@ -7644,7 +8121,7 @@ type CopyPartResult struct { ETag *string `type:"string"` // Date and time at which the object was uploaded. 
- LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` } // String returns the string representation @@ -7669,7 +8146,6 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration type CreateBucketConfiguration struct { _ struct{} `type:"structure"` @@ -7694,7 +8170,6 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest type CreateBucketInput struct { _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` @@ -7704,7 +8179,7 @@ type CreateBucketInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. @@ -7758,6 +8233,13 @@ func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { return s } +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { s.CreateBucketConfiguration = v @@ -7794,7 +8276,6 @@ func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput type CreateBucketOutput struct { _ struct{} `type:"structure"` @@ -7817,7 +8298,6 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest type CreateMultipartUploadInput struct { _ struct{} `type:"structure"` @@ -7845,7 +8325,7 @@ type CreateMultipartUploadInput struct { ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -7899,6 +8379,9 @@ type CreateMultipartUploadInput struct { // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + // The tag-set for the object. The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. 
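// Note on the Tagging field added in the CreateMultipartUploadInput hunk above:
// it expects the object's tag-set encoded as URL query parameters (sent as the
// x-amz-tagging header). A minimal sketch of building that value with net/url;
// illustrative only, not part of this patch, and the tag keys and values below
// are placeholders:
//
//	package main
//
//	import (
//		"fmt"
//		"net/url"
//
//		"github.com/aws/aws-sdk-go/service/s3"
//	)
//
//	func main() {
//		tags := url.Values{}
//		tags.Set("env", "prod")
//		tags.Set("project", "terraform")
//
//		// Encode sorts keys and percent-encodes values, matching the
//		// "URL Query parameters" format the field documentation asks for.
//		input := (&s3.CreateMultipartUploadInput{}).SetTagging(tags.Encode())
//		fmt.Println(*input.Tagging) // env=prod&project=terraform
//	}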
@@ -7946,6 +8429,13 @@ func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadI return s } +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { s.CacheControl = &v @@ -8036,6 +8526,13 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipar return s } +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { s.SSECustomerKeyMD5 = &v @@ -8060,18 +8557,23 @@ func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartU return s } +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + // SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { s.WebsiteRedirectLocation = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // Id of the lifecycle rule that makes a multipart upload eligible for abort // operation. @@ -8137,6 +8639,13 @@ func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUpload return s } +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { s.Key = &v @@ -8179,7 +8688,6 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete type Delete struct { _ struct{} `type:"structure"` @@ -8236,7 +8744,6 @@ func (s *Delete) SetQuiet(v bool) *Delete { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest type DeleteBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` @@ -8283,13 +8790,19 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBuc return s } +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8304,7 +8817,6 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest type DeleteBucketCorsInput struct { _ struct{} `type:"structure"` @@ -8341,7 +8853,13 @@ func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -8356,27 +8874,29 @@ func (s DeleteBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest -type DeleteBucketInput struct { +type DeleteBucketEncryptionInput struct { _ struct{} `type:"structure"` + // The name of the bucket containing the server-side encryption configuration + // to delete. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketInput) String() string { +func (s DeleteBucketEncryptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketInput) GoString() string { +func (s DeleteBucketEncryptionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } @@ -8388,21 +8908,84 @@ func (s *DeleteBucketInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest -type DeleteBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure"` +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // The name of the bucket containing the inventory configuration to delete. - // - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} - // The ID used to identify the inventory configuration. 
+// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -8440,13 +9023,19 @@ func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBuc return s } +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8461,7 +9050,6 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest type DeleteBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -8498,7 +9086,13 @@ func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -8513,7 +9107,6 @@ func (s DeleteBucketLifecycleOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest type DeleteBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` @@ -8560,13 +9153,19 @@ func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucke return s } +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8581,7 +9180,6 @@ func (s DeleteBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput type DeleteBucketOutput struct { _ struct{} `type:"structure"` } @@ -8596,7 +9194,6 @@ func (s DeleteBucketOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest type DeleteBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -8633,7 +9230,13 @@ func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -8648,7 +9251,6 @@ func (s DeleteBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest type DeleteBucketReplicationInput struct { _ struct{} `type:"structure"` @@ -8685,7 +9287,13 @@ func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicat return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketReplicationOutput struct { _ struct{} 
`type:"structure"` } @@ -8700,7 +9308,6 @@ func (s DeleteBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest type DeleteBucketTaggingInput struct { _ struct{} `type:"structure"` @@ -8737,7 +9344,13 @@ func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -8752,7 +9365,6 @@ func (s DeleteBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest type DeleteBucketWebsiteInput struct { _ struct{} `type:"structure"` @@ -8789,7 +9401,13 @@ func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -8804,7 +9422,6 @@ func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -8816,7 +9433,7 @@ type DeleteMarkerEntry struct { Key *string `min:"1" type:"string"` // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` Owner *Owner `type:"structure"` @@ -8864,7 +9481,6 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest type DeleteObjectInput struct { _ struct{} `type:"structure"` @@ -8923,6 +9539,13 @@ func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { return s } +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { s.Key = &v @@ -8947,7 +9570,6 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -8992,7 +9614,6 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest type DeleteObjectTaggingInput struct { _ struct{} `type:"structure"` @@ -9041,6 +9662,13 @@ func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput return s } +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. 
func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { s.Key = &v @@ -9053,7 +9681,6 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -9077,7 +9704,6 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest type DeleteObjectsInput struct { _ struct{} `type:"structure" payload:"Delete"` @@ -9085,7 +9711,7 @@ type DeleteObjectsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Delete is a required field - Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The concatenation of the authentication device's serial number, a space, // and the value that is displayed on your authentication device. @@ -9135,6 +9761,13 @@ func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { return s } +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelete sets the Delete field's value. func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { s.Delete = v @@ -9153,7 +9786,6 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput type DeleteObjectsOutput struct { _ struct{} `type:"structure"` @@ -9194,7 +9826,6 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject type DeletedObject struct { _ struct{} `type:"structure"` @@ -9241,16 +9872,26 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination +// Container for replication destination information. type Destination struct { _ struct{} `type:"structure"` + // Container for information regarding the access control for replicas. + AccessControlTranslation *AccessControlTranslation `type:"structure"` + + // Account ID of the destination bucket. Currently this is only being verified + // if Access Control Translation is enabled + Account *string `type:"string"` + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store // replicas of the object identified by the rule. // // Bucket is a required field Bucket *string `type:"string" required:"true"` + // Container for information regarding encryption based configuration for replicas. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + // The class of storage used to store the object. 
StorageClass *string `type:"string" enum:"StorageClass"` } @@ -9271,6 +9912,11 @@ func (s *Destination) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9278,19 +9924,154 @@ func (s *Destination) Validate() error { return nil } +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + // SetBucket sets the Bucket field's value. func (s *Destination) SetBucket(v string) *Destination { s.Bucket = &v return s } +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + // SetStorageClass sets the StorageClass field's value. func (s *Destination) SetStorageClass(v string) *Destination { s.StorageClass = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error +// Describes the server-side encryption that will be applied to the restore +// results. +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (e.g., AES256, aws:kms). + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the AWS + // KMS key ID to use for encryption of job results. + KMSKeyId *string `type:"string"` +} + +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *Encryption) SetEncryptionType(v string) *Encryption { + s.EncryptionType = &v + return s +} + +// SetKMSContext sets the KMSContext field's value. +func (s *Encryption) SetKMSContext(v string) *Encryption { + s.KMSContext = &v + return s +} + +// SetKMSKeyId sets the KMSKeyId field's value. +func (s *Encryption) SetKMSKeyId(v string) *Encryption { + s.KMSKeyId = &v + return s +} + +// Container for information regarding encryption based configuration for replicas. 
+type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The id of the KMS key used to encrypt the replica object. + ReplicaKmsKeyID *string `type:"string"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. +func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration { + s.ReplicaKmsKeyID = &v + return s +} + +type EndEvent struct { + _ struct{} `locationName:"EndEvent" type:"structure"` +} + +// String returns the string representation +func (s EndEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EndEvent) GoString() string { + return s.String() +} + +// The EndEvent is an event in the SelectObjectContentEventStream group of events. +func (s *EndEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *EndEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + type Error struct { _ struct{} `type:"structure"` @@ -9337,7 +10118,6 @@ func (s *Error) SetVersionId(v string) *Error { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument type ErrorDocument struct { _ struct{} `type:"structure"` @@ -9380,7 +10160,6 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { } // Container for key value pair that defines the criteria for the filter rule. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule type FilterRule struct { _ struct{} `type:"structure"` @@ -9388,6 +10167,7 @@ type FilterRule struct { // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. // Overlapping prefixes and suffixes are not supported. For more information, // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. 
Name *string `type:"string" enum:"FilterRuleName"` Value *string `type:"string"` @@ -9415,7 +10195,6 @@ func (s *FilterRule) SetValue(v string) *FilterRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest type GetBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure"` @@ -9454,7 +10233,13 @@ func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAc return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -9478,7 +10263,6 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest type GetBucketAclInput struct { _ struct{} `type:"structure"` @@ -9515,7 +10299,13 @@ func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketAclOutput struct { _ struct{} `type:"structure"` @@ -9547,7 +10337,6 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest type GetBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` @@ -9594,13 +10383,19 @@ func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAna return s } +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -9624,7 +10419,6 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest type GetBucketCorsInput struct { _ struct{} `type:"structure"` @@ -9661,7 +10455,13 @@ func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketCorsOutput struct { _ struct{} `type:"structure"` @@ -9684,7 +10484,76 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest +type GetBucketEncryptionInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + type GetBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` @@ -9731,13 +10600,19 @@ func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInv return s } +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput type GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -9761,7 +10636,6 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest type GetBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure"` @@ -9798,7 +10672,13 @@ func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLif return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` @@ -9821,7 +10701,6 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest type GetBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -9858,7 +10737,13 @@ func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` @@ -9881,7 +10766,6 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest type GetBucketLocationInput struct { _ struct{} `type:"structure"` @@ -9918,9 +10802,15 @@ func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput -type GetBucketLocationOutput struct { - _ struct{} `type:"structure"` +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -9941,7 +10831,6 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest type GetBucketLoggingInput struct { _ struct{} `type:"structure"` @@ -9978,10 +10867,19 @@ func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. 
LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -10001,7 +10899,6 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest type GetBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` @@ -10048,13 +10945,19 @@ func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetri return s } +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -10078,7 +10981,6 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest type GetBucketNotificationConfigurationRequest struct { _ struct{} `type:"structure"` @@ -10117,7 +11019,13 @@ func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBuck return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -10154,7 +11062,13 @@ func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -10178,7 +11092,6 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest type GetBucketReplicationInput struct { _ struct{} `type:"structure"` @@ -10215,7 +11128,13 @@ func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -10240,7 +11159,6 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest type GetBucketRequestPaymentInput struct { _ struct{} `type:"structure"` @@ -10277,7 +11195,13 @@ func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaym return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketRequestPaymentOutput struct { _ 
struct{} `type:"structure"` @@ -10301,7 +11225,6 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest type GetBucketTaggingInput struct { _ struct{} `type:"structure"` @@ -10338,7 +11261,13 @@ func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` @@ -10362,7 +11291,6 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest type GetBucketVersioningInput struct { _ struct{} `type:"structure"` @@ -10399,7 +11327,13 @@ func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -10434,7 +11368,6 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest type GetBucketWebsiteInput struct { _ struct{} `type:"structure"` @@ -10471,7 +11404,13 @@ func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` @@ -10518,7 +11457,6 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest type GetObjectAclInput struct { _ struct{} `type:"structure"` @@ -10573,6 +11511,13 @@ func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { return s } +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { s.Key = &v @@ -10591,7 +11536,6 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput type GetObjectAclOutput struct { _ struct{} `type:"structure"` @@ -10633,7 +11577,6 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest type GetObjectInput struct { _ struct{} `type:"structure"` @@ -10646,7 +11589,7 @@ type GetObjectInput struct { // Return the object only if it has been modified since the specified time, // otherwise return a 304 (not modified). 
- IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one // specified, otherwise return a 304 (not modified). @@ -10654,7 +11597,7 @@ type GetObjectInput struct { // Return the object only if it has not been modified since the specified time, // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -10690,7 +11633,7 @@ type GetObjectInput struct { ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` // Sets the Expires header of the response. - ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` // Specifies the algorithm to use to when encrypting the object (e.g., AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` @@ -10746,6 +11689,13 @@ func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { return s } +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetIfMatch sets the IfMatch field's value. func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { s.IfMatch = &v @@ -10842,6 +11792,13 @@ func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { return s } +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { s.SSECustomerKeyMD5 = &v @@ -10854,7 +11811,6 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -10904,7 +11860,7 @@ type GetObjectOutput struct { Expires *string `location:"header" locationName:"Expires" type:"string"` // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. 
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` @@ -11138,7 +12094,6 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest type GetObjectTaggingInput struct { _ struct{} `type:"structure"` @@ -11186,6 +12141,13 @@ func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { return s } +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { s.Key = &v @@ -11198,7 +12160,6 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -11230,7 +12191,6 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest type GetObjectTorrentInput struct { _ struct{} `type:"structure"` @@ -11282,6 +12242,13 @@ func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { return s } +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { s.Key = &v @@ -11294,7 +12261,6 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -11327,7 +12293,6 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters type GlacierJobParameters struct { _ struct{} `type:"structure"` @@ -11366,11 +12331,10 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant type Grant struct { _ struct{} `type:"structure"` - Grantee *Grantee `type:"structure"` + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Specifies the permission given to the grantee. 
Permission *string `type:"string" enum:"Permission"` @@ -11413,7 +12377,6 @@ func (s *Grant) SetPermission(v string) *Grant { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -11488,7 +12451,6 @@ func (s *Grantee) SetURI(v string) *Grantee { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest type HeadBucketInput struct { _ struct{} `type:"structure"` @@ -11525,7 +12487,13 @@ func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -11540,7 +12508,6 @@ func (s HeadBucketOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest type HeadObjectInput struct { _ struct{} `type:"structure"` @@ -11553,7 +12520,7 @@ type HeadObjectInput struct { // Return the object only if it has been modified since the specified time, // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one // specified, otherwise return a 304 (not modified). @@ -11561,7 +12528,7 @@ type HeadObjectInput struct { // Return the object only if it has not been modified since the specified time, // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -11636,6 +12603,13 @@ func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetIfMatch sets the IfMatch field's value. func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { s.IfMatch = &v @@ -11696,6 +12670,13 @@ func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { s.SSECustomerKeyMD5 = &v @@ -11708,7 +12689,6 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput type HeadObjectOutput struct { _ struct{} `type:"structure"` @@ -11752,7 +12732,7 @@ type HeadObjectOutput struct { Expires *string `location:"header" locationName:"Expires" type:"string"` // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` @@ -11965,7 +12945,6 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument type IndexDocument struct { _ struct{} `type:"structure"` @@ -12007,7 +12986,6 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator type Initiator struct { _ struct{} `type:"structure"` @@ -12041,7 +13019,49 @@ func (s *Initiator) SetID(v string) *Initiator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration +// Describes the serialization format of the object. +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` + + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` +} + +// String returns the string representation +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -12170,7 +13190,6 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination type InventoryDestination struct { _ struct{} `type:"structure"` @@ -12215,7 +13234,55 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered Inventory reports. 
+ SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered Inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + type InventoryFilter struct { _ struct{} `type:"structure"` @@ -12254,7 +13321,6 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` @@ -12267,6 +13333,10 @@ type InventoryS3BucketDestination struct { // Bucket is a required field Bucket *string `type:"string" required:"true"` + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + // Specifies the output format of the inventory results. // // Format is a required field @@ -12295,6 +13365,11 @@ func (s *InventoryS3BucketDestination) Validate() error { if s.Format == nil { invalidParams.Add(request.NewErrParamRequired("Format")) } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12314,6 +13389,19 @@ func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDes return s } +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + // SetFormat sets the Format field's value. func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { s.Format = &v @@ -12326,7 +13414,6 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule type InventorySchedule struct { _ struct{} `type:"structure"` @@ -12365,8 +13452,53 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { return s } +type JSONInput struct { + _ struct{} `type:"structure"` + + // The type of JSON. Valid values: Document, Lines. 
+ Type *string `type:"string" enum:"JSONType"` +} + +// String returns the string representation +func (s JSONInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONInput) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v + return s +} + +type JSONOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual records in the output. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s JSONOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONOutput) GoString() string { + return s.String() +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} + // Container for object key name prefix and suffix filtering rules. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter type KeyFilter struct { _ struct{} `type:"structure"` @@ -12392,7 +13524,6 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { } // Container for specifying the AWS Lambda notification configuration. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration type LambdaFunctionConfiguration struct { _ struct{} `type:"structure"` @@ -12401,6 +13532,7 @@ type LambdaFunctionConfiguration struct { // Container for object key name filtering rules. For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` // Optional unique identifier for configurations in a notification configuration. @@ -12464,7 +13596,6 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration type LifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -12511,7 +13642,6 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration type LifecycleExpiration struct { _ struct{} `type:"structure"` @@ -12558,7 +13688,6 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule type LifecycleRule struct { _ struct{} `type:"structure"` @@ -12682,7 +13811,6 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { // This is used in a Lifecycle Rule Filter to apply a logical AND to two or // more predicates. The Lifecycle Rule will apply to any object matching all // of the predicates configured inside the And operator. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` @@ -12737,7 +13865,6 @@ func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { // The Filter is used to identify objects that a Lifecycle Rule applies to. // A Filter must have exactly one of Prefix, Tag, or And specified. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter type LifecycleRuleFilter struct { _ struct{} `type:"structure"` @@ -12801,7 +13928,6 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest type ListBucketAnalyticsConfigurationsInput struct { _ struct{} `type:"structure"` @@ -12844,13 +13970,19 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucket return s } +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { s.ContinuationToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -12905,7 +14037,6 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest type ListBucketInventoryConfigurationsInput struct { _ struct{} `type:"structure"` @@ -12950,13 +14081,19 @@ func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucket return s } +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { s.ContinuationToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -13011,7 +14148,6 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest type ListBucketMetricsConfigurationsInput struct { _ struct{} `type:"structure"` @@ -13056,13 +14192,19 @@ func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMe return s } +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. 
func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { s.ContinuationToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -13119,7 +14261,6 @@ func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v strin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput type ListBucketsInput struct { _ struct{} `type:"structure"` } @@ -13134,7 +14275,6 @@ func (s ListBucketsInput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput type ListBucketsOutput struct { _ struct{} `type:"structure"` @@ -13165,7 +14305,6 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest type ListMultipartUploadsInput struct { _ struct{} `type:"structure"` @@ -13231,6 +14370,13 @@ func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInp return s } +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { s.Delimiter = &v @@ -13267,7 +14413,6 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` @@ -13328,6 +14473,13 @@ func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOu return s } +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCommonPrefixes sets the CommonPrefixes field's value. func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { s.CommonPrefixes = v @@ -13394,7 +14546,6 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest type ListObjectVersionsInput struct { _ struct{} `type:"structure"` @@ -13455,6 +14606,13 @@ func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { return s } +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. 
func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { s.Delimiter = &v @@ -13491,7 +14649,6 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` @@ -13619,7 +14776,6 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest type ListObjectsInput struct { _ struct{} `type:"structure"` @@ -13682,6 +14838,13 @@ func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { return s } +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { s.Delimiter = &v @@ -13718,7 +14881,6 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput type ListObjectsOutput struct { _ struct{} `type:"structure"` @@ -13823,7 +14985,6 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request type ListObjectsV2Input struct { _ struct{} `type:"structure"` @@ -13894,6 +15055,13 @@ func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { return s } +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { s.ContinuationToken = &v @@ -13942,7 +15110,6 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output type ListObjectsV2Output struct { _ struct{} `type:"structure"` @@ -14076,7 +15243,6 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest type ListPartsInput struct { _ struct{} `type:"structure"` @@ -14143,6 +15309,13 @@ func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { return s } +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *ListPartsInput) SetKey(v string) *ListPartsInput { s.Key = &v @@ -14173,12 +15346,11 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput type ListPartsOutput struct { _ struct{} `type:"structure"` // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // Id of the lifecycle rule that makes a multipart upload eligible for abort // operation. 
@@ -14250,6 +15422,13 @@ func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { return s } +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetInitiator sets the Initiator field's value. func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { s.Initiator = v @@ -14316,48 +15495,79 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled -type LoggingEnabled struct { +// Describes an S3 location that will receive the results of the restore request. +type Location struct { _ struct{} `type:"structure"` - // Specifies the bucket where you want Amazon S3 to store server access logs. - // You can have your logs delivered to any bucket that you own, including the - // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should - // choose a different TargetPrefix for each source bucket so that the delivered - // log files can be distinguished by key. - TargetBucket *string `type:"string"` + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` - TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` - // This element lets you specify a prefix for the keys that the log files will - // be stored under. - TargetPrefix *string `type:"string"` + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Describes the server-side encryption that will be applied to the restore + // results. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` } // String returns the string representation -func (s LoggingEnabled) String() string { +func (s Location) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LoggingEnabled) GoString() string { +func (s Location) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *LoggingEnabled) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} - if s.TargetGrants != nil { - for i, v := range s.TargetGrants { +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) } } } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -14365,28 +15575,167 @@ func (s *LoggingEnabled) Validate() error { return nil } -// SetTargetBucket sets the TargetBucket field's value. -func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { - s.TargetBucket = &v +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v return s } -// SetTargetGrants sets the TargetGrants field's value. -func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { - s.TargetGrants = v +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v return s } -// SetTargetPrefix sets the TargetPrefix field's value. -func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { - s.TargetPrefix = &v +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator -type MetricsAndOperator struct { - _ struct{} `type:"structure"` - +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Container for logging information. Presence of this element indicates that +// logging is enabled. Parameters TargetBucket and TargetPrefix are required +// in this case. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. 
You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + // The prefix used when evaluating an AND predicate. 
Prefix *string `type:"string"` @@ -14436,7 +15785,6 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -14491,7 +15839,6 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter type MetricsFilter struct { _ struct{} `type:"structure"` @@ -14555,12 +15902,11 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload type MultipartUpload struct { _ struct{} `type:"structure"` // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + Initiated *time.Time `type:"timestamp"` // Identifies who initiated the multipart upload. Initiator *Initiator `type:"structure"` @@ -14628,14 +15974,14 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { // configuration action on a bucket that has versioning enabled (or suspended) // to request that Amazon S3 delete noncurrent object versions at a specific // period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` } @@ -14656,18 +16002,19 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA or GLACIER storage class. If your bucket is -// versioning-enabled (or versioning is suspended), you can set this action -// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA -// or GLACIER storage class at a specific period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition +// transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your +// bucket is versioning-enabled (or versioning is suspended), you can set this +// action to request that Amazon S3 transition noncurrent object versions to +// the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period +// in the object's lifetime. type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. 
@@ -14698,7 +16045,6 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi // Container for specifying the notification configuration of the bucket. If // this element is empty, notifications are turned off on the bucket. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration type NotificationConfiguration struct { _ struct{} `type:"structure"` @@ -14777,7 +16123,6 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -14818,7 +16163,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf // Container for object key name filtering rules. For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter +// in the Amazon Simple Storage Service Developer Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -14842,7 +16187,6 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object type Object struct { _ struct{} `type:"structure"` @@ -14850,7 +16194,7 @@ type Object struct { Key *string `min:"1" type:"string"` - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` Owner *Owner `type:"structure"` @@ -14906,7 +16250,6 @@ func (s *Object) SetStorageClass(v string) *Object { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -14957,7 +16300,6 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion type ObjectVersion struct { _ struct{} `type:"structure"` @@ -14971,7 +16313,7 @@ type ObjectVersion struct { Key *string `min:"1" type:"string"` // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` Owner *Owner `type:"structure"` @@ -15043,7 +16385,78 @@ func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` +} + +// String returns the string representation +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + type Owner struct { _ struct{} `type:"structure"` @@ -15074,7 +16487,6 @@ func (s *Owner) SetID(v string) *Owner { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part type Part struct { _ struct{} `type:"structure"` @@ -15082,7 +16494,7 @@ type Part struct { ETag *string `type:"string"` // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` // Part number identifying the part. This is a positive integer between 1 and // 10,000. @@ -15126,14 +16538,94 @@ func (s *Part) SetSize(v int64) *Part { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest +type Progress struct { + _ struct{} `type:"structure"` + + // Current number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // Current number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // Current number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation +func (s Progress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Progress) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Progress) SetBytesProcessed(v int64) *Progress { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Progress) SetBytesReturned(v int64) *Progress { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Progress) SetBytesScanned(v int64) *Progress { + s.BytesScanned = &v + return s +} + +type ProgressEvent struct { + _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` + + // The Progress event details. 
+ Details *Progress `locationName:"Details" type:"structure"` +} + +// String returns the string representation +func (s ProgressEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProgressEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent { + s.Details = v + return s +} + +// The ProgressEvent is an event in the SelectObjectContentEventStream group of events. +func (s *ProgressEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ProgressEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + type PutBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure" payload:"AccelerateConfiguration"` // Specifies the Accelerate Configuration you want to set for the bucket. // // AccelerateConfiguration is a required field - AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Name of the bucket for which the accelerate configuration is set. // @@ -15179,7 +16671,13 @@ func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAc return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -15194,14 +16692,13 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest type PutBucketAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15269,6 +16766,13 @@ func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { return s } +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetGrantFullControl sets the GrantFullControl field's value.
func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { s.GrantFullControl = &v @@ -15299,7 +16803,6 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -15314,14 +16817,13 @@ func (s PutBucketAclOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest type PutBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` // The configuration and any analyses for the analytics filter. // // AnalyticsConfiguration is a required field - AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"` + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The name of the bucket to which an analytics configuration is stored. // @@ -15380,13 +16882,19 @@ func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAna return s } +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -15401,7 +16909,6 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest type PutBucketCorsInput struct { _ struct{} `type:"structure" payload:"CORSConfiguration"` @@ -15409,7 +16916,7 @@ type PutBucketCorsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // CORSConfiguration is a required field - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15449,13 +16956,19 @@ func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { return s } +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCORSConfiguration sets the CORSConfiguration field's value. 
func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { s.CORSConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -15470,51 +16983,44 @@ func (s PutBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest -type PutBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` +type PutBucketEncryptionInput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - // The name of the bucket where the inventory configuration will be stored. + // The name of the bucket for which the server-side encryption configuration + // is set. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the inventory configuration. + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. // - // InventoryConfiguration is a required field - InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"` + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketInventoryConfigurationInput) String() string { +func (s PutBucketEncryptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketInventoryConfigurationInput) GoString() string { +func (s PutBucketEncryptionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketInventoryConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} +func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.InventoryConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) } - if s.InventoryConfiguration != nil { - if err := s.InventoryConfiguration.Validate(); err != nil { - invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) } } @@ -15525,46 +17031,137 @@ func (s *PutBucketInventoryConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. 
-func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { +func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { s.Bucket = &v return s } -// SetId sets the Id field's value. -func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { - s.Id = &v - return s +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetInventoryConfiguration sets the InventoryConfiguration field's value. -func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { - s.InventoryConfiguration = v +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput -type PutBucketInventoryConfigurationOutput struct { +type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketInventoryConfigurationOutput) String() string { +func (s PutBucketEncryptionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketInventoryConfigurationOutput) GoString() string { +func (s PutBucketEncryptionOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest -type PutBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` +type PutBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15601,13 +17198,19 @@ func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLif return s } +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { s.LifecycleConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -15622,14 +17225,13 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest type PutBucketLifecycleInput struct { _ struct{} `type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15666,13 +17268,19 @@ func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { return s } +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetLifecycleConfiguration sets the LifecycleConfiguration field's value. func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { s.LifecycleConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -15687,7 +17295,6 @@ func (s PutBucketLifecycleOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest type PutBucketLoggingInput struct { _ struct{} `type:"structure" payload:"BucketLoggingStatus"` @@ -15695,7 +17302,7 @@ type PutBucketLoggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // BucketLoggingStatus is a required field - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15735,13 +17342,19 @@ func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { return s } +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetBucketLoggingStatus sets the BucketLoggingStatus field's value. 
func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { s.BucketLoggingStatus = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -15756,7 +17369,6 @@ func (s PutBucketLoggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest type PutBucketMetricsConfigurationInput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -15773,7 +17385,7 @@ type PutBucketMetricsConfigurationInput struct { // Specifies the metrics configuration. // // MetricsConfiguration is a required field - MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"` + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15816,6 +17428,13 @@ func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetri return s } +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { s.Id = &v @@ -15828,7 +17447,6 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -15843,7 +17461,6 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest type PutBucketNotificationConfigurationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` @@ -15854,7 +17471,7 @@ type PutBucketNotificationConfigurationInput struct { // this element is empty, notifications are turned off on the bucket. // // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15894,13 +17511,19 @@ func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucket return s } +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetNotificationConfiguration sets the NotificationConfiguration field's value. 
func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { s.NotificationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -15915,7 +17538,6 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest type PutBucketNotificationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` @@ -15923,7 +17545,7 @@ type PutBucketNotificationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15958,13 +17580,19 @@ func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationI return s } +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetNotificationConfiguration sets the NotificationConfiguration field's value. func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { s.NotificationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -15979,13 +17607,16 @@ func (s PutBucketNotificationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest type PutBucketPolicyInput struct { _ struct{} `type:"structure" payload:"Policy"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + // The bucket policy as a JSON document. // // Policy is a required field @@ -16024,13 +17655,25 @@ func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { return s } +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + // SetPolicy sets the Policy field's value. 
func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { s.Policy = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -16045,7 +17688,6 @@ func (s PutBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest type PutBucketReplicationInput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -16056,7 +17698,7 @@ type PutBucketReplicationInput struct { // replication configuration size can be up to 2 MB. // // ReplicationConfiguration is a required field - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -16096,13 +17738,19 @@ func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInp return s } +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetReplicationConfiguration sets the ReplicationConfiguration field's value. func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { s.ReplicationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -16117,7 +17765,6 @@ func (s PutBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest type PutBucketRequestPaymentInput struct { _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` @@ -16125,7 +17772,7 @@ type PutBucketRequestPaymentInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // RequestPaymentConfiguration is a required field - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -16165,13 +17812,19 @@ func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaym return s } +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. 
func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { s.RequestPaymentConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -16186,7 +17839,6 @@ func (s PutBucketRequestPaymentOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest type PutBucketTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` @@ -16194,7 +17846,7 @@ type PutBucketTaggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -16234,13 +17886,19 @@ func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { return s } +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetTagging sets the Tagging field's value. func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { s.Tagging = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -16255,7 +17913,6 @@ func (s PutBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest type PutBucketVersioningInput struct { _ struct{} `type:"structure" payload:"VersioningConfiguration"` @@ -16267,7 +17924,7 @@ type PutBucketVersioningInput struct { MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` // VersioningConfiguration is a required field - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -16302,6 +17959,13 @@ func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput return s } +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetMFA sets the MFA field's value. 
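// Editorial sketch (not generated code): enabling versioning with the
// PutBucketVersioningInput type above. "svc" and the bucket name are
// hypothetical; the x-amz-mfa header (the MFA field) is only needed when the
// request changes MFA Delete, so it is omitted here.
//
//	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
//		Bucket: aws.String("example-bucket"),
//		VersioningConfiguration: &s3.VersioningConfiguration{
//			Status: aws.String(s3.BucketVersioningStatusEnabled),
//		},
//	})
//	if err != nil {
//		// handle the error
//	}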
func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { s.MFA = &v @@ -16314,7 +17978,6 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -16329,7 +17992,6 @@ func (s PutBucketVersioningOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest type PutBucketWebsiteInput struct { _ struct{} `type:"structure" payload:"WebsiteConfiguration"` @@ -16337,7 +17999,7 @@ type PutBucketWebsiteInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // WebsiteConfiguration is a required field - WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -16377,13 +18039,19 @@ func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { return s } +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetWebsiteConfiguration sets the WebsiteConfiguration field's value. func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { s.WebsiteConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -16398,14 +18066,13 @@ func (s PutBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest type PutObjectAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16491,6 +18158,13 @@ func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { return s } +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetGrantFullControl sets the GrantFullControl field's value. 
func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { s.GrantFullControl = &v @@ -16539,7 +18213,6 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -16564,7 +18237,6 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest type PutObjectInput struct { _ struct{} `type:"structure" payload:"Body"` @@ -16597,11 +18269,14 @@ type PutObjectInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -16713,6 +18388,13 @@ func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { s.CacheControl = &v @@ -16743,6 +18425,12 @@ func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + // SetContentType sets the ContentType field's value. func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { s.ContentType = &v @@ -16809,6 +18497,13 @@ func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
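// Editorial sketch (not generated code): the ContentMD5 field added to
// PutObjectInput above carries a base64-encoded MD5 digest of the payload so
// S3 can verify integrity. Assumes the caller imports bytes, crypto/md5, and
// encoding/base64; "svc" and the bucket/key names are illustrative.
//
//	body := []byte("hello world")
//	sum := md5.Sum(body) // 16-byte digest of the exact bytes being uploaded
//	_, err := svc.PutObject(&s3.PutObjectInput{
//		Bucket:     aws.String("example-bucket"),
//		Key:        aws.String("greeting.txt"),
//		Body:       bytes.NewReader(body),
//		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
//	})
//	if err != nil {
//		// handle the error
//	}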
func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { s.SSECustomerKeyMD5 = &v @@ -16845,7 +18540,6 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput type PutObjectOutput struct { _ struct{} `type:"structure"` @@ -16940,7 +18634,6 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest type PutObjectTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` @@ -16951,7 +18644,7 @@ type PutObjectTaggingInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -16999,6 +18692,13 @@ func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { return s } +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { s.Key = &v @@ -17017,7 +18717,6 @@ func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput type PutObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -17042,7 +18741,6 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput // Container for specifying a configuration when you want Amazon S3 to publish // events to an Amazon Simple Queue Service (Amazon SQS) queue. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration type QueueConfiguration struct { _ struct{} `type:"structure"` @@ -17051,6 +18749,7 @@ type QueueConfiguration struct { // Container for object key name filtering rules. For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` // Optional unique identifier for configurations in a notification configuration. @@ -17114,7 +18813,6 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -17164,7 +18862,45 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect +type RecordsEvent struct { + _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` + + // The byte array of partial, one or more result records. + // + // Payload is automatically base64 encoded/decoded by the SDK.
+ Payload []byte `type:"blob"` +} + +// String returns the string representation +func (s RecordsEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordsEvent) GoString() string { + return s.String() +} + +// SetPayload sets the Payload field's value. +func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent { + s.Payload = v + return s +} + +// The RecordsEvent is an event in the SelectObjectContentEventStream group of events. +func (s *RecordsEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *RecordsEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + s.Payload = make([]byte, len(msg.Payload)) + copy(s.Payload, msg.Payload) + return nil +} + type Redirect struct { _ struct{} `type:"structure"` @@ -17233,7 +18969,6 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo type RedirectAllRequestsTo struct { _ struct{} `type:"structure"` @@ -17284,7 +19019,6 @@ func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { // Container for replication rules. You can add as many as 1,000 rules. Total // replication configuration size can be up to 2 MB. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration type ReplicationConfiguration struct { _ struct{} `type:"structure"` @@ -17349,10 +19083,12 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule +// Container for information about a particular replication rule. type ReplicationRule struct { _ struct{} `type:"structure"` + // Container for replication destination information. + // // Destination is a required field Destination *Destination `type:"structure" required:"true"` @@ -17366,6 +19102,9 @@ type ReplicationRule struct { // Prefix is a required field Prefix *string `type:"string" required:"true"` + // Container for filters that define which source objects should be replicated. + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + // The rule is ignored if status is not Enabled. // // Status is a required field @@ -17399,6 +19138,11 @@ func (s *ReplicationRule) Validate() error { invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) } } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } + if invalidParams.Len() > 0 { return invalidParams @@ -17424,13 +19168,18 @@ func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { return s } +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + // SetStatus sets the Status field's value.
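// Editorial sketch (not generated code): the SourceSelectionCriteria field
// added to ReplicationRule above lets a rule opt SSE-KMS-encrypted objects
// into replication. A minimal rule might look like the following; the ARN,
// names, and values are placeholders, and the SourceSelectionCriteria and
// SseKmsEncryptedObjects types are defined later in this file.
//
//	rule := &s3.ReplicationRule{
//		Prefix: aws.String(""), // an empty prefix matches every object
//		Status: aws.String(s3.ReplicationRuleStatusEnabled),
//		Destination: &s3.Destination{
//			Bucket: aws.String("arn:aws:s3:::replica-bucket"),
//		},
//		SourceSelectionCriteria: &s3.SourceSelectionCriteria{
//			SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{
//				Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled),
//			},
//		},
//	}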
func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { s.Status = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` @@ -17469,7 +19218,30 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest +type RequestProgress struct { + _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s RequestProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestProgress) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + type RestoreObjectInput struct { _ struct{} `type:"structure" payload:"RestoreRequest"` @@ -17485,7 +19257,8 @@ type RestoreObjectInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -17530,87 +19303,999 @@ func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { return s } +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { s.Key = &v return s } -// SetRequestPayer sets the RequestPayer field's value. -func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { - s.RequestPayer = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. 
+ RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. 
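// Editorial sketch (not generated code): issuing a classic Glacier restore
// with the RestoreRequest type above. "svc" and the names are illustrative;
// per the field docs, Days and GlacierJobParameters must be omitted for
// restores that specify OutputLocation.
//
//	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
//		Bucket: aws.String("example-bucket"),
//		Key:    aws.String("archived/object.bin"),
//		RestoreRequest: &s3.RestoreRequest{
//			Days: aws.Int64(2), // keep the restored copy for two days
//			GlacierJobParameters: &s3.GlacierJobParameters{
//				Tier: aws.String(s3.TierStandard),
//			},
//		},
//	})
//	if err != nil {
//		// handle the error
//	}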
+func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. + // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +type Rule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your + // bucket is versioning-enabled (or versioning is suspended), you can set this + // action to request that Amazon S3 transition noncurrent object versions to + // the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period + // in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies.
+ // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Specifies the use of SSE-KMS to encrypt delivered Inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the AWS Key Management Service (KMS) master encryption + // key to use for encrypting Inventory reports. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SSEKMS) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSEKMS) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v + return s +} + +// Specifies the use of SSE-S3 to encrypt delivered Inventory reports.
+type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s SSES3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSES3) GoString() string { + return s.String() +} + +// SelectObjectContentEventStream provides handling of EventStreams for +// the SelectObjectContent API. +// +// Use this type to receive SelectObjectContentEventStream events. The events +// can be read from the Events channel member. +// +// The events that can be received are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStream struct { + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made. + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer +} + +// Close closes the EventStream. This will also cause the Events channel to be +// closed. You can use the closing of the Events channel to terminate your +// application's read from the API's EventStream. +// +// Will close the underlying EventStream reader. For EventStream over HTTP +// connection this will also close the HTTP connection. +// +// Close must be called when done using the EventStream API. Not calling Close +// may result in resource leaks. +func (es *SelectObjectContentEventStream) Close() (err error) { + es.Reader.Close() + return es.Err() +} + +// Err returns any error that occurred while reading EventStream Events from +// the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.Reader.Err(); err != nil { + return err + } + es.StreamCloser.Close() + + return nil +} + +// Events returns a channel to read EventStream Events from the +// SelectObjectContent API. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +// SelectObjectContentEventStreamEvent groups together all EventStream +// events read from the SelectObjectContent API. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamEvent interface { + eventSelectObjectContentEventStream() +} + +// SelectObjectContentEventStreamReader provides the interface for reading EventStream +// Events from the SelectObjectContent API. The +// default implementation for this interface will be SelectObjectContentEventStream. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan SelectObjectContentEventStreamEvent + + // Close will close the underlying event stream reader.
For event stream over + // HTTP this will also close the HTTP connection. + Close() error + + // Returns any error that has occurred while reading from the event stream. + Err() error +} + +type readSelectObjectContentEventStream struct { + eventReader *eventstreamapi.EventReader + stream chan SelectObjectContentEventStreamEvent + errVal atomic.Value + + done chan struct{} + closeOnce sync.Once +} + +func newReadSelectObjectContentEventStream( + reader io.ReadCloser, + unmarshalers request.HandlerList, + logger aws.Logger, + logLevel aws.LogLevelType, +) *readSelectObjectContentEventStream { + r := &readSelectObjectContentEventStream{ + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + } + + r.eventReader = eventstreamapi.NewEventReader( + reader, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: unmarshalers, + }, + r.unmarshalerForEventType, + ) + r.eventReader.UseLogger(logger, logLevel) + + return r +} + +// Close will close the underlying event stream reader. For EventStream over +// HTTP this will also close the HTTP connection. +func (r *readSelectObjectContentEventStream) Close() error { + r.closeOnce.Do(r.safeClose) + + return r.Err() +} + +func (r *readSelectObjectContentEventStream) safeClose() { + close(r.done) + err := r.eventReader.Close() + if err != nil { + r.errVal.Store(err) + } +} + +func (r *readSelectObjectContentEventStream) Err() error { + if v := r.errVal.Load(); v != nil { + return v.(error) + } + + return nil +} + +func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return r.stream +} + +func (r *readSelectObjectContentEventStream) readEventStream() { + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + r.errVal.Store(err) + return + } + + select { + case r.stream <- event.(SelectObjectContentEventStreamEvent): + case <-r.done: + return + } + } +} + +func (r *readSelectObjectContentEventStream) unmarshalerForEventType( + eventType string, +) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "Cont": + return &ContinuationEvent{}, nil + + case "End": + return &EndEvent{}, nil + + case "Progress": + return &ProgressEvent{}, nil + + case "Records": + return &RecordsEvent{}, nil + + case "Stats": + return &StatsEvent{}, nil + default: + return nil, awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), + nil, + ) + } +} + +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must also specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records, and returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, go to S3Select API Documentation +// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The S3 Bucket.
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the format of the data in the object that is being queried. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // The Object Key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Describes the format of the data that you want Amazon S3 to return in response. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` + + // Specifies if periodic request progress information should be enabled. + RequestProgress *RequestProgress `type:"structure"` + + // The SSE Algorithm used to encrypt the object. For more information, go to + // Server-Side Encryption (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The SSE Customer Key. For more information, go to Server-Side Encryption + // (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // The SSE Customer Key MD5. For more information, go to Server-Side Encryption + // (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` +} + +// String returns the string representation +func (s SelectObjectContentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectObjectContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value.
+func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput { + s.Bucket = &v + return s +} + +func (s *SelectObjectContentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpression sets the Expression field's value. +func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v + return s +} + +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // Use EventStream to use the API's stream. + EventStream *SelectObjectContentEventStream `type:"structure"` +} + +// String returns the string representation +func (s SelectObjectContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentOutput) GoString() string { + return s.String() +} + +// SetEventStream sets the EventStream field's value. +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} + +func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { + if r.Error != nil { + return + } + reader := newReadSelectObjectContentEventStream( + r.HTTPResponse.Body, + r.Handlers.UnmarshalStream, + r.Config.Logger, + r.Config.LogLevel.Value(), + ) + go reader.readEventStream() + + eventStream := &SelectObjectContentEventStream{ + StreamCloser: r.HTTPResponse.Body, + Reader: reader, + } + s.EventStream = eventStream +} + +// Describes the parameters for Select job types. +type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. 
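// Editorial sketch (not generated code): draining the event stream wired up
// by runEventStreamLoop above. Written from calling code inside a function
// that returns error; "svc", the query, and the names are placeholders, and
// the caller is assumed to import os and log alongside aws and s3.
//
//	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
//		Bucket:              aws.String("example-bucket"),
//		Key:                 aws.String("data.csv"),
//		Expression:          aws.String("SELECT * FROM S3Object s"),
//		ExpressionType:      aws.String(s3.ExpressionTypeSql),
//		InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
//		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
//	})
//	if err != nil {
//		return err
//	}
//	defer resp.EventStream.Close()
//	for ev := range resp.EventStream.Events() {
//		switch e := ev.(type) {
//		case *s3.RecordsEvent:
//			os.Stdout.Write(e.Payload) // raw result bytes
//		case *s3.StatsEvent:
//			log.Printf("scanned %d bytes", aws.Int64Value(e.Details.BytesScanned))
//		}
//	}
//	// Err reports any failure encountered while reading events.
//	return resp.EventStream.Err()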
+ // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v return s } -// SetRestoreRequest sets the RestoreRequest field's value. -func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { - s.RestoreRequest = v +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v return s } -// SetVersionId sets the VersionId field's value. -func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { - s.VersionId = &v +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput -type RestoreObjectOutput struct { +// Describes the default server-side encryption to apply to new objects in the +// bucket. If Put Object request does not specify any server-side encryption, +// this default encryption will be applied. +type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // KMS master key ID to use for the default encryption. This parameter is allowed + // if SSEAlgorithm is aws:kms. + KMSMasterKeyID *string `type:"string"` + + // Server-side encryption algorithm to use for the default encryption. 
+ // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` } // String returns the string representation -func (s RestoreObjectOutput) String() string { +func (s ServerSideEncryptionByDefault) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreObjectOutput) GoString() string { +func (s ServerSideEncryptionByDefault) GoString() string { return s.String() } -// SetRequestCharged sets the RequestCharged field's value. -func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { - s.RequestCharged = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest -type RestoreRequest struct { +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Container for server-side encryption configuration rules. Currently S3 supports +// one rule only. +type ServerSideEncryptionConfiguration struct { _ struct{} `type:"structure"` - // Lifetime of the active copy in days + // Container for information about a particular server-side encryption configuration + // rule. // - // Days is a required field - Days *int64 `type:"integer" required:"true"` - - // Glacier related prameters pertaining to this job. - GlacierJobParameters *GlacierJobParameters `type:"structure"` + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } // String returns the string representation -func (s RestoreRequest) String() string { +func (s ServerSideEncryptionConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreRequest) GoString() string { +func (s ServerSideEncryptionConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *RestoreRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} - if s.Days == nil { - invalidParams.Add(request.NewErrParamRequired("Days")) +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) } - if s.GlacierJobParameters != nil { - if err := s.GlacierJobParameters.Validate(); err != nil { - invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } } } @@ -17620,51 +20305,40 @@ func (s *RestoreRequest) Validate() error { return nil } -// SetDays sets the Days field's value. -func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { - s.Days = &v - return s -} - -// SetGlacierJobParameters sets the GlacierJobParameters field's value. -func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { - s.GlacierJobParameters = v +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule -type RoutingRule struct { +// Container for information about a particular server-side encryption configuration +// rule. +type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` - // A container for describing a condition that must be met for the specified - // redirect to apply. For example, 1. If request is for pages in the /docs folder, - // redirect to the /documents folder. 2. If request results in HTTP error 4xx, - // redirect request to another host where you might process the error. - Condition *Condition `type:"structure"` - - // Container for redirect information. You can redirect requests to another - // host, to another page, or with another protocol. In the event of an error, - // you can can specify a different error code to return. - // - // Redirect is a required field - Redirect *Redirect `type:"structure" required:"true"` + // Describes the default server-side encryption to apply to new objects in the + // bucket. If Put Object request does not specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } // String returns the string representation -func (s RoutingRule) String() string { +func (s ServerSideEncryptionRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RoutingRule) GoString() string { +func (s ServerSideEncryptionRule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
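// Editorial sketch (not generated code): the encryption configuration and rule
// types above are typically sent with a PutBucketEncryption call; that call is
// defined elsewhere in this package in this SDK version and is an assumption
// here, as are "svc" and the bucket name.
//
//	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
//		Bucket: aws.String("example-bucket"),
//		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
//			Rules: []*s3.ServerSideEncryptionRule{{
//				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
//					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
//				},
//			}},
//		},
//	})
//	if err != nil {
//		// handle the error
//	}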
-func (s *RoutingRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} - if s.Redirect == nil { - invalidParams.Add(request.NewErrParamRequired("Redirect")) +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -17673,75 +20347,75 @@ func (s *RoutingRule) Validate() error { return nil } -// SetCondition sets the Condition field's value. -func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { - s.Condition = v - return s -} - -// SetRedirect sets the Redirect field's value. -func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { - s.Redirect = v +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule -type Rule struct { +// Container for filters that define which source objects should be replicated. +type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Container for filter information of selection of KMS Encrypted S3 objects. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} - Expiration *LifecycleExpiration `type:"structure"` +// String returns the string representation +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` +// GoString returns the string representation +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } - // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA or GLACIER storage class. If your bucket is - // versioning-enabled (or versioning is suspended), you can set this action - // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA - // or GLACIER storage class at a specific period in the object's lifetime. 
- NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Prefix identifying one or more objects to which the rule applies. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. +// Container for filter information of selection of KMS Encrypted S3 objects. +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // The replication for KMS encrypted S3 objects is disabled if status is not + // Enabled. // // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - Transition *Transition `type:"structure"` + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` } // String returns the string representation -func (s Rule) String() string { +func (s SseKmsEncryptedObjects) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Rule) GoString() string { +func (s SseKmsEncryptedObjects) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Rule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Rule"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} if s.Status == nil { invalidParams.Add(request.NewErrParamRequired("Status")) } @@ -17752,55 +20426,93 @@ func (s *Rule) Validate() error { return nil } -// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. -func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { - s.AbortIncompleteMultipartUpload = v +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v return s } -// SetExpiration sets the Expiration field's value. -func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { - s.Expiration = v - return s +type Stats struct { + _ struct{} `type:"structure"` + + // Total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // Total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // Total number of object bytes scanned. + BytesScanned *int64 `type:"long"` } -// SetID sets the ID field's value. -func (s *Rule) SetID(v string) *Rule { - s.ID = &v - return s +// String returns the string representation +func (s Stats) String() string { + return awsutil.Prettify(s) } -// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. -func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { - s.NoncurrentVersionExpiration = v - return s +// GoString returns the string representation +func (s Stats) GoString() string { + return s.String() } -// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. 
-func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
-	s.NoncurrentVersionTransition = v
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+	s.BytesProcessed = &v
 	return s
 }
 
-// SetPrefix sets the Prefix field's value.
-func (s *Rule) SetPrefix(v string) *Rule {
-	s.Prefix = &v
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+	s.BytesReturned = &v
 	return s
 }
 
-// SetStatus sets the Status field's value.
-func (s *Rule) SetStatus(v string) *Rule {
-	s.Status = &v
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+	s.BytesScanned = &v
 	return s
 }
 
-// SetTransition sets the Transition field's value.
-func (s *Rule) SetTransition(v *Transition) *Rule {
-	s.Transition = v
+type StatsEvent struct {
+	_ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+	// The Stats event details.
+	Details *Stats `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation
+func (s StatsEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StatsEvent) GoString() string {
+	return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+	s.Details = v
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	if err := payloadUnmarshaler.UnmarshalPayload(
+		bytes.NewReader(msg.Payload), s,
+	); err != nil {
+		return err
+	}
+	return nil
+}
+
 type StorageClassAnalysis struct {
 	_ struct{} `type:"structure"`
 
@@ -17840,7 +20552,6 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport)
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport
 type StorageClassAnalysisDataExport struct {
 	_ struct{} `type:"structure"`
 
@@ -17898,7 +20609,6 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag
 type Tag struct {
 	_ struct{} `type:"structure"`
 
@@ -17954,7 +20664,6 @@ func (s *Tag) SetValue(v string) *Tag {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging
 type Tagging struct {
 	_ struct{} `type:"structure"`
 
@@ -18001,11 +20710,10 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant
 type TargetGrant struct {
 	_ struct{} `type:"structure"`
 
-	Grantee *Grantee `type:"structure"`
+	Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
 
 	// Logging permissions assigned to the Grantee for the bucket.
Permission *string `type:"string" enum:"BucketLogsPermission"` @@ -18050,7 +20758,6 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { // Container for specifying the configuration when you want Amazon S3 to publish // events to an Amazon Simple Notification Service (Amazon SNS) topic. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration type TopicConfiguration struct { _ struct{} `type:"structure"` @@ -18059,6 +20766,7 @@ type TopicConfiguration struct { // Container for object key name filtering rules. For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` // Optional unique identifier for configurations in a notification configuration. @@ -18122,7 +20830,6 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -18174,7 +20881,6 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition type Transition struct { _ struct{} `type:"structure"` @@ -18218,7 +20924,6 @@ func (s *Transition) SetStorageClass(v string) *Transition { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest type UploadPartCopyInput struct { _ struct{} `type:"structure"` @@ -18235,14 +20940,14 @@ type UploadPartCopyInput struct { CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // The range of bytes to copy from the source object. The range value must use // the form bytes=first-last, where the first and last are the zero-based byte @@ -18345,6 +21050,13 @@ func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCopySource sets the CopySource field's value. 
func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { s.CopySource = &v @@ -18393,6 +21105,13 @@ func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartC return s } +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + // SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { s.CopySourceSSECustomerKeyMD5 = &v @@ -18429,6 +21148,13 @@ func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { s.SSECustomerKeyMD5 = &v @@ -18441,7 +21167,6 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` @@ -18526,7 +21251,6 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest type UploadPartInput struct { _ struct{} `type:"structure" payload:"Body"` @@ -18542,6 +21266,9 @@ type UploadPartInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -18628,12 +21355,25 @@ func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContentLength sets the ContentLength field's value. func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { s.ContentLength = &v return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + // SetKey sets the Key field's value. func (s *UploadPartInput) SetKey(v string) *UploadPartInput { s.Key = &v @@ -18664,6 +21404,13 @@ func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { s.SSECustomerKeyMD5 = &v @@ -18676,7 +21423,6 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -18752,7 +21498,6 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -18787,7 +21532,6 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration type WebsiteConfiguration struct { _ struct{} `type:"structure"` @@ -18950,6 +21694,17 @@ const ( BucketVersioningStatusSuspended = "Suspended" ) +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + // Requests Amazon S3 to encode the object keys in the response and specifies // the encoding method to use. An object key may contain any Unicode character; // however, XML 1.0 parser cannot parse some characters, such as characters @@ -18999,6 +21754,22 @@ const ( ExpirationStatusDisabled = "Disabled" ) +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + const ( // FilterRuleNamePrefix is a FilterRuleName enum value FilterRuleNamePrefix = "prefix" @@ -19010,6 +21781,9 @@ const ( const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" ) const ( @@ -19046,6 +21820,17 @@ const ( // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" +) + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" ) const ( @@ -19104,6 +21889,12 @@ const ( // ObjectStorageClassGlacier is a ObjectStorageClass enum value ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" ) const ( @@ -19111,6 +21902,11 @@ const ( ObjectVersionStorageClassStandard = "STANDARD" ) +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + const ( // PayerRequester is a Payer enum value PayerRequester = "Requester" @@ -19144,6 +21940,14 @@ const ( ProtocolHttps = "https" ) +const ( + // QuoteFieldsAlways is a QuoteFields enum value + 
+	QuoteFieldsAlways = "ALWAYS"
+
+	// QuoteFieldsAsneeded is a QuoteFields enum value
+	QuoteFieldsAsneeded = "ASNEEDED"
+)
+
 const (
 	// ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value
 	ReplicationRuleStatusEnabled = "Enabled"
@@ -19182,6 +21986,11 @@ const (
 	RequestPayerRequester = "requester"
 )
 
+const (
+	// RestoreRequestTypeSelect is a RestoreRequestType enum value
+	RestoreRequestTypeSelect = "SELECT"
+)
+
 const (
 	// ServerSideEncryptionAes256 is a ServerSideEncryption enum value
 	ServerSideEncryptionAes256 = "AES256"
@@ -19190,6 +21999,14 @@ const (
 	ServerSideEncryptionAwsKms = "aws:kms"
 )
 
+const (
+	// SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value
+	SseKmsEncryptedObjectsStatusEnabled = "Enabled"
+
+	// SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value
+	SseKmsEncryptedObjectsStatusDisabled = "Disabled"
+)
+
 const (
 	// StorageClassStandard is a StorageClass enum value
 	StorageClassStandard = "STANDARD"
@@ -19199,6 +22016,9 @@ const (
 
 	// StorageClassStandardIa is a StorageClass enum value
 	StorageClassStandardIa = "STANDARD_IA"
+
+	// StorageClassOnezoneIa is a StorageClass enum value
+	StorageClassOnezoneIa = "ONEZONE_IA"
 )
 
 const (
@@ -19231,6 +22051,9 @@ const (
 
 	// TransitionStorageClassStandardIa is a TransitionStorageClass enum value
 	TransitionStorageClassStandardIa = "STANDARD_IA"
+
+	// TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value
+	TransitionStorageClassOnezoneIa = "ONEZONE_IA"
 )
 
 const (
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
new file mode 100644
index 0000000..5c8ce5c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
@@ -0,0 +1,249 @@
+package s3
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+	contentMD5Header    = "Content-Md5"
+	contentSha256Header = "X-Amz-Content-Sha256"
+	amzTeHeader         = "X-Amz-Te"
+	amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
+
+	appendMD5TxEncoding = "append-md5"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+	h := md5.New()
+
+	if !aws.IsReaderSeekable(r.Body) {
+		if r.Config.Logger != nil {
+			r.Config.Logger.Log(fmt.Sprintf(
+				"Unable to compute Content-MD5 for unseekable body, S3.%s",
+				r.Operation.Name))
+		}
+		return
+	}
+
+	if _, err := copySeekableBody(h, r.Body); err != nil {
+		r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
+		return
+	}
+
+	// encode the md5 checksum in base64 and set the request header.
+	v := base64.StdEncoding.EncodeToString(h.Sum(nil))
+	r.HTTPRequest.Header.Set(contentMD5Header, v)
+}
+
+// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
+// request. If the body is not seekable, or S3DisableContentMD5Validation is
+// set, this handler will be ignored.
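+//
+// Illustrative note (an assumption, not part of the upstream documentation):
+// this handler returns early when the aws.Config option it checks is set, so
+// the hashing can be opted out of at client construction, e.g.:
+//
+//    sess := session.Must(session.NewSession(&aws.Config{
+//        S3DisableContentMD5Validation: aws.Bool(true),
+//    }))
+//    svc := s3.New(sess) // body hashes are then not computed for uploads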
+func computeBodyHashes(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
+		return
+	}
+
+	var md5Hash, sha256Hash hash.Hash
+	hashers := make([]io.Writer, 0, 2)
+
+	// Determine upfront which hashes can be set without overriding
+	// user-provided header data.
+	if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
+		md5Hash = md5.New()
+		hashers = append(hashers, md5Hash)
+	}
+
+	if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
+		sha256Hash = sha256.New()
+		hashers = append(hashers, sha256Hash)
+	}
+
+	// Create the destination writer based on the hashes that are not already
+	// provided by the user.
+	var dst io.Writer
+	switch len(hashers) {
+	case 0:
+		return
+	case 1:
+		dst = hashers[0]
+	default:
+		dst = io.MultiWriter(hashers...)
+	}
+
+	if _, err := copySeekableBody(dst, r.Body); err != nil {
+		r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
+		return
+	}
+
+	// For the hashes created, set the associated headers that the user did not
+	// already provide.
+	if md5Hash != nil {
+		sum := make([]byte, md5.Size)
+		encoded := make([]byte, md5Base64EncLen)
+
+		base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
+	}
+
+	if sha256Hash != nil {
+		encoded := make([]byte, sha256HexEncLen)
+		sum := make([]byte, sha256.Size)
+
+		hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
+	}
+}
+
+const (
+	md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
+	sha256HexEncLen = sha256.Size * 2        // hex.EncodedLen
+)
+
+func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
+	curPos, err := src.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	// hash the body. seek back to the first position after reading to reset
+	// the body for transmission. copy errors may be assumed to be from the
+	// body.
+	n, err := io.Copy(dst, src)
+	if err != nil {
+		return n, err
+	}
+
+	_, err = src.Seek(curPos, sdkio.SeekStart)
+	if err != nil {
+		return n, err
+	}
+
+	return n, nil
+}
+
+// Adds the x-amz-te: append-md5 header to the request. This requests that the
+// service respond with a trailing MD5 checksum.
+//
+// Will not ask for append MD5 if it is disabled, if the request is presigned,
+// or if the API operation does not support content MD5 validation.
+func askForTxEncodingAppendMD5(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
+}
+
+func useMD5ValidationReader(r *request.Request) {
+	if r.Error != nil {
+		return
+	}
+
+	if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
+		return
+	}
+
+	var bodyReader *io.ReadCloser
+	var contentLen int64
+	switch tv := r.Data.(type) {
+	case *GetObjectOutput:
+		bodyReader = &tv.Body
+		contentLen = aws.Int64Value(tv.ContentLength)
+		// Update ContentLength to hide the trailing MD5 checksum.
+ tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader. + *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go deleted file mode 100644 index 9fc5df9..0000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - _, err := io.Copy(h, r.Body) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to read body", err) - return - } - _, err = r.Body.Seek(0, 0) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to seek body", err) - return - } - - // encode the md5 checksum in base64 and set the request header. 
- sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 8463347..a55beab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -42,5 +42,29 @@ func defaultInitRequestFn(r *request.Request) { r.Handlers.Validate.PushFront(populateLocationConstraint) case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + case opPutObject, opUploadPart: + r.Handlers.Build.PushBack(computeBodyHashes) + // Disabled until #1837 root issue is resolved. + // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) } } + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. +type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go index f045fd0..0def022 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -10,69 +10,17 @@ // // Using the Client // -// To use the client for Amazon Simple Storage Service you will first need -// to create a new instance of it. +// To contact Amazon Simple Storage Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. // -// When creating a client for an AWS service you'll first need to have a Session -// already created. The Session provides configuration that can be shared -// between multiple service clients. Additional configuration can be applied to -// the Session and service's client when they are constructed. The aws package's -// Config type contains several fields such as Region for the AWS Region the -// client should make API requests too. The optional Config value can be provided -// as the variadic argument for Sessions and client creation. -// -// Once the service's client is created you can use it to make API requests the -// AWS service. These clients are safe to use concurrently. -// -// // Create a session to share configuration, and load external configuration. -// sess := session.Must(session.NewSession()) -// -// // Create the service's client with the session. -// svc := s3.New(sess) -// -// See the SDK's documentation for more information on how to use service clients. +// See the SDK's documentation for more information on how to use the SDK. // https://docs.aws.amazon.com/sdk-for-go/api/ // -// See aws package's Config type for more information on configuration options. +// See aws.Config documentation for more information on configuring SDK clients. 
 // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
 //
 // See the Amazon Simple Storage Service client S3 for more
-// information on creating the service's client.
+// information on creating a client for this service.
 // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
-//
-// Once the client is created you can make an API request to the service.
-// Each API method takes a input parameter, and returns the service response
-// and an error.
-//
-// The API method will document which error codes the service can be returned
-// by the operation if the service models the API operation's errors. These
-// errors will also be available as const strings prefixed with "ErrCode".
-//
-//   result, err := svc.AbortMultipartUpload(params)
-//   if err != nil {
-//       // Cast err to awserr.Error to handle specific error codes.
-//       aerr, ok := err.(awserr.Error)
-//       if ok && aerr.Code() == <error code to check for> {
-//           // Specific error code handling
-//       }
-//       return err
-//   }
-//
-//   fmt.Println("AbortMultipartUpload result:")
-//   fmt.Println(result)
-//
-// Using the Client with Context
-//
-// The service's client also provides methods to make API requests with a Context
-// value. This allows you to control the timeout, and cancellation of pending
-// requests. These methods also take request Option as variadic parameter to apply
-// additional configuration to the API request.
-//
-//   ctx := context.Background()
-//
-//   result, err := svc.AbortMultipartUploadWithContext(ctx, params)
-//
-// See the request package documentation for more information on using Context pattern
-// with the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
 package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
index b794a63..39b912c 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -35,7 +35,7 @@
 //
 // The s3manager package's Downloader provides concurrently downloading of Objects
 // from S3. The Downloader will write S3 Object content with an io.WriterAt.
-// Once the Downloader instance is created you can call Upload concurrently from
+// Once the Downloader instance is created you can call Download concurrently from
 // multiple goroutines safely.
 //
 //   // The session the S3 Downloader will use
@@ -56,7 +56,7 @@
 //     Key: aws.String(myString),
 //   })
 //   if err != nil {
-//     return fmt.Errorf("failed to upload file, %v", err)
+//     return fmt.Errorf("failed to download file, %v", err)
 //   }
 //   fmt.Printf("file downloaded, %d bytes\n", n)
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index ec3ffe4..a7fbc2d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -8,7 +8,6 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
@@ -113,15 +112,9 @@ func updateEndpointForAccelerate(r *request.Request) {
 // Attempts to retrieve the bucket name from the request input parameters.
 // If no bucket is found, or the field is empty "", false will be returned.
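+//
+// Illustrative sketch (an assumption, not upstream documentation): any input
+// type that implements the bucketGetter accessor is supported, e.g.
+//
+//    name, ok := bucketNameFromReqParams(&PutObjectInput{
+//        Bucket: aws.String("mybucket"),
+//    })
+//    // name == "mybucket", ok == true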
 func bucketNameFromReqParams(params interface{}) (string, bool) {
-	b, _ := awsutil.ValuesAtPath(params, "Bucket")
-	if len(b) == 0 {
-		return "", false
-	}
-
-	if bucket, ok := b[0].(*string); ok {
-		if bucketStr := aws.StringValue(bucket); bucketStr != "" {
-			return bucketStr, true
-		}
+	if iface, ok := params.(bucketGetter); ok {
+		b := iface.getBucket()
+		return b, len(b) > 0
 	}
 
 	return "", false
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
index 614e477..20de53f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -29,8 +29,9 @@ var initRequest func(*request.Request)
 
 // Service information constants
 const (
-	ServiceName = "s3"        // Service endpoint prefix API calls made to.
-	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+	ServiceName = "s3"        // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "S3"        // ServiceID is a unique identifier of a specific service.
 )
 
 // New creates a new instance of the S3 client with a session.
@@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
 		cfg,
 		metadata.ClientInfo{
 			ServiceName:   ServiceName,
+			ServiceID:     ServiceID,
 			SigningName:   signingName,
 			SigningRegion: signingRegion,
 			Endpoint:      endpoint,
@@ -71,6 +73,8 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
 	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
 	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
 
+	svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
+
 	// Run custom client initialization if present
 	if initClient != nil {
 		initClient(svc.Client)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
index 268ea2f..8010c4f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -5,17 +5,27 @@ import (
 	"encoding/base64"
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
 var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
 
 func validateSSERequiresSSL(r *request.Request) {
-	if r.HTTPRequest.URL.Scheme != "https" {
-		p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
-		if len(p) > 0 {
+	if r.HTTPRequest.URL.Scheme == "https" {
+		return
+	}
+
+	if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+		if len(iface.getSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+
+	if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		if len(iface.getCopySourceSSECustomerKey()) > 0 {
 			r.Error = errSSERequiresSSL
+			return
 		}
 	}
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
index 5a78fd3..9f33efc 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -7,6 +7,7 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
 )
 
 func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
@@ -17,7 +18,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
 	}
 	body := bytes.NewReader(b)
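+	// Swap the response body for a rewindable copy so it can be inspected
+	// here and still be re-read by the later unmarshal handlers.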
 	r.HTTPResponse.Body = ioutil.NopCloser(body)
-	defer body.Seek(0, 0)
+	defer body.Seek(0, sdkio.SeekStart)
 
 	if body.Len() == 0 {
 		// If there is no body don't attempt to parse the body.
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
index cccfa8c..2596c69 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -11,7 +11,7 @@ import (
 
 // WaitUntilBucketExists uses the Amazon S3 API operation
 // HeadBucket to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
 // be returned.
 func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
 	return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
@@ -72,7 +72,7 @@ func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucket
 
 // WaitUntilBucketNotExists uses the Amazon S3 API operation
 // HeadBucket to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
 // be returned.
 func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
 	return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
@@ -118,7 +118,7 @@ func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBuc
 
 // WaitUntilObjectExists uses the Amazon S3 API operation
 // HeadObject to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
 // be returned.
 func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
 	return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
@@ -169,7 +169,7 @@ func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObject
 
 // WaitUntilObjectNotExists uses the Amazon S3 API operation
 // HeadObject to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
 // be returned.
 func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
 	return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index e5c105f..6f89a79 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -14,19 +14,18 @@ const opAssumeRole = "AssumeRole"
 
 // AssumeRoleRequest generates a "aws/request.Request" representing the
 // client's request for the AssumeRole operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See AssumeRole for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRole method directly
-// instead.
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the AssumeRoleRequest method.
 //    req, resp := client.AssumeRoleRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
 	op := &request.Operation{
 		Name:       opAssumeRole,
@@ -89,9 +88,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
 // in the IAM User Guide.
 //
-// The temporary security credentials are valid for the duration that you specified
-// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a
-// maximum of 3600 seconds (1 hour). The default is 1 hour.
+// By default, the temporary security credentials created by AssumeRole last
+// for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
 //
 // The temporary security credentials created by AssumeRole can be used to make
 // API calls to any AWS service with the following exception: you cannot call
@@ -122,7 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // the user to call AssumeRole on the ARN of the role in the other account.
 // If the user is in the same account as the role, then you can either attach
 // a policy to the user (identical to the previous different account user),
-// or you can add the user as a principal directly in the role's trust policy
+// or you can add the user as a principal directly in the role's trust policy.
+// In this case, the trust policy acts as the only resource-based policy in
+// IAM, and users in the same account as the role do not need explicit permission
+// to assume the role.
 For more information about trust policies and resource-based
+// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
 //
 // Using MFA with AssumeRole
 //
@@ -169,7 +182,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 // in the IAM User Guide.
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
 	req, out := c.AssumeRoleRequest(input)
 	return out, req.Send()
@@ -195,19 +208,18 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
 
 // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
 // client's request for the AssumeRoleWithSAML operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See AssumeRoleWithSAML for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRoleWithSAML method directly
-// instead.
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the AssumeRoleWithSAMLRequest method.
 //    req, resp := client.AssumeRoleWithSAMLRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
 func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
 	op := &request.Operation{
 		Name:       opAssumeRoleWithSAML,
@@ -249,11 +261,20 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // an access key ID, a secret access key, and a security token. Applications
 // can use these temporary security credentials to sign calls to AWS services.
 //
-// The temporary security credentials are valid for the duration that you specified
-// when calling AssumeRole, or until the time specified in the SAML authentication
-// response's SessionNotOnOrAfter value, whichever is shorter. The duration
-// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
-// The default is 1 hour.
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour.
 However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
 //
 // The temporary security credentials created by AssumeRoleWithSAML can be used
 // to make API calls to any AWS service with the following exception: you cannot
@@ -343,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 // in the IAM User Guide.
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
 func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
 	req, out := c.AssumeRoleWithSAMLRequest(input)
 	return out, req.Send()
@@ -369,19 +390,18 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
 
 // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
 // client's request for the AssumeRoleWithWebIdentity operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See AssumeRoleWithWebIdentity for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRoleWithWebIdentity method directly
-// instead.
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
// req, resp := client.AssumeRoleWithWebIdentityRequest(params) @@ -391,7 +411,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { op := &request.Operation{ Name: opAssumeRoleWithWebIdentity, @@ -441,9 +461,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // key ID, a secret access key, and a security token. Applications can use these // temporary security credentials to sign calls to AWS service APIs. // -// The credentials are valid for the duration that you specified when calling -// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to -// a maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: @@ -495,7 +524,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // the information from these providers to get and use temporary security // credentials. // -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of // how to use web identity federation to get access to content in Amazon // S3. @@ -546,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
 func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
 	req, out := c.AssumeRoleWithWebIdentityRequest(input)
 	return out, req.Send()
@@ -572,19 +601,18 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
 
 // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
 // client's request for the DecodeAuthorizationMessage operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DecodeAuthorizationMessage for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DecodeAuthorizationMessage method directly
-// instead.
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DecodeAuthorizationMessageRequest method.
 //    req, resp := client.DecodeAuthorizationMessageRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
 func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
 	op := &request.Operation{
 		Name:       opDecodeAuthorizationMessage,
@@ -659,7 +687,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
 // invalid. This can happen if the token contains invalid characters, such as
 // linebreaks.
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
 func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
 	req, out := c.DecodeAuthorizationMessageRequest(input)
 	return out, req.Send()
@@ -685,19 +713,18 @@ const opGetCallerIdentity = "GetCallerIdentity"
 
 // GetCallerIdentityRequest generates a "aws/request.Request" representing the
 // client's request for the GetCallerIdentity operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See GetCallerIdentity for usage and error information.
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetCallerIdentity method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetCallerIdentityRequest method.
 //    req, resp := client.GetCallerIdentityRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
 func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
 	op := &request.Operation{
 		Name:       opGetCallerIdentity,
@@ -735,7 +762,7 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
 //
 // See the AWS API reference guide for AWS Security Token Service's
 // API operation GetCallerIdentity for usage and error information.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
 func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
 	req, out := c.GetCallerIdentityRequest(input)
 	return out, req.Send()
@@ -761,19 +788,18 @@ const opGetFederationToken = "GetFederationToken"
 
 // GetFederationTokenRequest generates a "aws/request.Request" representing the
 // client's request for the GetFederationToken operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See GetFederationToken for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetFederationToken method directly
-// instead.
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetFederationTokenRequest method.
 //    req, resp := client.GetFederationTokenRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
 func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
 	op := &request.Operation{
 		Name:       opGetFederationToken,
@@ -905,7 +931,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 // in the IAM User Guide.
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
 func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
 	req, out := c.GetFederationTokenRequest(input)
 	return out, req.Send()
@@ -931,19 +957,18 @@ const opGetSessionToken = "GetSessionToken"
 
 // GetSessionTokenRequest generates a "aws/request.Request" representing the
 // client's request for the GetSessionToken operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// See GetSessionToken for usage and error information.
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetSessionToken method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetSessionTokenRequest method.
 //    req, resp := client.GetSessionTokenRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
 func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
 	op := &request.Operation{
 		Name:       opGetSessionToken,
@@ -1034,7 +1059,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 // in the IAM User Guide.
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { req, out := c.GetSessionTokenRequest(input) return out, req.Send() @@ -1056,20 +1081,27 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken return out, req.Send() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest type AssumeRoleInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1248,7 +1280,6 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse type AssumeRoleOutput struct { _ struct{} `type:"structure"` @@ -1302,22 +1333,30 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. 
An expiration can also be specified in the SAML authentication - // response's SessionNotOnOrAfter value. The actual expiration time is whichever - // value is shorter. + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Enabling SAML 2.0 Federated - // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1443,7 +1482,6 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML // Contains the response to a successful AssumeRoleWithSAML request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse type AssumeRoleWithSAMLOutput struct { _ struct{} `type:"structure"` @@ -1555,20 +1593,27 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest type AssumeRoleWithWebIdentityInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. 
For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1718,7 +1763,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo // Contains the response to a successful AssumeRoleWithWebIdentity request, // including temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse type AssumeRoleWithWebIdentityOutput struct { _ struct{} `type:"structure"` @@ -1811,7 +1855,6 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin // The identifiers for the temporary security credentials that the operation // returns. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser type AssumedRoleUser struct { _ struct{} `type:"structure"` @@ -1854,7 +1897,6 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { } // AWS credentials for API authentication. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials type Credentials struct { _ struct{} `type:"structure"` @@ -1866,7 +1908,7 @@ type Credentials struct { // The date on which the current credentials expire. // // Expiration is a required field - Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + Expiration *time.Time `type:"timestamp" required:"true"` // The secret access key that can be used to sign requests. 
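The DurationSeconds semantics described in these hunks (bounded by the role's configured maximum session duration) translate into code along these lines; the role ARN and session name are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),
		// Must not exceed the role's maximum session duration setting,
		// otherwise the operation fails as documented above.
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		fmt.Println("AssumeRole failed:", err)
		return
	}
	fmt.Println("credentials expire at:", aws.TimeValue(out.Credentials.Expiration))
}
```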
// @@ -1913,7 +1955,6 @@ func (s *Credentials) SetSessionToken(v string) *Credentials { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest type DecodeAuthorizationMessageInput struct { _ struct{} `type:"structure"` @@ -1958,7 +1999,6 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut // A document that contains additional information about the authorization status // of a request from an encoded message that is returned in response to an AWS // request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` @@ -1983,7 +2023,6 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu } // Identifiers for the federated user that is associated with the credentials. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser type FederatedUser struct { _ struct{} `type:"structure"` @@ -2024,7 +2063,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2041,7 +2079,6 @@ func (s GetCallerIdentityInput) GoString() string { // Contains the response to a successful GetCallerIdentity request, including // information about the entity making the request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse type GetCallerIdentityOutput struct { _ struct{} `type:"structure"` @@ -2087,7 +2124,6 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest type GetFederationTokenInput struct { _ struct{} `type:"structure"` @@ -2196,7 +2232,6 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse type GetFederationTokenOutput struct { _ struct{} `type:"structure"` @@ -2249,7 +2284,6 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest type GetSessionTokenInput struct { _ struct{} `type:"structure"` @@ -2334,7 +2368,6 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { // Contains the response to a successful GetSessionToken request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse type GetSessionTokenOutput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index d2af518..ef681ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -56,69 +56,17 @@ // // Using the Client // -// To use the client for AWS Security Token Service you will first need -// to create a new instance of it. 
+// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. // -// When creating a client for an AWS service you'll first need to have a Session -// already created. The Session provides configuration that can be shared -// between multiple service clients. Additional configuration can be applied to -// the Session and service's client when they are constructed. The aws package's -// Config type contains several fields such as Region for the AWS Region the -// client should make API requests too. The optional Config value can be provided -// as the variadic argument for Sessions and client creation. -// -// Once the service's client is created you can use it to make API requests the -// AWS service. These clients are safe to use concurrently. -// -// // Create a session to share configuration, and load external configuration. -// sess := session.Must(session.NewSession()) -// -// // Create the service's client with the session. -// svc := sts.New(sess) -// -// See the SDK's documentation for more information on how to use service clients. +// See the SDK's documentation for more information on how to use the SDK. // https://docs.aws.amazon.com/sdk-for-go/api/ // -// See aws package's Config type for more information on configuration options. +// See aws.Config documentation for more information on configuring SDK clients. // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // // See the AWS Security Token Service client STS for more -// information on creating the service's client. +// information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New -// -// Once the client is created you can make an API request to the service. -// Each API method takes a input parameter, and returns the service response -// and an error. -// -// The API method will document which error codes the service can be returned -// by the operation if the service models the API operation's errors. These -// errors will also be available as const strings prefixed with "ErrCode". -// -// result, err := svc.AssumeRole(params) -// if err != nil { -// // Cast err to awserr.Error to handle specific error codes. -// aerr, ok := err.(awserr.Error) -// if ok && aerr.Code() == { -// // Specific error code handling -// } -// return err -// } -// -// fmt.Println("AssumeRole result:") -// fmt.Println(result) -// -// Using the Client with Context -// -// The service's client also provides methods to make API requests with a Context -// value. This allows you to control the timeout, and cancellation of pending -// requests. These methods also take request Option as variadic parameter to apply -// additional configuration to the API request. -// -// ctx := context.Background() -// -// result, err := svc.AssumeRoleWithContext(ctx, params) -// -// See the request package documentation for more information on using Context pattern -// with the SDK. 
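The usage examples removed from this doc comment still describe how the client is driven in practice. A condensed sketch combining client construction, context-based cancellation, and awserr-based error inspection:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// A Session holds configuration shared between service clients.
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// The *WithContext variants allow timeout and cancellation control.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	result, err := svc.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		// Modeled service errors expose their "ErrCode..." constants via awserr.Error.
		if aerr, ok := err.(awserr.Error); ok {
			fmt.Println("code:", aerr.Code(), "message:", aerr.Message())
		}
		return
	}
	fmt.Println("account:", *result.Account)
}
```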
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/ package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 1ee5839..185c914 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -29,8 +29,9 @@ var initRequest func(*request.Request) // Service information constants const ( - ServiceName = "sts" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "STS" // ServiceID is a unique identifer of a specific service. ) // New creates a new instance of the STS client with a session. @@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore new file mode 100644 index 0000000..9e13114 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/.gitignore @@ -0,0 +1,2 @@ +example/example +example/example.exe diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS new file mode 100644 index 0000000..ff177f6 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [2013] [the CloudFoundry Authors] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md new file mode 100644 index 0000000..fceda75 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/Readme.md @@ -0,0 +1,30 @@ +# Speakeasy + +This package provides cross-platform Go (#golang) helpers for taking user input +from the terminal while not echoing the input back (similar to `getpasswd`). The +package uses syscalls to avoid any dependence on cgo, and is therefore +compatible with cross-compiling. + +[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] + +## Unicode + +Multi-byte unicode characters work successfully on Mac OS X. On Windows, +however, this may be problematic (as is UTF in general on Windows). Other +platforms have not been tested. + +## License + +The code herein was not written by me, but was compiled from two separate open +source packages. 
Unix portions were imported from [gopass][gopass], while +Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s +[Windows terminal helpers][cf-ui-windows]. + +The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly +from the source (though I attempted to fill in the correct owner in the +boilerplate copyright notice). + +[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" +[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" +[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" +[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go new file mode 100644 index 0000000..71c1dd1 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy.go @@ -0,0 +1,49 @@ +package speakeasy + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Ask the user to enter a password with input hidden. prompt is a string to +// display before the user's input. Returns the provided password, or an error +// if the command failed. +func Ask(prompt string) (password string, err error) { + return FAsk(os.Stdout, prompt) +} + +// FAsk is the same as Ask, except it is possible to specify the file to write +// the prompt to. If 'nil' is passed as the writer, no prompt will be written. +func FAsk(wr io.Writer, prompt string) (password string, err error) { + if wr != nil && prompt != "" { + fmt.Fprint(wr, prompt) // Display the prompt. + } + password, err = getPassword() + + // Carriage return after the user input. + if wr != nil { + fmt.Fprintln(wr, "") + } + return +} + +func readline() (value string, err error) { + var valb []byte + var n int + b := make([]byte, 1) + for { + // read one byte at a time so we don't accidentally read extra bytes + n, err = os.Stdin.Read(b) + if err != nil && err != io.EOF { + return "", err + } + if n == 0 || b[0] == '\n' { + break + } + valb = append(valb, b[0]) + } + + return strings.TrimSuffix(string(valb), "\r"), nil +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go new file mode 100644 index 0000000..d99fda1 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go @@ -0,0 +1,93 @@ +// based on https://code.google.com/p/gopass +// Author: johnsiilver@gmail.com (John Doak) +// +// Original code is based on code by RogerV in the golang-nuts thread: +// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package speakeasy + +import ( + "fmt" + "os" + "os/signal" + "strings" + "syscall" +) + +const sttyArg0 = "/bin/stty" + +var ( + sttyArgvEOff = []string{"stty", "-echo"} + sttyArgvEOn = []string{"stty", "echo"} +) + +// getPassword gets input hidden from the terminal from a user. This is +// accomplished by turning off terminal echo, reading input from the user and +// finally turning on terminal echo. +func getPassword() (password string, err error) { + sig := make(chan os.Signal, 10) + brk := make(chan bool) + + // File descriptors for stdin, stdout, and stderr. + fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} + + // Setup notifications of termination signals to channel sig, create a process to + // watch for these signals so we can turn back on echo if need be. 
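Ask, FAsk, and the per-platform getPassword implementations make up the whole public surface of this package. A minimal usage sketch (the prompt text is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Ask prints the prompt to stdout and reads input with echo disabled.
	password, err := speakeasy.Ask("Enter your password: ")
	if err != nil {
		fmt.Println("password entry failed:", err)
		return
	}
	fmt.Printf("read %d characters (input was not echoed)\n", len(password))
}
```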
+ signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, + syscall.SIGTERM) + go catchSignal(fd, sig, brk) + + // Turn off the terminal echo. + pid, err := echoOff(fd) + if err != nil { + return "", err + } + + // Turn on the terminal echo and stop listening for signals. + defer signal.Stop(sig) + defer close(brk) + defer echoOn(fd) + + syscall.Wait4(pid, nil, 0, nil) + + line, err := readline() + if err == nil { + password = strings.TrimSpace(line) + } else { + err = fmt.Errorf("failed during password entry: %s", err) + } + + return password, err +} + +// echoOff turns off the terminal echo. +func echoOff(fd []uintptr) (int, error) { + pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) + if err != nil { + return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) + } + return pid, nil +} + +// echoOn turns back on the terminal echo. +func echoOn(fd []uintptr) { + // Turn on the terminal echo. + pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) + if e == nil { + syscall.Wait4(pid, nil, 0, nil) + } +} + +// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn +// terminal echo back on before the program ends. Otherwise the user is left +// with echo off on their terminal. +func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { + select { + case <-sig: + echoOn(fd) + os.Exit(-1) + case <-brk: + } +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go new file mode 100644 index 0000000..c2093a8 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package speakeasy + +import ( + "syscall" +) + +// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx +const ENABLE_ECHO_INPUT = 0x0004 + +func getPassword() (password string, err error) { + var oldMode uint32 + + err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) + if err != nil { + return + } + + var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) + + err = setConsoleMode(syscall.Stdin, newMode) + defer setConsoleMode(syscall.Stdin, oldMode) + if err != nil { + return + } + + return readline() +} + +func setConsoleMode(console syscall.Handle, mode uint32) (err error) { + dll := syscall.MustLoadDLL("kernel32") + proc := dll.MustFindProc("SetConsoleMode") + r, _, err := proc.Call(uintptr(console), uintptr(mode)) + + if r == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c836416..bc52e96 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. 
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a658..7929947 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) +type flag uintptr + var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. 
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. 
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) - if upfv&flagKindMask>>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. 
+ flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3..205c28d 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 7c519ff..1be8ce9 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582..f78d89f 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875b..b04edb7 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. 
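All of this flag manipulation exists so spew can pretty-print values that ordinary reflection treats as read-only, such as unexported struct fields. A small usage sketch (the config type is invented for illustration):

```go
package main

import "github.com/davecgh/go-spew/spew"

// config is a made-up type with an unexported field.
type config struct {
	Name   string
	secret string
}

func main() {
	c := config{Name: "example", secret: "hunter2"}
	// Dump walks c with reflection; on builds where the unsafe bypass is
	// available it can fully display unexported fields such as "secret".
	spew.Dump(c)
}
```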
switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml index 0064ba1..65c872b 100644 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -1,14 +1,15 @@ sudo: false language: go - go: - - 1.4 - - 1.5 - - 1.6 - - tip + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - master script: - - go get -v github.com/smartystreets/goconvey + - go get golang.org/x/tools/cmd/cover + - go get github.com/smartystreets/goconvey - go test -v -cover -race notifications: diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md index 22a4234..8594742 100644 --- a/vendor/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -1,4 +1,4 @@ -INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) === ![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) @@ -106,6 +106,12 @@ cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) The value of those keys are always `true`, and when you save to a file, it will keep in the same foramt as you read. +To generate such keys in your program, you could use `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + #### Comment Take care that following format will be treated as comment: diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md index 3b4fb66..163432d 100644 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -99,6 +99,12 @@ cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) 这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + #### 关于注释 下述几种情况的内容将被视为注释: diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 77e0dbd..68d73aa 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -37,7 +37,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.23.1" + _VERSION = "1.25.4" ) // Version returns current package version literal. @@ -176,6 +176,8 @@ type LoadOptions struct { // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. // This type of keys are mostly used in my.cnf. AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise // conform to key/value pairs. Specify the names of those blocks here. UnparseableSections []string @@ -219,6 +221,12 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { return LoadSources(LoadOptions{Insensitive: true}, source, others...) } +// InsensitiveLoad has exactly same functionality as Load function +// except it allows have shadow keys. 
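ShadowLoad and the AllowShadows option introduced in this hunk keep duplicate keys instead of overwriting them. A sketch of reading shadowed values (the [ssh] data is invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	// ShadowLoad is LoadSources with AllowShadows enabled, so repeated
	// key names within a section are all retained.
	cfg, err := ini.ShadowLoad([]byte("[ssh]\nProxyCommand = cmd1\nProxyCommand = cmd2\n"))
	if err != nil {
		panic(err)
	}
	key := cfg.Section("ssh").Key("ProxyCommand")
	fmt.Println(key.Value())            // cmd1 (first value only)
	fmt.Println(key.ValueWithShadows()) // [cmd1 cmd2]
}
```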
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) +} + // Empty returns an empty file object. func Empty() *File { // Ignore error here, we sure our data is good. @@ -441,6 +449,7 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } alignSpaces := bytes.Repeat([]byte(" "), alignLength) + KEY_LIST: for _, kname := range sec.keyList { key := sec.Key(kname) if len(key.Comment) > 0 { @@ -467,28 +476,33 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { case strings.Contains(kname, "`"): kname = `"""` + kname + `"""` } - if _, err = buf.WriteString(kname); err != nil { - return 0, err - } - if key.isBooleanType { - continue - } + for _, val := range key.ValueWithShadows() { + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } - val := key.value - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { - return 0, err + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } } } diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go index 9738c55..852696f 100644 --- a/vendor/github.com/go-ini/ini/key.go +++ b/vendor/github.com/go-ini/ini/key.go @@ -15,6 +15,7 @@ package ini import ( + "errors" "fmt" "strconv" "strings" @@ -29,9 +30,42 @@ type Key struct { isAutoIncrement bool isBooleanType bool + isShadow bool + shadows []*Key + Comment string } +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + // ValueMapper represents a mapping function for values, e.g. os.ExpandEnv type ValueMapper func(string) string @@ -45,16 +79,29 @@ func (k *Key) Value() string { return k.value } -// String returns string representation of value. -func (k *Key) String() string { - val := k.value +// ValueWithShadows returns raw values of key and its shadows if any. 
+func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + return []string{k.value} + } + vals := make([]string, len(k.shadows)+1) + vals[0] = k.value + for i := range k.shadows { + vals[i+1] = k.shadows[i].value + } + return vals +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { if k.s.f.ValueMapper != nil { val = k.s.f.ValueMapper(val) } - if strings.Index(val, "%") == -1 { + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { return val } - for i := 0; i < _DEPTH_VALUES; i++ { vr := varPattern.FindString(val) if len(vr) == 0 { @@ -78,6 +125,11 @@ func (k *Key) String() string { return val } +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + // Validate accepts a validate function which can // return modifed result as key value. func (k *Key) Validate(fn func(string) string) string { @@ -394,11 +446,31 @@ func (k *Key) Strings(delim string) []string { vals := strings.Split(str, delim) for i := range vals { + // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) vals[i] = strings.TrimSpace(vals[i]) } return vals } +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + // Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. func (k *Key) Float64s(delim string) []float64 { vals, _ := k.getFloat64s(delim, true, false) @@ -407,13 +479,13 @@ func (k *Key) Float64s(delim string) []float64 { // Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. func (k *Key) Ints(delim string) []int { - vals, _ := k.getInts(delim, true, false) + vals, _ := k.parseInts(k.Strings(delim), true, false) return vals } // Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.getInt64s(delim, true, false) + vals, _ := k.parseInt64s(k.Strings(delim), true, false) return vals } @@ -452,14 +524,14 @@ func (k *Key) ValidFloat64s(delim string) []float64 { // ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will // not be included to result list. func (k *Key) ValidInts(delim string) []int { - vals, _ := k.getInts(delim, false, false) + vals, _ := k.parseInts(k.Strings(delim), false, false) return vals } // ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, // then it will not be included to result list. func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.getInt64s(delim, false, false) + vals, _ := k.parseInt64s(k.Strings(delim), false, false) return vals } @@ -495,12 +567,12 @@ func (k *Key) StrictFloat64s(delim string) ([]float64, error) { // StrictInts returns list of int divided by given delimiter or error on first invalid input. 
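transformValue, factored out here, implements the recursive %(name)s variable substitution that String (and now StringsWithShadows) applies on top of raw values. A quick sketch of that substitution, using key names in the style of the library's README:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("NAME = ini\nVERSION = v1\nIMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s\n"))
	if err != nil {
		panic(err)
	}
	// String resolves %(NAME)s and %(VERSION)s recursively; Value would
	// return the raw, unsubstituted text.
	fmt.Println(cfg.Section("").Key("IMPORT_PATH").String()) // gopkg.in/ini.v1
}
```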
func (k *Key) StrictInts(delim string) ([]int, error) { - return k.getInts(delim, false, true) + return k.parseInts(k.Strings(delim), false, true) } // StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.getInt64s(delim, false, true) + return k.parseInt64s(k.Strings(delim), false, true) } // StrictUints returns list of uint divided by given delimiter or error on first invalid input. @@ -541,9 +613,8 @@ func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]flo return vals, nil } -// getInts returns list of int divided by given delimiter. -func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { - strs := k.Strings(delim) +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { vals := make([]int, 0, len(strs)) for _, str := range strs { val, err := strconv.Atoi(str) @@ -557,9 +628,8 @@ func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, er return vals, nil } -// getInt64s returns list of int64 divided by given delimiter. -func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { - strs := k.Strings(delim) +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { vals := make([]int64, 0, len(strs)) for _, str := range strs { val, err := strconv.ParseInt(str, 10, 64) diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go index b0aabe3..673ef80 100644 --- a/vendor/github.com/go-ini/ini/parser.go +++ b/vendor/github.com/go-ini/ini/parser.go @@ -318,11 +318,14 @@ func (f *File) parse(reader io.Reader) (err error) { if err != nil { // Treat as boolean key when desired, and whole line is key name. 
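// For example (sketch), with AllowBooleanKeys enabled a bare line such as
// "skip-name-resolve" (no delimiter at all) is accepted as a key whose
// value reads as "true", via NewBooleanKey below.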
 			if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
-				key, err := section.NewKey(string(line), "true")
+				kname, err := p.readValue(line, f.options.IgnoreContinuation)
+				if err != nil {
+					return err
+				}
+				key, err := section.NewBooleanKey(kname)
 				if err != nil {
 					return err
 				}
-				key.isBooleanType = true
 				key.Comment = strings.TrimSpace(p.comment.String())
 				p.comment.Reset()
 				continue
@@ -338,17 +341,16 @@ func (f *File) parse(reader io.Reader) (err error) {
 			p.count++
 		}
 
-		key, err := section.NewKey(kname, "")
+		value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
 		if err != nil {
 			return err
 		}
-		key.isAutoIncrement = isAutoIncr
 
-		value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
+		key, err := section.NewKey(kname, value)
 		if err != nil {
 			return err
 		}
-		key.SetValue(value)
+		key.isAutoIncrement = isAutoIncr
 		key.Comment = strings.TrimSpace(p.comment.String())
 		p.comment.Reset()
 	}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
index 45d2f3b..c9fa27e 100644
--- a/vendor/github.com/go-ini/ini/section.go
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -68,20 +68,33 @@ func (s *Section) NewKey(name, val string) (*Key, error) {
 	}
 
 	if inSlice(name, s.keyList) {
-		s.keys[name].value = val
+		if s.f.options.AllowShadows {
+			if err := s.keys[name].addShadow(val); err != nil {
+				return nil, err
+			}
+		} else {
+			s.keys[name].value = val
+		}
 		return s.keys[name], nil
 	}
 
 	s.keyList = append(s.keyList, name)
-	s.keys[name] = &Key{
-		s:     s,
-		name:  name,
-		value: val,
-	}
+	s.keys[name] = newKey(s, name, val)
 	s.keysHash[name] = val
 	return s.keys[name], nil
 }
 
+// NewBooleanKey creates a new boolean type key in the given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+	key, err := s.NewKey(name, "true")
+	if err != nil {
+		return nil, err
+	}
+
+	key.isBooleanType = true
+	return key, nil
+}
+
 // GetKey returns key in section by given name.
 func (s *Section) GetKey(name string) (*Key, error) {
 	// FIXME: change to section level lock?
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
index 5ef38d8..509c682 100644
--- a/vendor/github.com/go-ini/ini/struct.go
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -78,8 +78,14 @@ func parseDelim(actual string) string {
 var reflectTime = reflect.TypeOf(time.Now()).Kind()
 
 // setSliceWithProperType sets proper values to slice based on its type.
-func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
-	strs := key.Strings(delim)
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+	var strs []string
+	if allowShadow {
+		strs = key.StringsWithShadows(delim)
+	} else {
+		strs = key.Strings(delim)
+	}
+
 	numVals := len(strs)
 	if numVals == 0 {
 		return nil
@@ -92,9 +98,9 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
 	case reflect.String:
 		vals = strs
 	case reflect.Int:
-		vals = key.Ints(delim)
+		vals, _ = key.parseInts(strs, true, false)
 	case reflect.Int64:
-		vals = key.Int64s(delim)
+		vals, _ = key.parseInt64s(strs, true, false)
 	case reflect.Uint:
 		vals = key.Uints(delim)
 	case reflect.Uint64:
@@ -133,7 +139,7 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
 // setWithProperType sets proper value to field based on its type,
 // but it does not return an error for a failed parse,
 // because we want to use the default value that is already assigned to the struct.
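// For example (sketch), mapping "port = abc" onto a field declared as
//	Port int `ini:"port"`
// leaves Port at whatever value it held before mapping, instead of failing.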
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { switch t.Kind() { case reflect.String: if len(key.String()) == 0 { @@ -187,13 +193,25 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri } field.Set(reflect.ValueOf(timeVal)) case reflect.Slice: - return setSliceWithProperType(key, field, delim) + return setSliceWithProperType(key, field, delim, allowShadow) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + func (s *Section) mapTo(val reflect.Value) error { if val.Kind() == reflect.Ptr { val = val.Elem() @@ -209,8 +227,8 @@ func (s *Section) mapTo(val reflect.Value) error { continue } - opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty - fieldName := s.parseFieldName(tpField.Name, opts[0]) + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) if len(fieldName) == 0 || !field.CanSet() { continue } @@ -231,7 +249,8 @@ func (s *Section) mapTo(val reflect.Value) error { } if key, err := s.GetKey(fieldName); err == nil { - if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } } diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000..0f64693 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 0000000..3cd3249 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,253 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. 
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
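// (Sharing the map values directly would alias mutable state between src
// and dst: a later mutation through dst's *T or []byte value would become
// visible through src as well, hence each such value is cloned below.)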
+ elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 0000000..d9aa3c4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,428 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. 
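// Falling through to here means all ten bytes had their continuation bit
// set, which cannot happen for a value that fits in 64 bits, so report overflow.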
+ + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. 
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
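// It computes and caches a discardInfo for m's concrete type on first use,
// so repeated calls for the same message type skip the reflection walk.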
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. 
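// (An assumption encoded here: protoc-gen-go only emits interface-typed
// fields for oneof wrappers, so an interface field, if set, is expected to
// wrap a generated oneof value.)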
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to an interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to an interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 0000000..3abfed2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. 
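// For example (worked illustration), EncodeVarint(300) returns
// []byte{0xAC, 0x02}: 300 is 0b100101100; the low seven bits 0101100 (0x2C)
// are emitted first with the continuation bit set (0xAC), then the
// remaining bits 10 (0x02).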
+func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. 
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..d4db5a1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extension sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+ +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
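// (This mirrors the rule documented on Equal above: zero-length proto3
// "bytes" fields compare equal, i.e. nil == {}, because proto3 has no
// notion of a scalar field being "set".)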
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 0000000..816a3b9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,543 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. 
+ return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
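+// For example, with an ExtensionRangeArray of [{Start: 100, End: 536870911}],
+// field 1000 is in range while field 3 is not.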
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. 
Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return e.value, nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = v
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr, we can directly return sf.value.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it as an int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 0000000..75565cc --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,979 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+    them as structure fields.
+  - There are getters that return a field's value if set,
+    and return the field's default value if unset.
+    The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+    All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+    That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+    msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+    have them. They have the form Default_StructName_FieldName.
+    Because the getter methods handle defaulted values,
+    direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+    Enum values are prefixed by the enclosing message's name, or by the
+    enum's type name if it is a top-level enum. Enum types have a String
+    method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+    the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+    followed by an underscore-delimited list of the nested messages
+    that contain it (if any) followed by the CamelCased name of the
+    extension field itself. HasExtension, ClearExtension, GetExtension
+    and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+    with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+ +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb 
"./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. 
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
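+//
+// A minimal usage sketch, assuming enc holds wire-format bytes:
+//
+//	new(Buffer).DebugPrint("wire dump", enc)
+//
+// One line is printed per field, showing its offset, tag, and wire type.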
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
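+		// Each case below allocates fresh backing storage for the default,
+		// so defaults are never shared between messages.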
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
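+// For example, a proto2 field declared as optional int32 type = 2 [default=77]
+// yields a scalarField with kind reflect.Int32 and value int32(77).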
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. 
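+	// With no keys there is nothing to compare; sort.Sort never calls
+	// Less on an empty collection, so s.less can safely stay nil.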
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..3b6ca41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,314 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "sync" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. 
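+// It is equivalent to calling marshalMessageSet(exts, false), i.e. with
+// deterministic output disabled.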
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+	return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements the function above, with an option to toggle deterministic output during Marshal.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		var u marshalInfo
+		siz := u.sizeMessageSet(exts)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, exts, deterministic)
+
+	case map[int32]Extension:
+		// This is an old-style extension map.
+		// Wrap it in a new-style XXX_InternalExtensions.
+		ie := XXX_InternalExtensions{
+			p: &struct {
+				mu           sync.Mutex
+				extensionMap map[int32]Extension
+			}{
+				extensionMap: exts,
+			},
+		}
+
+		var u marshalInfo
+		siz := u.sizeMessageSet(&ie)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, &ie, deterministic)
+
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		var mu sync.Locker
+		m, mu = exts.extensionsRead()
+		if m != nil {
+			// Keep the extensions map locked until we're done marshaling to prevent
+			// races between marshaling and unmarshaling the lazily-{en,de}coded
+			// values.
+			mu.Lock()
+			defer mu.Unlock()
+		}
+	case map[int32]Extension:
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+	var b bytes.Buffer
+	b.WriteByte('{')
+
+	// Process the map in key order for deterministic output.
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+	for i, id := range ids {
+		ext := m[id]
+		msd, ok := messageSetMap[id]
+		if !ok {
+			// Unknown type; we can't render it, so skip it.
+ continue + } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000..b6cad90 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. 
+ +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. 
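+// The copy is required because reflect cannot view a []enum as a []int32
+// in place, so the elements are converted one at a time.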
+func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. 
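+// As with getInt32Slice above, reflect offers no way to reinterpret the
+// slice header in place, so each element is wrapped individually.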
+func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000..d55a335 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. 
+ /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. 
+// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 0000000..50b99b8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,544 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
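+// A Properties value is typically populated from a generated struct tag such
+// as `protobuf:"bytes,1,opt,name=foo,json=foo"`; Parse below decodes exactly
+// this comma-separated form.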
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
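+// For map fields, the key and value each get a sub-Properties parsed from the
+// protobuf_key and protobuf_val struct tags; non-message, non-bytes value
+// types are addressed through a pointer so the scalar encoders can be reused.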
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + switch t1 := typ; t1.Kind() { + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + + case reflect.Slice: + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() + } + + case reflect.Map: + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. 
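+	// StructProperties sorts by each field's Tag (see Less above), so after
+	// this call prop.order lists struct field indices in increasing tag
+	// number, the order recommended for the wire.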
+ sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. 
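+// Generated code typically invokes this as, for example,
+// RegisterMapType((map[string]string)(nil), "pkg.Message.LabelsEntry"); the
+// typed nil map is used only for its reflect.Type.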
+func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 0000000..b167944 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2767 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
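+
+// This file implements the table-driven marshaler: for each message type a
+// marshalInfo is computed once and cached, holding precomputed sizer and
+// marshaler function pointers for every field.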
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. 
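+// Note that Size stores its result in the message's XXX_sizecache field when
+// one exists; cachedsize (below) reads it back so that nested message sizes
+// generally need not be recomputed while marshaling.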
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. 
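+	// Parsers accept fields in any order, so emitting extensions first is
+	// purely for byte-for-byte compatibility with the old output.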
+ if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
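+	// It is inefficient because such a Marshaler returns a freshly allocated
+	// []byte: size measures and discards it, and marshal copies it into b.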
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
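+	// tags[0] is the wire encoding, tags[1] the field number, and a "req" at
+	// tags[2] marks a proto2 required field.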
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
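+// The switch below dispatches on Go kind, wire encoding, and the
+// pointer/slice/packed/nozero flags, returning a matched (sizer, marshaler)
+// pair; an optional varint int32, for example, selects sizeVarintS32Ptr and
+// appendVarintS32Ptr.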
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return 
sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. 
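+// In all of them, tagsize is SizeVarint(uint64(tag)<<3): 1 byte for tags 1-15
+// and 2 bytes for tags 16-2047. A fixed32 field with tag 49 therefore always
+// contributes 4+2 = 6 bytes when set.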
+ +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func 
sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} 
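An editorial aside on the unrolled switch in appendVarint above: it emits the standard base-128 varint (low seven bits first, continuation bit 0x80 set on every byte except the last), and the nine explicit cases exist only to avoid loop overhead on the hot path, per the TODO about the inliner. The loop form below is a sketch for readers, not part of this patch; appendVarintLoop is a hypothetical helper, and encoding/binary.PutUvarint serves as an independent reference since it produces the identical byte sequence.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// appendVarintLoop is the loop equivalent of the unrolled appendVarint:
// emit seven bits per byte, least-significant group first, setting the
// continuation (0x80) bit on every byte except the last.
func appendVarintLoop(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 300, 1 << 21, 1<<63 + 5} {
		got := appendVarintLoop(nil, v)
		buf := make([]byte, binary.MaxVarintLen64)
		want := buf[:binary.PutUvarint(buf, v)]
		fmt.Printf("%d -> %x (matches PutUvarint: %v)\n", v, got, bytes.Equal(got, want))
	}
}

On the wire there is no difference between the two forms; the unrolled switch only trades code size for fewer branches in the encoder.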
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = 
appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. 
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. +func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). 
+ // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. 
+ // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 0000000..5525def --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src are of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceHeader or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int32{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000..ebf1caa --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2051 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-zero, the extensions field has these ranges of allowed extension IDs
+	isMessageSet    bool                          // if true, implies extensions field is []byte
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// how many bits are used for the required fields
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. 
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
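Note: the per-field unmarshalers that follow all share one shape: verify the wire type, decode a base-128 varint (or a fixed-width block), then store the value through this package's pointer plumbing; sint32/sint64 fields additionally undo a zigzag mapping. A minimal, self-contained sketch of those two decoding primitives, with illustrative helper names that are not part of this vendored file, might look like:

    package main

    import "fmt"

    // decodeVarint reads one base-128 varint: 7 payload bits per byte,
    // high bit set on every byte except the last. It returns (0, 0) on
    // truncated or over-long input, mirroring the convention used by
    // the vendored decodeVarint in this file.
    func decodeVarint(b []byte) (x uint64, n int) {
    	for shift := uint(0); shift <= 63; shift += 7 {
    		if n >= len(b) {
    			return 0, 0 // truncated
    		}
    		c := b[n]
    		n++
    		x |= uint64(c&0x7f) << shift
    		if c < 0x80 {
    			return x, n
    		}
    	}
    	return 0, 0 // more than 10 bytes: invalid
    }

    // unzigzag64 undoes the sint64 zigzag mapping; it computes the same
    // value as the int64(x>>1) ^ int64(x)<<63>>63 expression used in the
    // sint64 unmarshalers below.
    func unzigzag64(x uint64) int64 {
    	return int64(x>>1) ^ -int64(x&1)
    }

    func main() {
    	x, n := decodeVarint([]byte{0xac, 0x02})
    	fmt.Println(x, n)          // 300 2
    	fmt.Println(unzigzag64(3)) // -2 (zigzag order: 0, -1, 1, -2, ...)
    }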
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 
0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 0000000..1aaee72 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,843 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
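Note: the text.go file added here backs the package's MarshalText and CompactTextString entry points. A hedged usage sketch, in which Person is a hand-written stand-in for a generated message type (real protoc-gen-go output has the same shape: protobuf struct tags plus the Reset/String/ProtoMessage methods that satisfy proto.Message):

    package main

    import (
    	"os"

    	"github.com/golang/protobuf/proto"
    )

    // Person is a hypothetical message type used only for illustration.
    type Person struct {
    	Name *string `protobuf:"bytes,1,opt,name=name"`
    	Id   *int32  `protobuf:"varint,2,opt,name=id"`
    }

    func (m *Person) Reset()         { *m = Person{} }
    func (m *Person) String() string { return proto.CompactTextString(m) }
    func (*Person) ProtoMessage()    {}

    func main() {
    	p := &Person{Name: proto.String("Ada"), Id: proto.Int32(7)}

    	// Multi-line form written by this file's textWriter:
    	//   name: "Ada"
    	//   id: 7
    	_ = proto.MarshalText(os.Stdout, p)

    	// Compact one-line form: name:"Ada" id:7
    	_ = proto.CompactTextString(p)
    }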
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. 
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
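+ // For example, writeString(w, "a\"b\x01") emits the quoted form
+ // "a\"b\001" (including the surrounding quotes): known escapes use
+ // backslash forms, and any other non-printable byte is written as
+ // three-digit octal.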
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
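+ // For example (a sketch, with a hypothetical extension named
+ // my.pkg.count), a singular extension renders as one line,
+ // `[my.pkg.count]: 42`, while a repeated extension renders one such
+ // line per element via writeExtension below.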
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 0000000..bb55a3a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,880 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + 
switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
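+ // For example (a sketch), input such as `[my.pkg.ext]: 5` arrives
+ // here with extName == "my.pkg.ext"; the scan below matches it
+ // against each registered ExtensionDesc by its full name.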
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
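+ // For example, `count: 1 count: 2` is rejected here when count is a
+ // singular field; a repeated field accepts both values.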
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. 
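+ // For example, `enabled: 1` and `enabled: True` both parse to true.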
+ switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 0000000..70276e8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,141 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. 
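+//
+// A minimal sketch (pb.Foo is a hypothetical generated type that is
+// linked into the binary):
+//
+//   a, _ := ptypes.MarshalAny(&pb.Foo{})
+//   foo := &pb.Foo{}
+//   if err := ptypes.UnmarshalAny(a, foo); err != nil { ... }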
+func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 0000000..e3c56d3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +package any // import "github.com/golang/protobuf/ptypes/any" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+type Any struct {
+ // A URL/resource name whose content describes the type of the
+ // serialized protocol buffer message.
+ //
+ // For URLs which use the scheme `http`, `https`, or no scheme, the
+ // following restrictions and interpretations apply:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * The last segment of the URL's path must represent the fully
+ // qualified name of the type (as in `path/google.protobuf.Duration`).
+ // The name should be in a canonical form (e.g., leading "." is
+ // not accepted).
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ // Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } + +var fileDescriptor_any_744b9ca530f228db = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 0000000..c748667 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,149 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+message Any {
+ // A URL/resource name whose content describes the type of the
+ // serialized protocol buffer message.
+ //
+ // For URLs which use the scheme `http`, `https`, or no scheme, the
+ // following restrictions and interpretations apply:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * The last segment of the URL's path must represent the fully
+ // qualified name of the type (as in `path/google.protobuf.Duration`).
+ // The name should be in a canonical form (e.g., leading "." is
+ // not accepted).
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ string type_url = 1;
+
+ // Must be a valid serialized protocol buffer of the above specified type.
+ bytes value = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 0000000..65cb0f8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
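+//
+// For example, a Duration with Seconds set to maxSeconds passes this
+// validation, yet Duration below still rejects it: time.Duration spans
+// only about 290 years.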
+func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 0000000..a7beb2c --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package duration // import "github.com/golang/protobuf/ptypes/duration" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} + +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 0000000..975fce4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 0000000..47f10db --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,134 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. 
+// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 0000000..8e76ae9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,175 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
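A short usage sketch of the timestamp helpers above (`TimestampNow`, `Timestamp`, `TimestampProto`), using only the vendored import path; the comments on expected output are illustrative:

```
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Capture the current time as a google.protobuf.Timestamp.
	ts := ptypes.TimestampNow()

	// Convert back; the result is always in UTC.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Location() == time.UTC) // true

	// Times outside [0001-01-01, 10000-01-01) are rejected.
	far := time.Date(10001, 1, 1, 0, 0, 0, 0, time.UTC)
	if _, err := ptypes.TimestampProto(far); err != nil {
		fmt.Println("out of range:", err)
	}
}
```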
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. 
In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} + +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 
0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 0000000..06750ab --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,133 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. 
+// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ int32 nanos = 2; +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go index 7d8a57c..8d306bf 100644 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -26,6 +26,7 @@ func DefaultPooledTransport() *http.Transport { DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, + DualStack: true, }).DialContext, MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod new file mode 100644 index 0000000..310f075 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 0000000..7eda377 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,43 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. +func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + next.ServeHTTP(w, r) + return + }) +} diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml index 4438286..da804c2 100644 --- a/vendor/github.com/hashicorp/go-getter/.travis.yml +++ b/vendor/github.com/hashicorp/go-getter/.travis.yml @@ -10,8 +10,14 @@ addons: language: go go: - - 1.5 + - 1.8.x + - 1.9.x + - master branches: only: - master + +matrix: + allow_failures: + - go: master diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md index 4a0b6a6..40ace74 100644 --- a/vendor/github.com/hashicorp/go-getter/README.md +++ b/vendor/github.com/hashicorp/go-getter/README.md @@ -21,8 +21,7 @@ URLs. For example: "github.com/hashicorp/go-getter" would turn into a Git URL. Or "./foo" would turn into a file URL. These are extensible. This library is used by [Terraform](https://terraform.io) for -downloading modules, [Otto](https://ottoproject.io) for dependencies and -Appfile imports, and [Nomad](https://nomadproject.io) for downloading +downloading modules and [Nomad](https://nomadproject.io) for downloading binaries. ## Installation and Usage @@ -119,6 +118,37 @@ The protocol-specific options are documented below the URL format section. But because they are part of the URL, we point it out here so you know they exist. +### Subdirectories + +If you want to download only a specific subdirectory from a downloaded +directory, you can specify a subdirectory after a double-slash `//`. 
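The `PrintablePathCheckHandler` middleware added to go-cleanhttp's handlers.go above wraps any `http.Handler` and rejects request paths containing non-printable runes. A minimal wiring sketch, assuming only the vendored package itself (the mux, route, and listen address are illustrative):

```
package main

import (
	"log"
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Passing nil for input selects the default 400 Bad Request status.
	handler := cleanhttp.PrintablePathCheckHandler(mux, nil)
	log.Fatal(http.ListenAndServe(":8080", handler))
}
```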
+go-getter will first download the URL specified _before_ the double-slash
+(as if you didn't specify a double-slash), but will then copy the
+path after the double slash into the target directory.
+
+For example, if you're downloading this GitHub repository, but you only
+want to download the `test-fixtures` directory, you can do the following:
+
+```
+https://github.com/hashicorp/go-getter.git//test-fixtures
+```
+
+If you downloaded this to the `/tmp` directory, then the file
+`/tmp/archive.gz` would exist. Notice that this file is in the `test-fixtures`
+directory in this repository, but because we specified a subdirectory,
+go-getter automatically copied only that directory's contents.
+
+Subdirectory paths may also use filesystem glob patterns.
+The path must match _exactly one_ entry or go-getter will return an error.
+This is useful if you're not sure of the exact directory name but it follows
+a predictable naming structure.
+
+For example, the following URL would also work:
+
+```
+https://github.com/hashicorp/go-getter.git//test-*
+```
+
 ### Checksumming
 
 For file downloads of any protocol, go-getter can automatically verify
@@ -154,9 +184,11 @@ The following archive formats are supported:
 
 * `tar.gz` and `tgz`
 * `tar.bz2` and `tbz2`
+ * `tar.xz` and `txz`
 * `zip`
 * `gz`
 * `bz2`
+ * `xz`
 
 For example, an example URL is shown below:
 
@@ -200,6 +232,9 @@ The options below are available to all protocols:
 
 * `checksum` - Checksum to verify the downloaded file or archive. See
   the entire section on checksumming above for format and more details.
+ * `filename` - When in file download mode, allows specifying the name of the
+   downloaded file on disk. Has no effect in directory mode.
+
 ### Local Files (`file`)
 
 None
@@ -222,13 +257,17 @@ None
 
 ### HTTP (`http`)
 
-None
+#### Basic Authentication
+
+To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the
+hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
+characters, including the username and password, must be URL encoded.
 
 ### S3 (`s3`)
 
 S3 takes various access configurations in the URL. Note that it will also
-read these from standard AWS environment variables if they're set. If
-the query parameters are present, these take priority.
+read these from standard AWS environment variables if they're set. S3 compliant servers like Minio
+are also supported. If the query parameters are present, these take priority.
 
 * `aws_access_key_id` - AWS access key.
 * `aws_access_key_secret` - AWS access key secret.
@@ -240,6 +279,14 @@ If you use go-getter and want to use an EC2 IAM Instance Profile to avoid using
 credentials, then just omit these and the profile, if available will be used
 automatically.
 
+### Using S3 with Minio
+ If you use go-getter for Minio support, you must consider the following:
+
+ * `aws_access_key_id` (required) - Minio access key.
+ * `aws_access_key_secret` (required) - Minio access key secret.
+ * `region` (optional - defaults to us-east-1) - Region identifier to use.
+ * `version` (optional - defaults to Minio default) - Configuration file format.
+
+#### S3 Bucket Examples
 
 S3 has several addressing schemes used to reference your bucket. These are
+
@@ -250,4 +297,5 @@ Some examples for these addressing schemes:
 - s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
 - bucket.s3.amazonaws.com/foo
 - bucket.s3-eu-west-1.amazonaws.com/foo/bar
+- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2"
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
index 159dad4..ec48d45 100644
--- a/vendor/github.com/hashicorp/go-getter/appveyor.yml
+++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml
@@ -1,5 +1,5 @@
 version: "build-{branch}-{build}"
-image: Visual Studio 2015
+image: Visual Studio 2017
 clone_folder: c:\gopath\github.com\hashicorp\go-getter
 environment:
   GOPATH: c:\gopath
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
index 876812a..300301c 100644
--- a/vendor/github.com/hashicorp/go-getter/client.go
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -17,6 +17,7 @@ import (
 	"strings"
 
 	urlhelper "github.com/hashicorp/go-getter/helper/url"
+	"github.com/hashicorp/go-safetemp"
 )
 
 // Client is a client for downloading things.
@@ -100,17 +101,14 @@ func (c *Client) Get() error {
 	dst := c.Dst
 	src, subDir := SourceDirSubdir(src)
 	if subDir != "" {
-		tmpDir, err := ioutil.TempDir("", "tf")
+		td, tdcloser, err := safetemp.Dir("", "getter")
 		if err != nil {
 			return err
 		}
-		if err := os.RemoveAll(tmpDir); err != nil {
-			return err
-		}
-		defer os.RemoveAll(tmpDir)
+		defer tdcloser.Close()
 
 		realDst = dst
-		dst = tmpDir
+		dst = td
 	}
 
 	u, err := urlhelper.Parse(src)
@@ -232,7 +230,18 @@ func (c *Client) Get() error {
 		// Destination is the base name of the URL path in "any" mode when
 		// a file source is detected.
 		if mode == ClientModeFile {
-			dst = filepath.Join(dst, filepath.Base(u.Path))
+			filename := filepath.Base(u.Path)
+
+			// Determine if we have a custom file name
+			if v := q.Get("filename"); v != "" {
+				// Delete the query parameter if we have it.
+				q.Del("filename")
+				u.RawQuery = q.Encode()
+
+				filename = v
+			}
+
+			dst = filepath.Join(dst, filename)
 		}
 	}
 
@@ -305,7 +314,13 @@ func (c *Client) Get() error {
 			return err
 		}
 
-		return copyDir(realDst, filepath.Join(dst, subDir), false)
+		// Process any globs
+		subDir, err := SubdirGlob(dst, subDir)
+		if err != nil {
+			return err
+		}
+
+		return copyDir(realDst, subDir, false)
 	}
 
 	return nil
diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go
index d18174c..198bb0e 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress.go
@@ -1,7 +1,15 @@
 package getter
 
+import (
+	"strings"
+)
+
 // Decompressor defines the interface that must be implemented to add
 // support for decompressing a type.
+//
+// Important: if you're implementing a decompressor, please use the
+// containsDotDot helper in this file to ensure that files can't be
+// decompressed outside of the specified directory.
 type Decompressor interface {
 	// Decompress should decompress src to dst. dir specifies whether dst
 	// is a directory or single file.
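The client.go changes above wire in two user-visible features: `//` subdirectory handling now resolves glob patterns via `SubdirGlob`, and file-mode downloads honor a `filename` query parameter. A hedged sketch of both, assuming the package-level `Get` and `GetAny` convenience wrappers that this library exposes around `Client` (both URLs are placeholders):

```
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Fetch only one subdirectory of a repository; a glob is allowed as
	// long as it matches exactly one entry.
	src := "git::https://github.com/hashicorp/go-getter.git//test-*"
	if err := getter.Get("./fixtures", src); err != nil {
		log.Fatal(err)
	}

	// In file mode, filename= overrides the on-disk name derived from
	// the URL path.
	src = "https://example.com/app.zip?filename=bundle.zip"
	if err := getter.GetAny(".", src); err != nil {
		log.Fatal(err)
	}
}
```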
src is guaranteed to be a single file @@ -16,14 +24,35 @@ var Decompressors map[string]Decompressor func init() { tbzDecompressor := new(TarBzip2Decompressor) tgzDecompressor := new(TarGzipDecompressor) + txzDecompressor := new(TarXzDecompressor) Decompressors = map[string]Decompressor{ "bz2": new(Bzip2Decompressor), "gz": new(GzipDecompressor), + "xz": new(XzDecompressor), "tar.bz2": tbzDecompressor, "tar.gz": tgzDecompressor, + "tar.xz": txzDecompressor, "tbz2": tbzDecompressor, "tgz": tgzDecompressor, + "txz": txzDecompressor, "zip": new(ZipDecompressor), } } + +// containsDotDot checks if the filepath value v contains a ".." entry. +// This will check filepath components by splitting along / or \. This +// function is copied directly from the Go net/http implementation. +func containsDotDot(v string) bool { + if !strings.Contains(v, "..") { + return false + } + for _, ent := range strings.FieldsFunc(v, isSlashRune) { + if ent == ".." { + return true + } + } + return false +} + +func isSlashRune(r rune) bool { return r == '/' || r == '\\' } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go index 2001054..5ebf709 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go @@ -9,7 +9,7 @@ import ( ) // GzipDecompressor is an implementation of Decompressor that can -// decompress bz2 files. +// decompress gzip files. type GzipDecompressor struct{} func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go new file mode 100644 index 0000000..39cb392 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go @@ -0,0 +1,138 @@ +package getter + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" +) + +// untar is a shared helper for untarring an archive. The reader should provide +// an uncompressed view of the tar archive. +func untar(input io.Reader, dst, src string, dir bool) error { + tarR := tar.NewReader(input) + done := false + dirHdrs := []*tar.Header{} + for { + hdr, err := tarR.Next() + if err == io.EOF { + if !done { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + + break + } + if err != nil { + return err + } + + if hdr.Typeflag == tar.TypeXGlobalHeader || hdr.Typeflag == tar.TypeXHeader { + // don't unpack extended headers as files + continue + } + + path := dst + if dir { + // Disallow parent traversal + if containsDotDot(hdr.Name) { + return fmt.Errorf("entry contains '..': %s", hdr.Name) + } + + path = filepath.Join(path, hdr.Name) + } + + if hdr.FileInfo().IsDir() { + if !dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + // Record the directory information so that we may set its attributes + // after all files have been extracted + dirHdrs = append(dirHdrs, hdr) + + continue + } else { + // There is no ordering guarantee that a file in a directory is + // listed before the directory + dstPath := filepath.Dir(path) + + // Check that the directory exists, otherwise create it + if _, err := os.Stat(dstPath); os.IsNotExist(err) { + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + } + } + + // We have a file. 
If we already decoded, then it is an error + if !dir && done { + return fmt.Errorf("expected a single file, got multiple: %s", src) + } + + // Mark that we're done so future in single file mode errors + done = true + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + return err + } + _, err = io.Copy(dstF, tarR) + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { + return err + } + + // Set the access and modification time + if err := os.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + + // Adding a file or subdirectory changes the mtime of a directory + // We therefore wait until we've extracted everything and then set the mtime and atime attributes + for _, dirHdr := range dirHdrs { + path := filepath.Join(dst, dirHdr.Name) + if err := os.Chtimes(path, dirHdr.AccessTime, dirHdr.ModTime); err != nil { + return err + } + } + + return nil +} + +// tarDecompressor is an implementation of Decompressor that can +// unpack tar files. +type tarDecompressor struct{} + +func (d *tarDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + return untar(f, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go index c46ed44..5391b5c 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go @@ -1,10 +1,7 @@ package getter import ( - "archive/tar" "compress/bzip2" - "fmt" - "io" "os" "path/filepath" ) @@ -32,64 +29,5 @@ func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { // Bzip2 compression is second bzipR := bzip2.NewReader(f) - - // Once bzip decompressed we have a tar format - tarR := tar.NewReader(bzipR) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // We have a file. 
If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } + return untar(bzipR, dst, src, dir) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go index 686d6c2..91cf33d 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -11,7 +11,9 @@ import ( "runtime" "sort" "strings" - "testing" + "time" + + "github.com/mitchellh/go-testing-interface" ) // TestDecompressCase is a single test case for testing decompressors @@ -21,10 +23,11 @@ type TestDecompressCase struct { Err bool // Err is whether we expect an error or not DirList []string // DirList is the list of files for Dir mode FileMD5 string // FileMD5 is the expected MD5 for a single file + Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode) } // TestDecompressor is a helper function for testing generic decompressors. -func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) { +func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { for _, tc := range cases { t.Logf("Testing: %s", tc.Input) @@ -67,6 +70,14 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) } } + if tc.Mtime != nil { + actual := fi.ModTime() + expected := *tc.Mtime + if actual != expected { + t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String()) + } + } + return } @@ -83,11 +94,26 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) } + // Check for correct atime/mtime + for _, dir := range actual { + path := filepath.Join(dst, dir) + if tc.Mtime != nil { + fi, err := os.Stat(path) + if err != nil { + t.Fatalf("err: %s", err) + } + actual := fi.ModTime() + expected := *tc.Mtime + if actual != expected { + t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String()) + } + } + } }() } } -func testListDir(t *testing.T, path string) []string { +func testListDir(t testing.T, path string) []string { var result []string err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { if err != nil { @@ -102,7 +128,7 @@ func testListDir(t *testing.T, path string) []string { // If it is a dir, add trailing sep if info.IsDir() { - sub += "/" + sub += string(os.PathSeparator) } result = append(result, sub) @@ -116,7 +142,7 @@ func testListDir(t *testing.T, path string) []string { return result } -func testMD5(t *testing.T, path string) string { +func testMD5(t testing.T, path string) string { f, err := os.Open(path) if err != nil { t.Fatalf("err: %s", err) diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go index e8b1c31..65eb70d 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go +++ 
b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go @@ -1,10 +1,8 @@ package getter import ( - "archive/tar" "compress/gzip" "fmt" - "io" "os" "path/filepath" ) @@ -37,63 +35,5 @@ func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { } defer gzipR.Close() - // Once gzip decompressed we have a tar format - tarR := tar.NewReader(gzipR) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if !dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // We have a file. If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } + return untar(gzipR, dst, src, dir) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/decompress_txz.go new file mode 100644 index 0000000..5e151c1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_txz.go @@ -0,0 +1,39 @@ +package getter + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// TarXzDecompressor is an implementation of Decompressor that can +// decompress tar.xz files. +type TarXzDecompressor struct{} + +func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + txzR, err := xz.NewReader(f) + if err != nil { + return fmt.Errorf("Error opening an xz reader for %s: %s", src, err) + } + + return untar(txzR, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/decompress_xz.go new file mode 100644 index 0000000..4e37aba --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_xz.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// XzDecompressor is an implementation of Decompressor that can +// decompress xz files. 
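Both new decompressors delegate the heavy lifting to `github.com/ulikunitz/xz`. A standalone sketch of the same open, wrap, copy sequence used above (the input file name is illustrative):

```
package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	// Open the compressed source file.
	f, err := os.Open("file.xz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Wrap it in an xz reader; NewReader validates the stream header.
	r, err := xz.NewReader(f)
	if err != nil {
		panic(err)
	}

	// Stream the decompressed bytes out.
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
```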
+type XzDecompressor struct{} + +func (d *XzDecompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("xz-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + xzR, err := xz.NewReader(f) + if err != nil { + return err + } + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, xzR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go index a065c07..b0e70ca 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go @@ -42,6 +42,11 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { for _, f := range zipR.File { path := dst if dir { + // Disallow parent traversal + if containsDotDot(f.Name) { + return fmt.Errorf("entry contains '..': %s", f.Name) + } + path = filepath.Join(path, f.Name) } diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go index 481b737..c369551 100644 --- a/vendor/github.com/hashicorp/go-getter/detect.go +++ b/vendor/github.com/hashicorp/go-getter/detect.go @@ -72,12 +72,18 @@ func Detect(src string, pwd string, ds []Detector) (string, error) { subDir = detectSubdir } } + if subDir != "" { u, err := url.Parse(result) if err != nil { return "", fmt.Errorf("Error parsing URL: %s", err) } u.Path += "//" + subDir + + // a subdir may contain wildcards, but in order to support them we + // have to ensure the path isn't escaped. + u.RawPath = u.Path + result = u.String() } diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go index 756ea43..4ef41ea 100644 --- a/vendor/github.com/hashicorp/go-getter/detect_file.go +++ b/vendor/github.com/hashicorp/go-getter/detect_file.go @@ -32,7 +32,7 @@ func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { return "", true, err } if fi.Mode()&os.ModeSymlink != 0 { - pwd, err = os.Readlink(pwd) + pwd, err = filepath.EvalSymlinks(pwd) if err != nil { return "", true, err } diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go index c3236f5..e6053d9 100644 --- a/vendor/github.com/hashicorp/go-getter/get.go +++ b/vendor/github.com/hashicorp/go-getter/get.go @@ -18,6 +18,8 @@ import ( "os/exec" "regexp" "syscall" + + cleanhttp "github.com/hashicorp/go-cleanhttp" ) // Getter defines the interface that schemes must implement to download @@ -49,8 +51,13 @@ var Getters map[string]Getter // syntax is schema::url, example: git::https://foo.com var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) +// httpClient is the default client to be used by HttpGetters. 
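The zip decompressor above gains the same `..` guard that untar applies, via the unexported `containsDotDot` helper in decompress.go. A standalone copy of that check for illustration (only a full `..` path component is rejected, not `..` appearing inside a name):

```
package main

import (
	"fmt"
	"strings"
)

func containsDotDot(v string) bool {
	if !strings.Contains(v, "..") {
		return false
	}
	for _, ent := range strings.FieldsFunc(v, func(r rune) bool { return r == '/' || r == '\\' }) {
		if ent == ".." {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(containsDotDot("../../etc/passwd")) // true: traversal attempt
	fmt.Println(containsDotDot("a/b..c/d"))         // false: ".." is not a full component
}
```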
+var httpClient = cleanhttp.DefaultClient() + func init() { - httpGetter := &HttpGetter{Netrc: true} + httpGetter := &HttpGetter{ + Netrc: true, + } Getters = map[string]Getter{ "file": new(FileGetter), diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go index 0728139..cb1d029 100644 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -11,6 +11,7 @@ import ( "strings" urlhelper "github.com/hashicorp/go-getter/helper/url" + "github.com/hashicorp/go-safetemp" "github.com/hashicorp/go-version" ) @@ -105,13 +106,11 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { // GetFile for Git doesn't support updating at this time. It will download // the file every time. func (g *GitGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-git") + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(td); err != nil { - return err - } + defer tdcloser.Close() // Get the filename, and strip the filename from the URL so we can // just get the repository directly. @@ -180,17 +179,34 @@ func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { // setupGitEnv sets up the environment for the given command. This is used to // pass configuration data to git and ssh and enables advanced cloning methods. func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { - var sshOpts []string + const gitSSHCommand = "GIT_SSH_COMMAND=" + var sshCmd []string + + // If we have an existing GIT_SSH_COMMAND, we need to append our options. + // We will also remove our old entry to make sure the behavior is the same + // with versions of Go < 1.9. + env := os.Environ() + for i, v := range env { + if strings.HasPrefix(v, gitSSHCommand) { + sshCmd = []string{v} + + env[i], env[len(env)-1] = env[len(env)-1], env[i] + env = env[:len(env)-1] + break + } + } + + if len(sshCmd) == 0 { + sshCmd = []string{gitSSHCommand + "ssh"} + } if sshKeyFile != "" { // We have an SSH key temp file configured, tell ssh about this. - sshOpts = append(sshOpts, "-i", sshKeyFile) + sshCmd = append(sshCmd, "-i", sshKeyFile) } - cmd.Env = append(os.Environ(), - // Set the ssh command to use for clones. - "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "), - ) + env = append(env, strings.Join(sshCmd, " ")) + cmd.Env = env } // checkGitVersion is used to check the version of git installed on the system diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go index 820bdd4..f386922 100644 --- a/vendor/github.com/hashicorp/go-getter/get_hg.go +++ b/vendor/github.com/hashicorp/go-getter/get_hg.go @@ -2,7 +2,6 @@ package getter import ( "fmt" - "io/ioutil" "net/url" "os" "os/exec" @@ -10,6 +9,7 @@ import ( "runtime" urlhelper "github.com/hashicorp/go-getter/helper/url" + "github.com/hashicorp/go-safetemp" ) // HgGetter is a Getter implementation that will download a module from @@ -64,13 +64,13 @@ func (g *HgGetter) Get(dst string, u *url.URL) error { // GetFile for Hg doesn't support updating at this time. It will download // the file every time. func (g *HgGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-hg") + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. 
+ td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(td); err != nil { - return err - } + defer tdcloser.Close() // Get the filename, and strip the filename from the URL so we can // just get the repository directly. diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go index 3c02034..d2e2879 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -4,12 +4,13 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strings" + + "github.com/hashicorp/go-safetemp" ) // HttpGetter is a Getter implementation that will download from an HTTP @@ -36,6 +37,10 @@ type HttpGetter struct { // Netrc, if true, will lookup and use auth information found // in the user's netrc file if available. Netrc bool + + // Client is the http.Client to use for Get requests. + // This defaults to a cleanhttp.DefaultClient if left unset. + Client *http.Client } func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { @@ -57,13 +62,17 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } } + if g.Client == nil { + g.Client = httpClient + } + // Add terraform-get to the parameter. q := u.Query() q.Add("terraform-get", "1") u.RawQuery = q.Encode() // Get the URL - resp, err := http.Get(u.String()) + resp, err := g.Client.Get(u.String()) if err != nil { return err } @@ -98,7 +107,18 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } func (g *HttpGetter) GetFile(dst string, u *url.URL) error { - resp, err := http.Get(u.String()) + if g.Netrc { + // Add auth from netrc if we can + if err := addAuthFromNetrc(u); err != nil { + return err + } + } + + if g.Client == nil { + g.Client = httpClient + } + + resp, err := g.Client.Get(u.String()) if err != nil { return err } @@ -116,29 +136,40 @@ func (g *HttpGetter) GetFile(dst string, u *url.URL) error { if err != nil { return err } - defer f.Close() - _, err = io.Copy(f, resp.Body) + n, err := io.Copy(f, resp.Body) + if err == nil && n < resp.ContentLength { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } return err } // getSubdir downloads the source into the destination, but with // the proper subdir. func (g *HttpGetter) getSubdir(dst, source, subDir string) error { - // Create a temporary directory to store the full source - td, err := ioutil.TempDir("", "tf") + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. 
+ td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - defer os.RemoveAll(td) + defer tdcloser.Close() // Download that into the given directory if err := Get(td, source); err != nil { return err } + // Process any globbing + sourcePath, err := SubdirGlob(td, subDir) + if err != nil { + return err + } + // Make sure the subdir path actually exists - sourcePath := filepath.Join(td, subDir) if _, err := os.Stat(sourcePath); err != nil { return fmt.Errorf( "Error downloading %s: %s", source, err) diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go index d3bffeb..ebb3217 100644 --- a/vendor/github.com/hashicorp/go-getter/get_s3.go +++ b/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -28,7 +28,7 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { } // Create client config - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) @@ -84,7 +84,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { return err } - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) @@ -139,7 +139,7 @@ func (g *S3Getter) GetFile(dst string, u *url.URL) error { return err } - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) return g.getObject(client, dst, bucket, path, version) @@ -174,7 +174,7 @@ func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) er return err } -func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config { +func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { conf := &aws.Config{} if creds == nil { // Grab the metadata URL @@ -195,6 +195,14 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) * }) } + if creds != nil { + conf.Endpoint = &url.Host + conf.S3ForcePathStyle = aws.Bool(true) + if url.Scheme == "http" { + conf.DisableSSL = aws.Bool(true) + } + } + conf.Credentials = creds if region != "" { conf.Region = aws.String(region) @@ -204,29 +212,48 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) * } func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { - // Expected host style: s3.amazonaws.com. They always have 3 parts, - // although the first may differ if we're accessing a specific region. - hostParts := strings.Split(u.Host, ".") - if len(hostParts) != 3 { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } + // This checks whether we are dealing with S3 or + // another S3-compliant service; S3 itself has a + // predictable URL while the others do not + if strings.Contains(u.Host, "amazonaws.com") { + // Expected host style: s3.amazonaws.com. They always have 3 parts, + // although the first may differ if we're accessing a specific region.
+ hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } - // Parse the region out of the first part of the host - region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") - if region == "" { - region = "us-east-1" - } + // Parse the region out of the first part of the host + region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") + if region == "" { + region = "us-east-1" + } - pathParts := strings.SplitN(u.Path, "/", 3) - if len(pathParts) != 3 { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") - bucket = pathParts[1] - path = pathParts[2] - version = u.Query().Get("version") + } else { + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3-compliant URL") + return + } + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") + region = u.Query().Get("region") + if region == "" { + region = "us-east-1" + } + } _, hasAwsId := u.Query()["aws_access_key_id"] _, hasAwsSecret := u.Query()["aws_access_key_secret"] diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go index 4d5ee3c..c63f2bb 100644 --- a/vendor/github.com/hashicorp/go-getter/source.go +++ b/vendor/github.com/hashicorp/go-getter/source.go @@ -1,6 +1,8 @@ package getter import ( + "fmt" + "path/filepath" "strings" ) @@ -34,3 +36,27 @@ func SourceDirSubdir(src string) (string, string) { return src, subdir } + +// SubdirGlob returns the actual subdir with globbing processed. +// +// dst should be a destination directory that is already populated (the +// download is complete) and subDir should be the set subDir. If subDir +// is an empty string, this returns an empty string. +// +// The returned path is the full absolute path. +func SubdirGlob(dst, subDir string) (string, error) { + matches, err := filepath.Glob(filepath.Join(dst, subDir)) + if err != nil { + return "", err + } + + if len(matches) == 0 { + return "", fmt.Errorf("subdir %q not found", subDir) + } + + if len(matches) > 1 { + return "", fmt.Errorf("subdir %q matches multiple paths", subDir) + } + + return matches[0], nil +} diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE new file mode 100644 index 0000000..abaf1e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md new file mode 100644 index 0000000..614342b --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -0,0 +1,123 @@ +# go-hclog + +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: https://godoc.org/github.com/hashicorp/go-hclog + +`go-hclog` is a package for Go that provides a simple key/value logging +interface for use in development and production environments. + +It provides logging levels that provide decreased output based upon the +desired amount of output, unlike the standard library `log` package. + +It does not provide `Printf` style logging, only key/value logging that is +exposed as arguments to the logging functions for simplicity. + +It provides a human readable output mode for use in development as well as +JSON output mode for production. + +## Stability Note + +While this library is fully open source and HashiCorp will be maintaining it +(since we are and will be making extensive use of it), the API and output +format is subject to minor changes as we fully bake and vet it in our projects. +This notice will be removed once it's fully integrated into our major projects +and no further changes are anticipated. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-hclog`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-hclog + +## Usage + +### Use the global logger + +```go +hclog.Default().Info("hello world") +``` + +```text +2017-07-05T16:15:55.167-0700 [INFO ] hello world +``` + +(Note timestamps are removed in future examples for brevity.) + +### Create a new logger + +```go +appLogger := hclog.New(&hclog.LoggerOptions{ + Name: "my-app", + Level: hclog.LevelFromString("DEBUG"), +}) +``` + +### Emit an Info level message with 2 key/value pairs + +```go +input := "5.5" +_, err := strconv.ParseInt(input, 10, 32) +if err != nil { + appLogger.Info("Invalid input for ParseInt", "input", input, "error", err) +} +``` + +```text +... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax" +``` + +### Create a new Logger for a major subsystem + +```go +subsystemLogger := appLogger.Named("transport") +subsystemLogger.Info("we are transporting something") +``` + +```text +... [INFO ] my-app.transport: we are transporting something +``` + +Notice that logs emitted by `subsystemLogger` contain `my-app.transport`, +reflecting both the application and subsystem names. + +### Create a new Logger with fixed key/value pairs + +Using `With()` will include a specific key-value pair in all messages emitted +by that logger. + +```go +requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363" +requestLogger := subsystemLogger.With("request", requestID) +requestLogger.Info("we are transporting a request") +``` + +```text +... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363 +``` + +This allows sub Loggers to be context specific without having to thread that +into all the callers. 
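(Editorial aside: the two mechanisms compose. A hypothetical request handler could derive a per-request logger from the `appLogger` defined earlier:)

```go
func handleRequest(requestID string) {
	logger := appLogger.Named("http").With("request", requestID)

	// Every line carries both the "my-app.http" name and the request pair.
	logger.Info("handling request")
	logger.Warn("request is slow", "elapsed", "2.5s")
}
```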
+ + ### Use this with code that uses the standard library logger + + If you want to use the standard library's `log.Logger` interface you can wrap + `hclog.Logger` by calling the `StandardLogger()` method. This allows you to use + it with the familiar `Println()`, `Printf()`, etc. For example: + + ```go + stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, + }) + // Printf() is provided by stdlib log.Logger interface, not hclog.Logger + stdLogger.Printf("[DEBUG] %+v", stdLogger) + ``` + + ```text + ... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} + ``` + + Notice that if `appLogger` is initialized with the `INFO` log level _and_ you + specify `InferLevels: true`, you will not see any output here. You must change + `appLogger` to `DEBUG` to see output. See the docs for more information. diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go new file mode 100644 index 0000000..55ce439 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -0,0 +1,34 @@ +package hclog + +import ( + "sync" +) + +var ( + protect sync.Once + def Logger + + // The options used to create the Default logger. These are + // read only when the Default logger is created, so set them + // as soon as the process starts. + DefaultOptions = &LoggerOptions{ + Level: DefaultLevel, + Output: DefaultOutput, + } +) + +// Return a logger that is held globally. This can be a good starting +// place, and then you can use .With() and .Named() to create sub-loggers +// to be used in more specific contexts. +func Default() Logger { + protect.Do(func() { + def = New(DefaultOptions) + }) + + return def +} + +// A short alias for Default() +func L() Logger { + return Default() +} diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go new file mode 100644 index 0000000..9f90c28 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/int.go @@ -0,0 +1,385 @@ +package hclog + +import ( + "bufio" + "encoding/json" + "fmt" + "log" + "os" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO ]", + Warn: "[WARN ]", + Error: "[ERROR]", + } +) + +// Given the options (nil for defaults), create a new Logger +func New(opts *LoggerOptions) Logger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = os.Stderr + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + return &intLogger{ + m: new(sync.Mutex), + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + w: bufio.NewWriter(output), + level: level, + } +} + +// The internal logger implementation. Internal in that it is defined entirely +// by this package. +type intLogger struct { + json bool + caller bool + name string + + // this is a pointer so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + m *sync.Mutex + w *bufio.Writer + level Level + + implied []interface{} +} + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// The time format to use for logging. This is a version of RFC3339 that +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// Log a message and a set of key/value pairs if the given level is at +// or more severe than the threshold configured in the Logger.
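(Editorial aside: a small sketch of the threshold rule this comment describes; the logger name is illustrative.)

```go
logger := hclog.New(&hclog.LoggerOptions{
	Name:  "example",
	Level: hclog.Warn, // the threshold
})

logger.Info("dropped: Info is less severe than Warn")
logger.Error("emitted: Error is at least as severe as Warn", "attempt", 3)
```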
+func (z *intLogger) Log(level Level, msg string, args ...interface{}) { + if level < z.level { + return + } + + t := time.Now() + + z.m.Lock() + defer z.m.Unlock() + + if z.json { + z.logJson(t, level, msg, args...) + } else { + z.log(t, level, msg, args...) + } + + z.w.Flush() +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + + // Find the last separator. + // + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// Non-JSON logging format function +func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { + z.w.WriteString(t.Format(TimeFormat)) + z.w.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + z.w.WriteString(s) + } else { + z.w.WriteString("[UNKN ]") + } + + if z.caller { + if _, file, line, ok := runtime.Caller(3); ok { + z.w.WriteByte(' ') + z.w.WriteString(trimCallerPath(file)) + z.w.WriteByte(':') + z.w.WriteString(strconv.Itoa(line)) + z.w.WriteByte(':') + } + } + + z.w.WriteByte(' ') + + if z.name != "" { + z.w.WriteString(z.name) + z.w.WriteString(": ") + } + + z.w.WriteString(msg) + + args = append(z.implied, args...) 
+ + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + args = append(args, "") + } + } + + z.w.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var val string + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + default: + val = fmt.Sprintf("%v", st) + } + + z.w.WriteByte(' ') + z.w.WriteString(args[i].(string)) + z.w.WriteByte('=') + + if strings.ContainsAny(val, " \t\n\r") { + z.w.WriteByte('"') + z.w.WriteString(val) + z.w.WriteByte('"') + } else { + z.w.WriteString(val) + } + } + } + + z.w.WriteString("\n") + + if stacktrace != "" { + z.w.WriteString(string(stacktrace)) + } +} + +// JSON logging function +func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if z.name != "" { + vals["@module"] = z.name + } + + if z.caller { + if _, file, line, ok := runtime.Caller(3); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + args = append(args, "") + } + } + + for i := 0; i < len(args); i = i + 2 { + if _, ok := args[i].(string); !ok { + // As this is the logging function not much we can do here + // without injecting into logs... + continue + } + vals[args[i].(string)] = args[i+1] + } + } + + err := json.NewEncoder(z.w).Encode(vals) + if err != nil { + panic(err) + } +} + +// Emit the message and args at DEBUG level +func (z *intLogger) Debug(msg string, args ...interface{}) { + z.Log(Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (z *intLogger) Trace(msg string, args ...interface{}) { + z.Log(Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (z *intLogger) Info(msg string, args ...interface{}) { + z.Log(Info, msg, args...) +} + +// Emit the message and args at WARN level +func (z *intLogger) Warn(msg string, args ...interface{}) { + z.Log(Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (z *intLogger) Error(msg string, args ...interface{}) { + z.Log(Error, msg, args...) 
+} + +// Indicate that the logger would emit TRACE level logs +func (z *intLogger) IsTrace() bool { + return z.level == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (z *intLogger) IsDebug() bool { + return z.level <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (z *intLogger) IsInfo() bool { + return z.level <= Info +} + +// Indicate that the logger would emit WARN level logs +func (z *intLogger) IsWarn() bool { + return z.level <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (z *intLogger) IsError() bool { + return z.level <= Error +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (z *intLogger) With(args ...interface{}) Logger { + var nz intLogger = *z + + nz.implied = append(nz.implied, args...) + + return &nz +} + +// Create a new sub-Logger with a name descending from the current name. +// This is used to create a subsystem specific Logger. +func (z *intLogger) Named(name string) Logger { + var nz intLogger = *z + + if nz.name != "" { + nz.name = nz.name + "." + name + } + + return &nz +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (z *intLogger) ResetNamed(name string) Logger { + var nz intLogger = *z + + nz.name = name + + return &nz +} + +// Create a *log.Logger that will send its data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. +func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0) +} diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go new file mode 100644 index 0000000..6bb16ba --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/log.go @@ -0,0 +1,138 @@ +package hclog + +import ( + "io" + "log" + "os" + "strings" +) + +var ( + DefaultOutput = os.Stderr + DefaultLevel = Info +) + +type Level int + +const ( + // This is a special level used to indicate that no level has been + // set and allow for a default to be used. + NoLevel Level = 0 + + // The most verbose level. Intended to be used for the tracing of actions + // in code, such as function enters/exits, etc. + Trace Level = 1 + + // For programmer lowlevel analysis. + Debug Level = 2 + + // For information about steady state operations. + Info Level = 3 + + // For information about rare but handled events. + Warn Level = 4 + + // For information about unrecoverable events. + Error Level = 5 +) + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case. Accept "INFO" or "info" + levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + default: + return NoLevel + } +} + +// The main Logger interface. All code should be written against this interface only.
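(Editorial aside: a sketch of what coding against the interface looks like in practice; the Server type here is hypothetical.)

```go
// Server depends only on the Logger interface, so callers may pass an
// intLogger, a test fake, or anything else satisfying the interface.
type Server struct {
	logger hclog.Logger
}

func NewServer(logger hclog.Logger) *Server {
	if logger == nil {
		logger = hclog.Default() // fall back to the global logger
	}
	return &Server{logger: logger}
}
```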
+type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. This and the other Is* guards + // are used to elide expensive logging code based on the current level. + IsTrace() bool + + // Indicate if DEBUG logs would be emitted. This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. This and the other Is* guards + IsInfo() bool + + // Indicate if WARN logs would be emitted. This and the other Is* guards + IsWarn() bool + + // Indicate if ERROR logs would be emitted. This and the other Is* guards + IsError() bool + + // Creates a sublogger that will always have the given key/value pairs + With(args ...interface{}) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all its own logs + // without losing context. + Named(name string) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named, which honors + // the current name as well. + ResetNamed(name string) Logger + + // Return a value that conforms to the stdlib log.Logger interface + StandardLogger(opts *StandardLoggerOptions) *log.Logger +} + +type StandardLoggerOptions struct { + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them. + // This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO], and + // [DEBUG], stripping the prefix off before reapplying it. + InferLevels bool +} + +type LoggerOptions struct { + // Name of the subsystem to prefix logs with + Name string + + // The threshold for the logger. Anything less severe is suppressed + Level Level + + // Where to write the logs to. Defaults to os.Stderr if nil + Output io.Writer + + // Control if the output should be in JSON. + JSONFormat bool + + // Include file and line information in each log line + IncludeLocation bool +} diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file mode 100644 index 0000000..8af1a3b --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,108 @@ +// Copyright (c) 2016 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// A stacktrace gathered by a previous call to log.Stacktrace. If passed +// to a logging function, the stacktrace will be appended. +type CapturedStacktrace string + +// Gather a stacktrace of the current goroutine and return it to be passed +// to a logging function. +func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. 
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 0000000..2bb927f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,62 @@ +package hclog + +import ( + "bytes" + "strings" +) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + hl Logger + inferLevels bool +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.inferLevels { + level, str := s.pickLevel(str) + switch level { + case Trace: + s.hl.Trace(str) + case Debug: + s.hl.Debug(str) + case Info: + s.hl.Info(str) + case Warn: + s.hl.Warn(str) + case Error: + s.hl.Error(str) + default: + s.hl.Info(str) + } + } else { + s.hl.Info(str) + } + + return len(data), nil +} + +// Detect, based on conventions, what log level this is +func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 2058cfb..e4558db 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -1,10 +1,9 @@ # Go Plugin System over RPC `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system -that has been in use by HashiCorp tooling for over 3 years. While initially -created for [Packer](https://www.packer.io), it has since been used by -[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io), -with plans to also use it for [Nomad](https://www.nomadproject.io) and +that has been in use by HashiCorp tooling for over 4 years. 
While initially +created for [Packer](https://www.packer.io), it is additionally in use by +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and [Vault](https://www.vaultproject.io). While the plugin system is over RPC, it is currently only designed to work @@ -24,6 +23,11 @@ interface as if it were going to run in the same process. For a plugin user: you just use and call functions on an interface as if it were in the same process. This plugin system handles the communication in between. +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + **Complex arguments and return values are supported.** This library provides APIs for handling complex arguments and return values such as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library @@ -37,7 +41,10 @@ and the plugin can call back into the host process. **Built-in Logging.** Any plugins that use the `log` standard library will have log data automatically sent to the host process. The host process will mirror this output prefixed with the path to the plugin -binary. This makes debugging with plugins simple. +binary. This makes debugging with plugins simple. If the host system +uses [hclog](https://github.com/hashicorp/go-hclog) then the log data +will be structured. If the plugin also uses hclog, logs from the plugin +will be sent to the host hclog and be structured. **Protocol Versioning.** A very basic "protocol version" is supported that can be incremented to invalidate any previous plugins. This is useful when @@ -62,13 +69,18 @@ This requires the host/plugin to know this is possible and daemonize properly. `NewClient` takes a `ReattachConfig` to determine if and how to reattach. +**Cryptographically Secure Plugins.** Plugins can be verified with an expected +checksum and RPC communications can be configured to use TLS. The host process +must be properly secured to protect this configuration. + ## Architecture The HashiCorp plugin system works by launching subprocesses and communicating -over RPC (using standard `net/rpc`). A single connection is made between -any plugin and the host process, and we use a -[connection multiplexing](https://github.com/hashicorp/yamux) -library to multiplex any other connections on top. +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. This architecture has a number of benefits: @@ -76,8 +88,8 @@ This architecture has a number of benefits: panic the plugin user. * Plugins are very easy to write: just write a Go application and `go build`. - Theoretically you could also use another language as long as it can - communicate the Go `net/rpc` protocol but this hasn't yet been tried. + Or use any other language to write a gRPC server with a tiny amount of + boilerplate to support go-plugin. 
* Plugins are very easy to install: just put the binary in a location where the host will find it (depends on the host but this library also provides @@ -85,8 +97,8 @@ This architecture has a number of benefits: * Plugins can be relatively secure: The plugin only has access to the interfaces and args given to it, not to the entire memory space of the - process. More security features are planned (see the coming soon section - below). + process. Additionally, go-plugin can communicate with the plugin over + TLS. ## Usage @@ -97,10 +109,9 @@ high-level steps that must be done. Examples are available in the 1. Choose the interface(s) you want to expose for plugins. 2. For each interface, implement an implementation of that interface - that communicates over an `*rpc.Client` (from the standard `net/rpc` - package) for every function call. Likewise, implement the RPC server - struct this communicates to which is then communicating to a real, - concrete implementation. + that communicates over a `net/rpc` connection or a + [gRPC](http://www.grpc.io) connection, or both. You'll have to implement + both a client and server implementation. 3. Create a `Plugin` implementation that knows how to create the RPC client/server for a given plugin type. @@ -125,10 +136,6 @@ improvements we can make. At this point in time, the roadmap for the plugin system is: -**Cryptographically Secure Plugins.** We'll implement signing plugins -and loading signed plugins in order to allow Vault to make use of multi-process -in a secure way. - **Semantic Versioning.** Plugins will be able to implement a semantic version. This plugin system will give host processes a system for constraining versions. This is in addition to the protocol versioning already present diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index 9f8a0f2..b3e3b78 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -2,8 +2,12 @@ package plugin import ( "bufio" + "context" + "crypto/subtle" + "crypto/tls" "errors" "fmt" + "hash" "io" "io/ioutil" "log" @@ -17,6 +21,8 @@ import ( "sync/atomic" "time" "unicode" + + hclog "github.com/hashicorp/go-hclog" ) // If this is 1, then we've called CleanupClients. This can be used @@ -35,6 +41,22 @@ var ( // ErrProcessNotFound is returned when a client is instantiated to // reattach to an existing process and it isn't found. ErrProcessNotFound = errors.New("Reattachment process not found") + + // ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match + // the one provided in the SecureConfig. + ErrChecksumsDoNotMatch = errors.New("checksums did not match") + + // ErrSecureConfigNoChecksum is returned when an empty checksum is provided to the + // SecureConfig. + ErrSecureConfigNoChecksum = errors.New("no checksum provided") + + // ErrSecureConfigNoHash is returned when a nil Hash object is provided to the + // SecureConfig. + ErrSecureConfigNoHash = errors.New("no hash implementation provided") + + // ErrSecureConfigAndReattach is returned when both Reattach and + // SecureConfig are set. + ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") ) // Client handles the lifecycle of a plugin application.
It launches @@ -55,7 +77,10 @@ type Client struct { l sync.Mutex address net.Addr process *os.Process - client *RPCClient + client ClientProtocol + protocol Protocol + logger hclog.Logger + doneCtx context.Context } // ClientConfig is the configuration used to initialize a new @@ -79,6 +104,13 @@ type ClientConfig struct { Cmd *exec.Cmd Reattach *ReattachConfig + // SecureConfig is configuration for verifying the integrity of the + // executable. It cannot be used with Reattach. + SecureConfig *SecureConfig + + // TLSConfig is used to enable TLS on the RPC client. + TLSConfig *tls.Config + // Managed represents if the client should be managed by the // plugin package or not. If true, then by calling CleanupClients, // it will automatically be cleaned up. Otherwise, the client @@ -109,14 +141,74 @@ type ClientConfig struct { // sync any of these streams. SyncStdout io.Writer SyncStderr io.Writer + + // AllowedProtocols is a list of allowed protocols. If this isn't set, + // then only netrpc is allowed. This is so that older go-plugin systems + // can show friendly errors if they see a plugin with an unknown + // protocol. + // + // By setting this, you can cause an error immediately on plugin start + // if an unsupported protocol is used with a good error message. + // + // If this isn't set at all (nil value), then only net/rpc is accepted. + // This is done for legacy reasons. You must explicitly opt in to + // new protocols. + AllowedProtocols []Protocol + + // Logger is the logger that the client will use. If none is provided, + // it will default to hclog's default logger. + Logger hclog.Logger } // ReattachConfig is used to configure a client to reattach to an // already-running plugin process. You can retrieve this information by // calling ReattachConfig on Client. type ReattachConfig struct { - Addr net.Addr - Pid int + Protocol Protocol + Addr net.Addr + Pid int +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying that the checksum +// matches an expected value. Hash is used to specify the hashing method to use when checksumming +// the file. The configuration is verified by the client by calling the +// SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// cannot be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig.
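(Editorial aside: a hedged sketch of wiring SecureConfig into a client. The plugin path and helper name are hypothetical, the checksum must come from a trusted out-of-band source, and handshake/plugins stand in for whatever the host already defines.)

```go
package host

import (
	"crypto/sha256"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

// newVerifiedClient refuses to launch ./example-plugin unless its
// SHA-256 matches expectedSum (e.g. taken from a signed release manifest).
func newVerifiedClient(expectedSum []byte, handshake plugin.HandshakeConfig,
	plugins map[string]plugin.Plugin) *plugin.Client {

	return plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshake,
		Plugins:         plugins,
		Cmd:             exec.Command("./example-plugin"),
		SecureConfig: &plugin.SecureConfig{
			Checksum: expectedSum,
			Hash:     sha256.New(),
		},
	})
}
```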
+func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil } // This makes sure all the managed subprocesses are killed and properly @@ -174,7 +266,22 @@ func NewClient(config *ClientConfig) (c *Client) { config.SyncStderr = ioutil.Discard } - c = &Client{config: config} + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + c = &Client{ + config: config, + logger: config.Logger, + } if config.Managed { managedClientsLock.Lock() managedClients = append(managedClients, c) @@ -184,11 +291,11 @@ func NewClient(config *ClientConfig) (c *Client) { return } -// Client returns an RPC client for the plugin. +// Client returns the protocol client for this connection. // -// Subsequent calls to this will return the same RPC client. -func (c *Client) Client() (*RPCClient, error) { - addr, err := c.Start() +// Subsequent calls to this will return the same client. +func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() if err != nil { return nil, err } @@ -200,29 +307,18 @@ func (c *Client) Client() (*RPCClient, error) { return c.client, nil } - // Connect to the client - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) - // Create the actual RPC client - c.client, err = NewRPCClient(conn, c.config.Plugins) - if err != nil { - conn.Close() - return nil, err + case ProtocolGRPC: + c.client, err = newGRPCClient(c.doneCtx, c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) } - // Begin the stream syncing so that stdin, out, err work properly - err = c.client.SyncStreams( - c.config.SyncStdout, - c.config.SyncStderr) if err != nil { - c.client.Close() c.client = nil return nil, err } @@ -274,8 +370,7 @@ func (c *Client) Kill() { if err != nil { // If there was an error just log it. We're going to force // kill in a moment anyways. - log.Printf( - "[WARN] plugin: error closing client during Kill: %s", err) + c.logger.Warn("error closing client during Kill", "err", err) } } } @@ -318,13 +413,21 @@ func (c *Client) Start() (addr net.Addr, err error) { { cmdSet := c.config.Cmd != nil attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil if cmdSet == attachSet { return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } } // Create the logging channel for when we kill c.doneLogging = make(chan struct{}) + // Create a context for when we kill + var ctxCancel context.CancelFunc + c.doneCtx, ctxCancel = context.WithCancel(context.Background()) if c.config.Reattach != nil { // Verify the process still exists. 
If not, then it is an error @@ -350,7 +453,7 @@ func (c *Client) Start() (addr net.Addr, err error) { pidWait(pid) // Log so we can see it - log.Printf("[DEBUG] plugin: reattached plugin process exited\n") + c.logger.Debug("reattached plugin process exited") // Mark it c.l.Lock() @@ -359,11 +462,19 @@ func (c *Client) Start() (addr net.Addr, err error) { // Close the logging channel since that doesn't work on reattach close(c.doneLogging) + + // Cancel the context + ctxCancel() }(p.Pid) // Set the address and process c.address = c.config.Reattach.Addr c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } return c.address, nil } @@ -384,7 +495,15 @@ func (c *Client) Start() (addr net.Addr, err error) { cmd.Stderr = stderr_w cmd.Stdout = stdout_w - log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args) + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) err = cmd.Start() if err != nil { return @@ -418,12 +537,15 @@ func (c *Client) Start() (addr net.Addr, err error) { cmd.Wait() // Log and make sure to flush the logs write away - log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path) + c.logger.Debug("plugin process exited", "path", cmd.Path) os.Stderr.Sync() // Mark that we exited close(exitCh) + // Cancel the context, marking that we exited + ctxCancel() + // Set that we exited, which takes a lock c.l.Lock() defer c.l.Unlock() @@ -465,7 +587,7 @@ func (c *Client) Start() (addr net.Addr, err error) { timeout := time.After(c.config.StartTimeout) // Start looking for the address - log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path) + c.logger.Debug("waiting for RPC address", "path", cmd.Path) select { case <-timeout: err = errors.New("timeout while waiting for plugin to start") @@ -475,7 +597,7 @@ func (c *Client) Start() (addr net.Addr, err error) { // Trim the line and split by "|" in order to get the parts of // the output. line := strings.TrimSpace(string(lineBytes)) - parts := strings.SplitN(line, "|", 4) + parts := strings.SplitN(line, "|", 6) if len(parts) < 4 { err = fmt.Errorf( "Unrecognized remote plugin message: %s\n\n"+ @@ -495,7 +617,7 @@ func (c *Client) Start() (addr net.Addr, err error) { if int(coreProtocol) != CoreProtocolVersion { err = fmt.Errorf("Incompatible core API version with plugin. "+ - "Plugin version: %s, Ours: %d\n\n"+ + "Plugin version: %s, Core version: %d\n\n"+ "To fix this, the plugin usually only needs to be recompiled.\n"+ "Please report this to the plugin author.", parts[0], CoreProtocolVersion) return @@ -513,7 +635,7 @@ func (c *Client) Start() (addr net.Addr, err error) { // Test the API version if uint(protocol) != c.config.ProtocolVersion { err = fmt.Errorf("Incompatible API version with plugin. "+ - "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion) + "Plugin version: %s, Core version: %d", parts[1], c.config.ProtocolVersion) return } @@ -525,6 +647,27 @@ func (c *Client) Start() (addr net.Addr, err error) { default: err = fmt.Errorf("Unknown address type: %s", parts[3]) } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. 
+ c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return + } + } c.address = addr @@ -555,9 +698,57 @@ func (c *Client) ReattachConfig() *ReattachConfig { } return &ReattachConfig{ - Addr: c.address, - Pid: c.config.Cmd.Process.Pid, + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +// Protocol returns the protocol of the server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are suppressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. +func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. +func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + conn, err := netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil } func (c *Client) logStderr(r io.Reader) { @@ -566,9 +757,31 @@ func (c *Client) logStderr(r io.Reader) { line, err := bufR.ReadString('\n') if line != "" { c.config.Stderr.Write([]byte(line)) - line = strings.TrimRightFunc(line, unicode.IsSpace) - log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line) + + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + l.Debug(line) + } else { + out := flattenKVPairs(entry.KVPairs) + + l = l.With("timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...)
+ } + } } if err == io.EOF { diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 0000000..49fd21c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,455 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. +type streamer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the plugin. Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Process send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client. +func (s *gRPCBrokerServer) Send(i *ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the client. Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream.
+ recv chan *ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. +func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. 
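Before the Accept implementation below, it helps to see the two halves of the broker pattern together: one side reserves an ID and serves an extra gRPC server on it, the other side dials that same ID. A minimal sketch of both halves; the service registered here is gRPC reflection purely because it needs no generated code, and any real use would register its own service:

```go
package main

import (
	plugin "github.com/hashicorp/go-plugin"
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

// One side (typically the plugin): reserve a stream ID and serve an
// extra gRPC server on it via the broker.
func serveExtra(broker *plugin.GRPCBroker) uint32 {
	id := broker.NextId()
	go broker.AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server {
		s := grpc.NewServer(opts...)
		reflection.Register(s) // stand-in for a real generated service
		return s
	})
	return id // ship this ID to the other side inside a normal RPC message
}

// Other side (typically the host): dial the ID it received.
func dialExtra(broker *plugin.GRPCBroker, id uint32) (*grpc.ClientConn, error) {
	return broker.Dial(id)
}
```

The ID itself travels inside an ordinary RPC request or response; the broker only negotiates and holds open the listener and connection behind it.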
+func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. +func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. 
+func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go new file mode 100644 index 0000000..d490daf --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_broker.proto + +/* +Package plugin is a generated protocol buffer package. + +It is generated from these files: + grpc_broker.proto + +It has these top-level messages: + ConnInfo +*/ +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address" json:"address,omitempty"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for GRPCBroker service + +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], c.cc, "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for GRPCBroker service + +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 170 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 
0x34, 0x18, 0x0d, 0x18, 0x93, 0xd8, 0xc0, 0x4e, 0x36, 0x06, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x7b, 0x5d, 0xfb, 0xe1, 0xc7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/grpc_broker.proto new file mode 100644 index 0000000..f578348 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; +package plugin; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} + + diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 0000000..44294d0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,107 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "net" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets + opts = append(opts, grpc.WithDialer(dialer)) + + // go-plugin expects to block the connection + opts = append(opts, grpc.WithBlock()) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + if err != nil { + return nil, err + } + + // Start the broker. + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + return &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + }, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. 
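Dispense above, and Ping just below, are the calls a host typically makes once connected. A minimal sketch of that host-side flow, using plugin.NewClient and ClientConfig from this package's public API; the handshake values, plugin map, plugin name "kv", and the ./kv-plugin binary path are illustrative:

```go
package main

import (
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

// dispenseKV launches a plugin binary and dispenses one implementation.
// handshake and pluginMap would be shared with the plugin binary.
func dispenseKV(handshake plugin.HandshakeConfig, pluginMap map[string]plugin.Plugin) (interface{}, error) {
	pluginClient := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig:  handshake,
		Plugins:          pluginMap,
		Cmd:              exec.Command("./kv-plugin"),
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolNetRPC, plugin.ProtocolGRPC},
	})

	// Client() returns a ClientProtocol: an *RPCClient or *GRPCClient
	// depending on what the handshake negotiated.
	rpcClient, err := pluginClient.Client()
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}

	return rpcClient.Dispense("kv")
}
```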
+func (c *GRPCClient) Ping() error {
+ client := grpc_health_v1.NewHealthClient(c.Conn)
+ _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{
+ Service: GRPCServiceName,
+ })
+
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
new file mode 100644
index 0000000..3a72739
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
@@ -0,0 +1,132 @@
+package plugin
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/health"
+ "google.golang.org/grpc/health/grpc_health_v1"
+)
+
+// GRPCServiceName is the name of the service that the health check should
+// return as passing.
+const GRPCServiceName = "plugin"
+
+// DefaultGRPCServer can be used with the "GRPCServer" field for Server
+// as a default factory method to create a gRPC server with no extra options.
+func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server {
+ return grpc.NewServer(opts...)
+}
+
+// GRPCServer is a ServerType implementation that serves plugins over
+// gRPC. This allows plugins to easily be written for other languages.
+//
+// The GRPCServer outputs a custom configuration as a base64-encoded
+// JSON structure represented by the GRPCServerConfig config structure.
+type GRPCServer struct {
+ // Plugins are the list of plugins to serve.
+ Plugins map[string]Plugin
+
+ // Server is the actual server that will accept connections. This
+ // will be used for plugin registration as well.
+ Server func([]grpc.ServerOption) *grpc.Server
+
+ // TLS should be the TLS configuration if available. If this is nil,
+ // the connection will not have transport security.
+ TLS *tls.Config
+
+ // DoneCh is the channel that is closed when this server has exited.
+ DoneCh chan struct{}
+
+ // Stdout/Stderr are the readers for stdout/stderr that will be copied
+ // to the stdout/stderr connection that is output.
+ Stdout io.Reader
+ Stderr io.Reader
+
+ config GRPCServerConfig
+ server *grpc.Server
+ broker *GRPCBroker
+}
+
+// ServerProtocol impl.
+func (s *GRPCServer) Init() error {
+ // Create our server
+ var opts []grpc.ServerOption
+ if s.TLS != nil {
+ opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
+ }
+ s.server = s.Server(opts)
+
+ // Register the health service
+ healthCheck := health.NewServer()
+ healthCheck.SetServingStatus(
+ GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
+ grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
+
+ // Register the broker service
+ brokerServer := newGRPCBrokerServer()
+ RegisterGRPCBrokerServer(s.server, brokerServer)
+ s.broker = newGRPCBroker(brokerServer, s.TLS)
+ go s.broker.Run()
+
+ // Register all our plugins onto the gRPC server.
+ for k, raw := range s.Plugins {
+ p, ok := raw.(GRPCPlugin)
+ if !ok {
+ return fmt.Errorf("%q is not a GRPC-compatible plugin", k)
+ }
+
+ if err := p.GRPCServer(s.broker, s.server); err != nil {
+ return fmt.Errorf("error registering %q: %s", k, err)
+ }
+ }
+
+ return nil
+}
+
+// Stop calls Stop on the underlying grpc.Server
+func (s *GRPCServer) Stop() {
+ s.server.Stop()
+}
+
+// GracefulStop calls GracefulStop on the underlying grpc.Server
+func (s *GRPCServer) GracefulStop() {
+ s.server.GracefulStop()
+}
+
+// Config is the GRPCServerConfig encoded as JSON then base64.
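Plugin authors rarely construct a GRPCServer directly; they set ServeConfig.GRPCServer and let Serve build one, which also flips the handshake protocol to gRPC. A minimal plugin main as a sketch; the cookie values, the "kv" name, and KVGRPCPlugin are illustrative and would be shared with the host:

```go
package main

import (
	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	plugin.Serve(&plugin.ServeConfig{
		// The handshake must match the host's values exactly.
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "BASIC_PLUGIN",
			MagicCookieValue: "hello",
		},
		Plugins: map[string]plugin.Plugin{
			"kv": &KVGRPCPlugin{}, // illustrative GRPCPlugin implementation
		},
		// A non-nil GRPCServer is what makes Serve speak ProtocolGRPC.
		GRPCServer: plugin.DefaultGRPCServer,
	})
}
```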
+func (s *GRPCServer) Config() string {
+ // Create a buffer that will contain our final contents
+ var buf bytes.Buffer
+
+ // JSON encode the config; it is base64 encoded later, when the
+ // handshake line is written out.
+ if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
+ // We panic since this shouldn't happen under any scenario. We
+ // carefully control the structure being encoded here and it should
+ // always be successful.
+ panic(err)
+ }
+
+ return buf.String()
+}
+
+func (s *GRPCServer) Serve(lis net.Listener) {
+ // Start serving in a goroutine
+ go s.server.Serve(lis)
+
+ // Wait until graceful completion
+ <-s.DoneCh
+}
+
+// GRPCServerConfig is the extra configuration passed along for consumers
+// to facilitate using GRPC plugins.
+type GRPCServerConfig struct {
+ StdoutAddr string `json:"stdout_addr"`
+ StderrAddr string `json:"stderr_addr"`
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go
new file mode 100644
index 0000000..2996c14
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go
@@ -0,0 +1,73 @@
+package plugin
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
+type logEntry struct {
+ Message string `json:"@message"`
+ Level string `json:"@level"`
+ Timestamp time.Time `json:"timestamp"`
+ KVPairs []*logEntryKV `json:"kv_pairs"`
+}
+
+// logEntryKV is a key value pair within the Output payload
+type logEntryKV struct {
+ Key string `json:"key"`
+ Value interface{} `json:"value"`
+}
+
+// flattenKVPairs is used to flatten KVPair slice into []interface{}
+// for hclog consumption.
+func flattenKVPairs(kvs []*logEntryKV) []interface{} {
+ var result []interface{}
+ for _, kv := range kvs {
+ result = append(result, kv.Key)
+ result = append(result, kv.Value)
+ }
+
+ return result
+}
+
+// parseJSON handles parsing JSON output
+func parseJSON(input string) (*logEntry, error) {
+ var raw map[string]interface{}
+ entry := &logEntry{}
+
+ err := json.Unmarshal([]byte(input), &raw)
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse hclog-specific objects
+ if v, ok := raw["@message"]; ok {
+ entry.Message = v.(string)
+ delete(raw, "@message")
+ }
+
+ if v, ok := raw["@level"]; ok {
+ entry.Level = v.(string)
+ delete(raw, "@level")
+ }
+
+ if v, ok := raw["@timestamp"]; ok {
+ t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
+ if err != nil {
+ return nil, err
+ }
+ entry.Timestamp = t
+ delete(raw, "@timestamp")
+ }
+
+ // Parse dynamic KV args from the hclog payload.
+ for k, v := range raw {
+ entry.KVPairs = append(entry.KVPairs, &logEntryKV{
+ Key: k,
+ Value: v,
+ })
+ }
+
+ return entry, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
index 37c8fd6..79d9674 100644
--- a/vendor/github.com/hashicorp/go-plugin/plugin.go
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -9,7 +9,11 @@
 package plugin
 
 import (
+ "context"
+ "errors"
 "net/rpc"
+
+ "google.golang.org/grpc"
 )
 
 // Plugin is the interface that is implemented to serve/connect to an
@@ -23,3 +27,32 @@ type Plugin interface {
 // serving that communicates to the server end of the plugin.
 Client(*MuxBroker, *rpc.Client) (interface{}, error)
 }
+
+// GRPCPlugin is the interface that is implemented to serve/connect to
+// a plugin over gRPC.
+type GRPCPlugin interface {
+ // GRPCServer should register this plugin for serving with the
+ // given GRPCServer.
Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*GRPCBroker, *grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. +type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 0000000..0cfc19e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go index 29f9bf0..f30a4b1 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -1,6 +1,7 @@ package plugin import ( + "crypto/tls" "fmt" "io" "net" @@ -19,6 +20,42 @@ type RPCClient struct { stdout, stderr net.Conn } +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. 
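Putting GRPCPlugin and NetRPCUnsupportedPlugin together before returning to the net/rpc client below: a gRPC-only plugin type usually looks like the sketch that follows. The KV interface, RegisterKVServer/NewKVClient (generated proto code), and the kvGRPCServer/kvGRPCClient adapters are all illustrative names, not part of go-plugin:

```go
package kvplugin

import (
	"context"

	plugin "github.com/hashicorp/go-plugin"
	"google.golang.org/grpc"
)

// KV is an illustrative user-facing interface served by the plugin.
type KV interface {
	Get(key string) ([]byte, error)
}

// KVGRPCPlugin is gRPC-only: the embedded NetRPCUnsupportedPlugin supplies
// Server/Client methods that return errors for the net/rpc protocol.
type KVGRPCPlugin struct {
	plugin.NetRPCUnsupportedPlugin
	Impl KV
}

func (p *KVGRPCPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
	// RegisterKVServer and kvGRPCServer would come from the user's
	// generated proto code plus a small adapter; both are illustrative.
	RegisterKVServer(s, &kvGRPCServer{impl: p.Impl})
	return nil
}

func (p *KVGRPCPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
	// NewKVClient and kvGRPCClient are likewise illustrative adapters.
	return &kvGRPCClient{client: NewKVClient(c)}, nil
}
```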
+func newRPCClient(c *Client) (*RPCClient, error) {
+ // Connect to the client
+ conn, err := net.Dial(c.address.Network(), c.address.String())
+ if err != nil {
+ return nil, err
+ }
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ // Make sure to set keep alive so that the connection doesn't die
+ tcpConn.SetKeepAlive(true)
+ }
+
+ if c.config.TLSConfig != nil {
+ conn = tls.Client(conn, c.config.TLSConfig)
+ }
+
+ // Create the actual RPC client
+ result, err := NewRPCClient(conn, c.config.Plugins)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ // Begin the stream syncing so that stdin, out, err work properly
+ err = result.SyncStreams(
+ c.config.SyncStdout,
+ c.config.SyncStderr)
+ if err != nil {
+ result.Close()
+ return nil, err
+ }
+
+ return result, nil
+}
+
 // NewRPCClient creates a client from an already-open connection-like value.
 // Dial is typically used instead.
 func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
@@ -121,3 +158,13 @@ func (c *RPCClient) Dispense(name string) (interface{}, error) {
 
 return p.Client(c.broker, rpc.NewClient(conn))
 }
+
+// Ping pings the connection to ensure it is still alive.
+//
+// The error from the RPC call is returned verbatim so that you can inspect
+// it for further error analysis. Any error returned from here would indicate
+// that the connection to the plugin is not healthy.
+func (c *RPCClient) Ping() error {
+ var empty struct{}
+ return c.control.Call("Control.Ping", true, &empty)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
index 3984dc8..5bb18dd 100644
--- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
@@ -34,10 +34,14 @@ type RPCServer struct {
 lock sync.Mutex
 }
 
-// Accept accepts connections on a listener and serves requests for
-// each incoming connection. Accept blocks; the caller typically invokes
-// it in a go statement.
-func (s *RPCServer) Accept(lis net.Listener) {
+// ServerProtocol impl.
+func (s *RPCServer) Init() error { return nil }
+
+// ServerProtocol impl.
+func (s *RPCServer) Config() string { return "" }
+
+// ServerProtocol impl.
+func (s *RPCServer) Serve(lis net.Listener) {
 for {
 conn, err := lis.Accept()
 if err != nil {
@@ -122,6 +126,14 @@ type controlServer struct {
 server *RPCServer
 }
 
+// Ping can be called to verify that the connection to the plugin (and
+// likely the plugin binary itself) is still alive.
+func (c *controlServer) Ping(
+ null bool, response *struct{}) error {
+ *response = struct{}{}
+ return nil
+}
+
 func (c *controlServer) Quit(
 null bool, response *struct{}) error {
 // End the server
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
index b5c5270..1e808b9 100644
--- a/vendor/github.com/hashicorp/go-plugin/server.go
+++ b/vendor/github.com/hashicorp/go-plugin/server.go
@@ -1,6 +1,8 @@
 package plugin
 
 import (
+ "crypto/tls"
+ "encoding/base64"
 "errors"
 "fmt"
 "io/ioutil"
@@ -11,6 +13,10 @@ import (
 "runtime"
 "strconv"
 "sync/atomic"
+
+ "github.com/hashicorp/go-hclog"
+
+ "google.golang.org/grpc"
 )
 
 // CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
@@ -45,14 +51,41 @@ type ServeConfig struct {
 // HandshakeConfig is the configuration that must match clients.
 HandshakeConfig
 
+ // TLSProvider is a function that returns a configured tls.Config.
+ TLSProvider func() (*tls.Config, error)
+
 // Plugins are the plugins that are served.
Plugins map[string]Plugin + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger +} + +// Protocol returns the protocol that this server should speak. +func (c *ServeConfig) Protocol() Protocol { + result := ProtocolNetRPC + if c.GRPCServer != nil { + result = ProtocolGRPC + } + + return result } // Serve serves the plugins given by ServeConfig. // // Serve doesn't return until the plugin is done being executed. Any -// errors will be outputted to the log. +// errors will be outputted to os.Stderr. // // This is the method that plugins should call in their main() functions. func Serve(opts *ServeConfig) { @@ -77,6 +110,16 @@ func Serve(opts *ServeConfig) { // Logging goes to the original stderr log.SetOutput(os.Stderr) + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + // Create our new stdout, stderr files. These will override our built-in // stdout/stderr so that it works across the stream boundary. stdout_r, stdout_w, err := os.Pipe() @@ -93,30 +136,86 @@ func Serve(opts *ServeConfig) { // Register a listener so we can accept a connection listener, err := serverListener() if err != nil { - log.Printf("[ERR] plugin: plugin init: %s", err) + logger.Error("plugin init error", "error", err) return } - defer listener.Close() + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } // Create the channel to tell us when we're done doneCh := make(chan struct{}) - // Create the RPC server to dispense - server := &RPCServer{ - Plugins: opts.Plugins, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, + // Build the server type + var server ServerProtocol + switch opts.Protocol() { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. 
+ if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: opts.Plugins, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: opts.Plugins, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + default: + panic("unknown server protocol: " + opts.Protocol()) } + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return + } + + // Build the extra configuration + extra := "" + if v := server.Config(); v != "" { + extra = base64.StdEncoding.EncodeToString([]byte(v)) + } + if extra != "" { + extra = "|" + extra + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + // Output the address and service name to stdout so that core can bring it up. - log.Printf("[DEBUG] plugin: plugin address: %s %s\n", - listener.Addr().Network(), listener.Addr().String()) - fmt.Printf("%d|%d|%s|%s\n", + fmt.Printf("%d|%d|%s|%s|%s%s\n", CoreProtocolVersion, opts.ProtocolVersion, listener.Addr().Network(), - listener.Addr().String()) + listener.Addr().String(), + opts.Protocol(), + extra) os.Stdout.Sync() // Eat the interrupts @@ -127,9 +226,7 @@ func Serve(opts *ServeConfig) { for { <-ch newCount := atomic.AddInt32(&count, 1) - log.Printf( - "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.", - newCount) + logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) } }() @@ -137,10 +234,8 @@ func Serve(opts *ServeConfig) { os.Stdout = stdout_w os.Stderr = stderr_w - // Serve - go server.Accept(listener) - - // Wait for the graceful exit + // Accept connections and wait for completion + go server.Serve(listener) <-doneCh } diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index 9086a1b..df29593 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -2,9 +2,12 @@ package plugin import ( "bytes" + "context" "net" "net/rpc" - "testing" + + "github.com/mitchellh/go-testing-interface" + "google.golang.org/grpc" ) // The testing file contains test helpers that you can use outside of @@ -12,7 +15,7 @@ import ( // TestConn is a helper function for returning a client and server // net.Conn connected to each other. -func TestConn(t *testing.T) (net.Conn, net.Conn) { +func TestConn(t testing.T) (net.Conn, net.Conn) { // Listen to any local port. This listener will be closed // after a single connection is established. l, err := net.Listen("tcp", "127.0.0.1:0") @@ -46,7 +49,7 @@ func TestConn(t *testing.T) (net.Conn, net.Conn) { } // TestRPCConn returns a rpc client and server connected to each other. -func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { clientConn, serverConn := TestConn(t) server := rpc.NewServer() @@ -58,7 +61,7 @@ func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { // TestPluginRPCConn returns a plugin RPC client and server that are connected // together and configured. 
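Because the reworked helpers take the testing.T interface from mitchellh/go-testing-interface rather than the concrete *testing.T, the standard library type still satisfies them. A sketch of driving a plugin through TestPluginRPCConn; KVPlugin (a net/rpc Plugin implementation) and the "kv" name are illustrative:

```go
package kvplugin

import (
	"testing"

	plugin "github.com/hashicorp/go-plugin"
)

func TestKVDispense(t *testing.T) {
	// *testing.T satisfies the go-testing-interface testing.T.
	client, _ := plugin.TestPluginRPCConn(t, map[string]plugin.Plugin{
		"kv": &KVPlugin{}, // illustrative Plugin implementation
	})
	defer client.Close()

	raw, err := client.Dispense("kv")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if raw == nil {
		t.Fatal("expected a non-nil implementation")
	}
}
```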
-func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { +func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { // Create two net.Conns we can use to shuttle our control connection clientConn, serverConn := TestConn(t) @@ -74,3 +77,78 @@ func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServ return client, server } + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. +func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-safetemp/LICENSE b/vendor/github.com/hashicorp/go-safetemp/LICENSE new file mode 100644 index 0000000..be2cc4d --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. 
"Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. 
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-safetemp/README.md b/vendor/github.com/hashicorp/go-safetemp/README.md
new file mode 100644
index 0000000..02ece33
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-safetemp/README.md
@@ -0,0 +1,10 @@
+# go-safetemp
+[![Godoc](https://godoc.org/github.com/hashicorp/go-safetemp?status.svg)](https://godoc.org/github.com/hashicorp/go-safetemp)
+
+Functions for safely working with temporary directories and files.
+
+## Why?
+
+The Go standard library provides the excellent `ioutil` package for
+working with temporary directories and files. This library builds on top
+of it to provide safer abstractions.
diff --git a/vendor/github.com/hashicorp/go-safetemp/safetemp.go b/vendor/github.com/hashicorp/go-safetemp/safetemp.go
new file mode 100644
index 0000000..c4ae72b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-safetemp/safetemp.go
@@ -0,0 +1,40 @@
+package safetemp
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// Dir returns the path of a new temporary directory that isn't yet created.
+// This can be used with calls that expect a non-existent directory.
+//
+// The directory is created as a child of a temporary directory created
+// within the directory dir starting with prefix. The temporary directory
+// returned is always named "temp". The parent directory has the specified
+// prefix.
+//
+// The returned io.Closer should be used to clean up the returned directory.
+// This will properly remove the returned directory and any other temporary
+// files created.
+//
+// If an error is returned, the Closer does not need to be called (and will
+// be nil).
+func Dir(dir, prefix string) (string, io.Closer, error) {
+ // Create the temporary directory
+ td, err := ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", nil, err
+ }
+
+ return filepath.Join(td, "temp"), pathCloser(td), nil
+}
+
+// pathCloser implements io.Closer to remove the given path on Close.
+type pathCloser string
+
+// Close deletes this path.
+func (p pathCloser) Close() error {
+ return os.RemoveAll(string(p))
+}
diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml
new file mode 100644
index 0000000..7698490
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+sudo: false
+
+go:
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+
+script:
+ - go test -bench . -benchmem -v ./...
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
index 21fdda4..fbde8b9 100644
--- a/vendor/github.com/hashicorp/go-uuid/README.md
+++ b/vendor/github.com/hashicorp/go-uuid/README.md
@@ -1,6 +1,6 @@
-# uuid
+# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
 
-Generates UUID-format strings using purely high quality random bytes.
+Generates UUID-format strings using high quality, _purely random_ bytes.
It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. Documentation ============= diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod new file mode 100644 index 0000000..dd57f9d --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-uuid diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go index 322b522..ff9364c 100644 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -6,13 +6,21 @@ import ( "fmt" ) -// GenerateUUID is used to generate a random UUID -func GenerateUUID() (string, error) { - buf := make([]byte, 16) +// GenerateRandomBytes is used to generate random bytes of given size. +func GenerateRandomBytes(size int) ([]byte, error) { + buf := make([]byte, size) if _, err := rand.Read(buf); err != nil { - return "", fmt.Errorf("failed to read random bytes: %v", err) + return nil, fmt.Errorf("failed to read random bytes: %v", err) } + return buf, nil +} +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + buf, err := GenerateRandomBytes(16) + if err != nil { + return "", err + } return FormatUUID(buf) } diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml index 9f30eec..542ca8b 100644 --- a/vendor/github.com/hashicorp/go-version/.travis.yml +++ b/vendor/github.com/hashicorp/go-version/.travis.yml @@ -6,6 +6,8 @@ go: - 1.2 - 1.3 - 1.4 + - 1.9 + - "1.10" script: - go test diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index 8c73df0..d055759 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -2,6 +2,7 @@ package version import ( "fmt" + "reflect" "regexp" "strings" ) @@ -113,6 +114,26 @@ func parseSingle(v string) (*Constraint, error) { }, nil } +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
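+		// A hedged illustration (version strings here are hypothetical):
+		// under this rule a constraint like ">= 1.0" rejects "1.1.0-beta1",
+		// since a pre-release must be requested explicitly, e.g. with a
+		// constraint such as ">= 1.1.0-beta1".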
+		return false
+
+	case cPre && !vPre:
+		// OK, except with the pessimistic operator
+	case !cPre && !vPre:
+		// OK
+	}
+	return true
+}
+
 //-------------------------------------------------------------------
 // Constraint functions
 //-------------------------------------------------------------------
@@ -126,22 +147,27 @@ func constraintNotEqual(v, c *Version) bool {
 }
 
 func constraintGreaterThan(v, c *Version) bool {
-	return v.Compare(c) == 1
+	return prereleaseCheck(v, c) && v.Compare(c) == 1
 }
 
 func constraintLessThan(v, c *Version) bool {
-	return v.Compare(c) == -1
+	return prereleaseCheck(v, c) && v.Compare(c) == -1
 }
 
 func constraintGreaterThanEqual(v, c *Version) bool {
-	return v.Compare(c) >= 0
+	return prereleaseCheck(v, c) && v.Compare(c) >= 0
 }
 
 func constraintLessThanEqual(v, c *Version) bool {
-	return v.Compare(c) <= 0
+	return prereleaseCheck(v, c) && v.Compare(c) <= 0
 }
 
 func constraintPessimistic(v, c *Version) bool {
+	// Using a pessimistic constraint with a pre-release restricts versions to pre-releases
+	if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
+		return false
+	}
+
 	// If the version being checked is naturally less than the constraint, then there
 	// is no way for the version to be valid against the constraint
 	if v.LessThan(c) {
diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod
new file mode 100644
index 0000000..f528555
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/go-version
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
index ae2f6b6..4d1e6e2 100644
--- a/vendor/github.com/hashicorp/go-version/version.go
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -15,8 +15,8 @@ var versionRegexp *regexp.Regexp
 // The raw regular expression string used for testing the validity
 // of a version.
 const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
-	`(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
-	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
+	`(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
 	`?`
 
 // Version represents a single version.
@@ -25,6 +25,7 @@ type Version struct {
 	pre      string
 	segments []int64
 	si       int
+	original string
 }
 
 func init() {
@@ -59,11 +60,17 @@ func NewVersion(v string) (*Version, error) {
 		segments = append(segments, 0)
 	}
 
+	pre := matches[7]
+	if pre == "" {
+		pre = matches[4]
+	}
+
 	return &Version{
-		metadata: matches[7],
-		pre:      matches[4],
+		metadata: matches[10],
+		pre:      pre,
 		segments: segments,
 		si:       si,
+		original: v,
 	}, nil
 }
 
@@ -166,24 +173,42 @@ func comparePart(preSelf string, preOther string) int {
 		return 0
 	}
 
+	var selfInt int64
+	selfNumeric := true
+	selfInt, err := strconv.ParseInt(preSelf, 10, 64)
+	if err != nil {
+		selfNumeric = false
+	}
+
+	var otherInt int64
+	otherNumeric := true
+	otherInt, err = strconv.ParseInt(preOther, 10, 64)
+	if err != nil {
+		otherNumeric = false
+	}
+
 	// if a part is empty, we use the other to decide
 	if preSelf == "" {
-		_, notIsNumeric := strconv.ParseInt(preOther, 10, 64)
-		if notIsNumeric == nil {
+		if otherNumeric {
 			return -1
 		}
 		return 1
 	}
 
 	if preOther == "" {
-		_, notIsNumeric := strconv.ParseInt(preSelf, 10, 64)
-		if notIsNumeric == nil {
+		if selfNumeric {
 			return 1
 		}
 		return -1
 	}
 
-	if preSelf > preOther {
+	if selfNumeric && !otherNumeric {
+		return -1
+	} else if !selfNumeric && otherNumeric {
+		return 1
+	} else if !selfNumeric && !otherNumeric && preSelf > preOther {
+		return 1
+	} else if selfInt > otherInt {
 		return 1
 	}
 
@@ -283,11 +308,19 @@ func (v *Version) Segments() []int {
 // for a version "1.2.3-beta", segments will return a slice of
 // 1, 2, 3.
 func (v *Version) Segments64() []int64 {
-	return v.segments
+	result := make([]int64, len(v.segments))
+	copy(result, v.segments)
+	return result
 }
 
 // String returns the full version string including pre-release
 // and metadata information.
+//
+// This value is rebuilt according to the parsed segments and other
+// information. Therefore, ambiguities in the version string such as
+// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
+// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
+// as shown in the parenthesized examples.
 func (v *Version) String() string {
 	var buf bytes.Buffer
 	fmtParts := make([]string, len(v.segments))
@@ -306,3 +339,9 @@ func (v *Version) String() string {
 
 	return buf.String()
 }
+
+// Original returns the original parsed version as-is, including any
+// potential whitespace, `v` prefix, etc.
+func (v *Version) Original() string {
+	return v.original
+}
diff --git a/vendor/github.com/hashicorp/hcl2/LICENSE b/vendor/github.com/hashicorp/hcl2/LICENSE
new file mode 100644
index 0000000..82b4de9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+   means Source Code Form to which the initial Contributor has attached the
+   notice in Exhibit A, the Executable Form of such Source Code Form, and
+   Modifications of such Source Code Form, in each case including portions
+   thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+   means
+
+   a. that the initial Contributor has attached the notice described in
+      Exhibit B to the Covered Software; or
+
+   b.
that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go new file mode 100644 index 0000000..3a149a8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go @@ -0,0 +1,304 @@ +package gohcl + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// DecodeBody extracts the configuration within the given body into the given +// value. This value must be a non-nil pointer to either a struct or +// a map, where in the former case the configuration will be decoded using +// struct tags and in the latter case only attributes are allowed and their +// values are decoded into the map. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
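+//
+// A hedged usage sketch (the struct, field names, and file value below
+// are hypothetical, not part of this package):
+//
+//	type ServiceConfig struct {
+//		Protocol string   `hcl:"protocol,attr"`
+//		Users    []string `hcl:"users,optional"`
+//	}
+//	var sc ServiceConfig
+//	diags := DecodeBody(file.Body, nil, &sc)
+//	if diags.HasErrors() {
+//		// handle invalid configuration
+//	}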
+func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	rv := reflect.ValueOf(val)
+	if rv.Kind() != reflect.Ptr {
+		panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
+	}
+
+	return decodeBodyToValue(body, ctx, rv.Elem())
+}
+
+func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	et := val.Type()
+	switch et.Kind() {
+	case reflect.Struct:
+		return decodeBodyToStruct(body, ctx, val)
+	case reflect.Map:
+		return decodeBodyToMap(body, ctx, val)
+	default:
+		panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
+	}
+}
+
+func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	schema, partial := ImpliedBodySchema(val.Interface())
+
+	var content *hcl.BodyContent
+	var leftovers hcl.Body
+	var diags hcl.Diagnostics
+	if partial {
+		content, leftovers, diags = body.PartialContent(schema)
+	} else {
+		content, diags = body.Content(schema)
+	}
+	if content == nil {
+		return diags
+	}
+
+	tags := getFieldTags(val.Type())
+
+	if tags.Remain != nil {
+		fieldIdx := *tags.Remain
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+		switch {
+		case bodyType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(leftovers))
+		case attrsType.AssignableTo(field.Type):
+			attrs, attrsDiags := leftovers.JustAttributes()
+			if len(attrsDiags) > 0 {
+				diags = append(diags, attrsDiags...)
+			}
+			fieldV.Set(reflect.ValueOf(attrs))
+		default:
+			diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
+		}
+	}
+
+	for name, fieldIdx := range tags.Attributes {
+		attr := content.Attributes[name]
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+
+		if attr == nil {
+			if !exprType.AssignableTo(field.Type) {
+				continue
+			}
+
+			// As a special case, if the target is of type hcl.Expression then
+			// we'll assign an actual expression that evaluates to a cty null,
+			// so the caller can deal with it within the cty realm rather
+			// than within the Go realm.
+			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
+			fieldV.Set(reflect.ValueOf(synthExpr))
+			continue
+		}
+
+		switch {
+		case attrType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr))
+		case exprType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr.Expr))
+		default:
+			diags = append(diags, DecodeExpression(
+				attr.Expr, ctx, fieldV.Addr().Interface(),
+			)...)
+		}
+	}
+
+	blocksByType := content.Blocks.ByType()
+
+	for typeName, fieldIdx := range tags.Blocks {
+		blocks := blocksByType[typeName]
+		field := val.Type().Field(fieldIdx)
+
+		ty := field.Type
+		isSlice := false
+		isPtr := false
+		if ty.Kind() == reflect.Slice {
+			isSlice = true
+			ty = ty.Elem()
+		}
+		if ty.Kind() == reflect.Ptr {
+			isPtr = true
+			ty = ty.Elem()
+		}
+
+		if len(blocks) > 1 && !isSlice {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
+				Detail: fmt.Sprintf(
+					"Only one %s block is allowed. 
Another was defined at %s.", + typeName, blocks[0].DefRange.String(), + ), + Subject: &blocks[1].DefRange, + }) + continue + } + + if len(blocks) == 0 { + if isSlice || isPtr { + val.Field(fieldIdx).Set(reflect.Zero(field.Type)) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", typeName), + Detail: fmt.Sprintf("A %s block is required.", typeName), + Subject: body.MissingItemRange().Ptr(), + }) + } + continue + } + + switch { + + case isSlice: + elemType := ty + if isPtr { + elemType = reflect.PtrTo(ty) + } + sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) + + for i, block := range blocks { + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + sli.Index(i).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) + } + } + + val.Field(fieldIdx).Set(sli) + + default: + block := blocks[0] + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + val.Field(fieldIdx).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) + } + + } + + } + + return diags +} + +func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + attrs, diags := body.JustAttributes() + if attrs == nil { + return diags + } + + mv := reflect.MakeMap(v.Type()) + + for k, attr := range attrs { + switch { + case attrType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) + case exprType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) + default: + ev := reflect.New(v.Type().Elem()) + diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) + mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) + } + } + + v.Set(mv) + + return diags +} + +func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + var diags hcl.Diagnostics + + ty := v.Type() + + switch { + case blockType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block)) + case bodyType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block.Body)) + case attrsType.AssignableTo(ty): + attrs, attrsDiags := block.Body.JustAttributes() + if len(attrsDiags) > 0 { + diags = append(diags, attrsDiags...) + } + v.Elem().Set(reflect.ValueOf(attrs)) + default: + diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) + + if len(block.Labels) > 0 { + blockTags := getFieldTags(ty) + for li, lv := range block.Labels { + lfieldIdx := blockTags.Labels[li].FieldIndex + v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) + } + } + + } + + return diags +} + +// DecodeExpression extracts the value of the given expression into the given +// value. This value must be something that gocty is able to decode into, +// since the final decoding is delegated to that package. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
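+//
+// A minimal usage sketch (attr and ctx are hypothetical, assumed to come
+// from a previously-decoded body and caller-provided variables):
+//
+//	var retries int
+//	diags := DecodeExpression(attr.Expr, ctx, &retries)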
+func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	srcVal, diags := expr.Value(ctx)
+
+	convTy, err := gocty.ImpliedType(val)
+	if err != nil {
+		panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
+	}
+
+	srcVal, err = convert.Convert(srcVal, convTy)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+		return diags
+	}
+
+	err = gocty.FromCtyValue(srcVal, val)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+	}
+
+	return diags
+}
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
new file mode 100644
index 0000000..8500214
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
@@ -0,0 +1,49 @@
+// Package gohcl allows decoding HCL configurations into Go data structures.
+//
+// It provides a convenient and concise way of describing the schema for
+// configuration and then accessing the resulting data via native Go
+// types.
+//
+// A struct field tag scheme is used, similar to other decoding and
+// unmarshalling libraries. The tags are formatted as in the following example:
+//
+//	ThingType string `hcl:"thing_type,attr"`
+//
+// Within each tag there are two comma-separated tokens. The first is the
+// name of the corresponding construct in configuration, while the second
+// is a keyword giving the kind of construct expected. The following
+// kind keywords are supported:
+//
+//	attr (the default) indicates that the value is to be populated from an attribute
+//	block indicates that the value is to be populated from a block
+//	label indicates that the value is to be populated from a block label
+//	remain indicates that the value is to be populated from the remaining body after populating other fields
+//
+// "attr" fields may either be of type hcl.Expression, in which case the raw
+// expression is assigned, or of any type accepted by gocty, in which case
+// gocty will be used to assign the value to a native Go type.
+//
+// "block" fields may be of type *hcl.Block or hcl.Body, in which case the
+// corresponding raw value is assigned, or may be a struct that recursively
+// uses the same tags. Block fields may also be slices of any of these types,
+// in which case multiple blocks of the corresponding type are decoded into
+// the slice.
+//
+// "label" fields are considered only in a struct used as the type of a field
+// marked as "block", and are used sequentially to capture the labels of
+// the blocks being decoded. In this case, the name token is used only as
+// an identifier for the label in diagnostic messages.
+//
+// "remain" can be placed on a single field that may be either of type
+// hcl.Body or hcl.Attributes, in which case any remaining body content is
+// placed into this field for delayed processing. If no "remain" field is
+// present then any attributes or blocks not matched by another valid tag
+// will cause an error diagnostic.
+//
+// Broadly speaking this package deals with two types of error. 
The first is
+// errors in the configuration itself, which are returned as diagnostics
+// written with the configuration author as the target audience. The second
+// is bugs in the calling program, such as invalid struct tags, which are
+// surfaced via panics since there can be no useful runtime handling of such
+// errors and they should certainly not be returned to the user as diagnostics.
+package gohcl
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go
new file mode 100644
index 0000000..88164cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go
@@ -0,0 +1,174 @@
+package gohcl
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
+// given value, which must be a struct value or a pointer to one. If an
+// inappropriate value is passed, this function will panic.
+//
+// The second return argument indicates whether the given struct includes
+// a "remain" field, and thus the returned schema is non-exhaustive.
+//
+// This uses the tags on the fields of the struct to discover how each
+// field's value should be expressed within configuration. If an invalid
+// mapping is attempted, this function will panic.
+func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
+	ty := reflect.TypeOf(val)
+
+	if ty.Kind() == reflect.Ptr {
+		ty = ty.Elem()
+	}
+
+	if ty.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("given value must be struct, not %T", val))
+	}
+
+	var attrSchemas []hcl.AttributeSchema
+	var blockSchemas []hcl.BlockHeaderSchema
+
+	tags := getFieldTags(ty)
+
+	attrNames := make([]string, 0, len(tags.Attributes))
+	for n := range tags.Attributes {
+		attrNames = append(attrNames, n)
+	}
+	sort.Strings(attrNames)
+	for _, n := range attrNames {
+		idx := tags.Attributes[n]
+		optional := tags.Optional[n]
+		field := ty.Field(idx)
+
+		var required bool
+
+		switch {
+		case field.Type.AssignableTo(exprType):
+			// If we're decoding to hcl.Expression then absence can be
+			// indicated via a null value, so we don't specify that
+			// the field is required during decoding.
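+			// For example (hypothetical field): an attribute field of
+			// type hcl.Expression, tagged `hcl:"expr,attr"`, produces an
+			// AttributeSchema with Required set to false.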
+ required = false + case field.Type.Kind() != reflect.Ptr && !optional: + required = true + default: + required = false + } + + attrSchemas = append(attrSchemas, hcl.AttributeSchema{ + Name: n, + Required: required, + }) + } + + blockNames := make([]string, 0, len(tags.Blocks)) + for n := range tags.Blocks { + blockNames = append(blockNames, n) + } + sort.Strings(blockNames) + for _, n := range blockNames { + idx := tags.Blocks[n] + field := ty.Field(idx) + fty := field.Type + if fty.Kind() == reflect.Slice { + fty = fty.Elem() + } + if fty.Kind() == reflect.Ptr { + fty = fty.Elem() + } + if fty.Kind() != reflect.Struct { + panic(fmt.Sprintf( + "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, + )) + } + ftags := getFieldTags(fty) + var labelNames []string + if len(ftags.Labels) > 0 { + labelNames = make([]string, len(ftags.Labels)) + for i, l := range ftags.Labels { + labelNames[i] = l.Name + } + } + + blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ + Type: n, + LabelNames: labelNames, + }) + } + + partial = tags.Remain != nil + schema = &hcl.BodySchema{ + Attributes: attrSchemas, + Blocks: blockSchemas, + } + return schema, partial +} + +type fieldTags struct { + Attributes map[string]int + Blocks map[string]int + Labels []labelField + Remain *int + Optional map[string]bool +} + +type labelField struct { + FieldIndex int + Name string +} + +func getFieldTags(ty reflect.Type) *fieldTags { + ret := &fieldTags{ + Attributes: map[string]int{}, + Blocks: map[string]int{}, + Optional: map[string]bool{}, + } + + ct := ty.NumField() + for i := 0; i < ct; i++ { + field := ty.Field(i) + tag := field.Tag.Get("hcl") + if tag == "" { + continue + } + + comma := strings.Index(tag, ",") + var name, kind string + if comma != -1 { + name = tag[:comma] + kind = tag[comma+1:] + } else { + name = tag + kind = "attr" + } + + switch kind { + case "attr": + ret.Attributes[name] = i + case "block": + ret.Blocks[name] = i + case "label": + ret.Labels = append(ret.Labels, labelField{ + FieldIndex: i, + Name: name, + }) + case "remain": + if ret.Remain != nil { + panic("only one 'remain' tag is permitted") + } + idx := i // copy, because this loop will continue assigning to i + ret.Remain = &idx + case "optional": + ret.Attributes[name] = i + ret.Optional[name] = true + default: + panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/types.go b/vendor/github.com/hashicorp/hcl2/gohcl/types.go new file mode 100644 index 0000000..a94f275 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/types.go @@ -0,0 +1,16 @@ +package gohcl + +import ( + "reflect" + + "github.com/hashicorp/hcl2/hcl" +) + +var victimExpr hcl.Expression +var victimBody hcl.Body + +var exprType = reflect.TypeOf(&victimExpr).Elem() +var bodyType = reflect.TypeOf(&victimBody).Elem() +var blockType = reflect.TypeOf((*hcl.Block)(nil)) +var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) +var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go new file mode 100644 index 0000000..6ecf744 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go @@ -0,0 +1,103 @@ +package hcl + +import ( + "fmt" +) + +// DiagnosticSeverity represents the severity of a diagnostic. 
+type DiagnosticSeverity int
+
+const (
+	// DiagInvalid is the invalid zero value of DiagnosticSeverity
+	DiagInvalid DiagnosticSeverity = iota
+
+	// DiagError indicates that the problem reported by a diagnostic prevents
+	// further progress in parsing and/or evaluating the subject.
+	DiagError
+
+	// DiagWarning indicates that the problem reported by a diagnostic warrants
+	// user attention but does not prevent further progress. It is most
+	// commonly used for showing deprecation notices.
+	DiagWarning
+)
+
+// Diagnostic represents information to be presented to a user about an
+// error or anomaly in parsing or evaluating configuration.
+type Diagnostic struct {
+	Severity DiagnosticSeverity
+
+	// Summary and Detail contain the English-language description of the
+	// problem. Summary is a terse description of the general problem and
+	// Detail is a more elaborate, often-multi-sentence description of
+	// the problem and what might be done to solve it.
+	Summary string
+	Detail  string
+	Subject *Range
+	Context *Range
+}
+
+// Diagnostics is a list of Diagnostic instances.
+type Diagnostics []*Diagnostic
+
+// error implementation, so that diagnostics can be returned via APIs
+// that normally deal in vanilla Go errors.
+//
+// This presents only minimal context about the error, for compatibility
+// with usual expectations about how errors will present as strings.
+func (d *Diagnostic) Error() string {
+	return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail)
+}
+
+// error implementation, so that sets of diagnostics can be returned via
+// APIs that normally deal in vanilla Go errors.
+func (d Diagnostics) Error() string {
+	count := len(d)
+	switch {
+	case count == 0:
+		return "no diagnostics"
+	case count == 1:
+		return d[0].Error()
+	default:
+		return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1)
+	}
+}
+
+// Append appends a new error to a Diagnostics and returns the whole Diagnostics.
+//
+// This is provided as a convenience for returning from a function that
+// collects and then returns a set of diagnostics:
+//
+//	return nil, diags.Append(&hcl.Diagnostic{ ... })
+//
+// Note that this modifies the array underlying the diagnostics slice, so
+// must be used carefully within a single codepath. It is incorrect (and rude)
+// to extend a diagnostics created by a different subsystem.
+func (d Diagnostics) Append(diag *Diagnostic) Diagnostics {
+	return append(d, diag)
+}
+
+// Extend concatenates the given Diagnostics with the receiver and returns
+// the whole new Diagnostics.
+//
+// This is similar to Append but accepts multiple diagnostics to add. It has
+// all the same caveats and constraints.
+func (d Diagnostics) Extend(diags Diagnostics) Diagnostics {
+	return append(d, diags...)
+}
+
+// HasErrors returns true if the receiver contains any diagnostics of
+// severity DiagError.
+func (d Diagnostics) HasErrors() bool {
+	for _, diag := range d {
+		if diag.Severity == DiagError {
+			return true
+		}
+	}
+	return false
+}
+
+// A DiagnosticWriter emits diagnostics somehow. 
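+//
+// A hedged usage sketch (the files map and diags value are assumed to be
+// produced elsewhere by the calling application):
+//
+//	wr := NewDiagnosticTextWriter(os.Stderr, files, 78, true)
+//	wr.WriteDiagnostics(diags)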
+type DiagnosticWriter interface { + WriteDiagnostic(*Diagnostic) error + WriteDiagnostics(Diagnostics) error +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go new file mode 100644 index 0000000..dfa473a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go @@ -0,0 +1,168 @@ +package hcl + +import ( + "bufio" + "errors" + "fmt" + "io" + + wordwrap "github.com/mitchellh/go-wordwrap" +) + +type diagnosticTextWriter struct { + files map[string]*File + wr io.Writer + width uint + color bool +} + +// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics +// to the given writer as formatted text. +// +// It is designed to produce text appropriate to print in a monospaced font +// in a terminal of a particular width, or optionally with no width limit. +// +// The given width may be zero to disable word-wrapping of the detail text +// and truncation of source code snippets. +// +// If color is set to true, the output will include VT100 escape sequences to +// color-code the severity indicators. It is suggested to turn this off if +// the target writer is not a terminal. +func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter { + return &diagnosticTextWriter{ + files: files, + wr: wr, + width: width, + color: color, + } +} + +func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { + if diag == nil { + return errors.New("nil diagnostic") + } + + var colorCode, highlightCode, resetCode string + if w.color { + switch diag.Severity { + case DiagError: + colorCode = "\x1b[31m" + case DiagWarning: + colorCode = "\x1b[33m" + } + resetCode = "\x1b[0m" + highlightCode = "\x1b[1;4m" + } + + var severityStr string + switch diag.Severity { + case DiagError: + severityStr = "Error" + case DiagWarning: + severityStr = "Warning" + default: + // should never happen + severityStr = "???????" + } + + fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary) + + if diag.Subject != nil { + snipRange := *diag.Subject + highlightRange := snipRange + if diag.Context != nil { + // Show enough of the source code to include both the subject + // and context ranges, which overlap in all reasonable + // situations. + snipRange = RangeOver(snipRange, *diag.Context) + } + // We can't illustrate an empty range, so we'll turn such ranges into + // single-character ranges, which might not be totally valid (may point + // off the end of a line, or off the end of the file) but are good + // enough for the bounds checks we do below. 
+ if snipRange.Empty() { + snipRange.End.Byte++ + snipRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + file := w.files[diag.Subject.Filename] + if file == nil || file.Bytes == nil { + fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line) + } else { + + var contextLine string + if diag.Subject != nil { + contextLine = contextString(file, diag.Subject.Start.Byte) + if contextLine != "" { + contextLine = ", in " + contextLine + } + } + + fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine) + + src := file.Bytes + sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines) + + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snipRange) { + continue + } + + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + if highlightedRange.Empty() { + fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes()) + } else { + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + w.wr, "%4d: %s%s%s%s%s\n", + lineRange.Start.Line, + before, + highlightCode, highlighted, resetCode, + after, + ) + } + + } + + w.wr.Write([]byte{'\n'}) + } + } + + if diag.Detail != "" { + detail := diag.Detail + if w.width != 0 { + detail = wordwrap.WrapString(detail, w.width) + } + fmt.Fprintf(w.wr, "%s\n\n", detail) + } + + return nil +} + +func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error { + for _, diag := range diags { + err := w.WriteDiagnostic(diag) + if err != nil { + return err + } + } + return nil +} + +func contextString(file *File, offset int) string { + type contextStringer interface { + ContextString(offset int) string + } + + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go new file mode 100644 index 0000000..c128334 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go @@ -0,0 +1,24 @@ +package hcl + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. 
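+//
+// For example (hypothetical inputs), nameSuggestion("privder",
+// []string{"provider", "variable"}) returns "provider", since the two
+// names fall within the edit-distance threshold used below.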
+func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/doc.go new file mode 100644 index 0000000..01318c9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/doc.go @@ -0,0 +1 @@ +package hcl diff --git a/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go new file mode 100644 index 0000000..915910a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go @@ -0,0 +1,25 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// An EvalContext provides the variables and functions that should be used +// to evaluate an expression. +type EvalContext struct { + Variables map[string]cty.Value + Functions map[string]function.Function + parent *EvalContext +} + +// NewChild returns a new EvalContext that is a child of the receiver. +func (ctx *EvalContext) NewChild() *EvalContext { + return &EvalContext{parent: ctx} +} + +// Parent returns the parent of the receiver, or nil if the receiver has +// no parent. +func (ctx *EvalContext) Parent() *EvalContext { + return ctx.parent +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go new file mode 100644 index 0000000..6963fba --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go @@ -0,0 +1,46 @@ +package hcl + +// ExprCall tests if the given expression is a function call and, +// if so, extracts the function name and the expressions that represent +// the arguments. If the given expression is not statically a function call, +// error diagnostics are returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprCall that takes no arguments and returns +// *StaticCall. This method should return nil if a static call cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprCall(expr Expression) (*StaticCall, Diagnostics) { + type exprCall interface { + ExprCall() *StaticCall + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprCall) + return supported + }) + + if exC, supported := physExpr.(exprCall); supported { + if call := exC.ExprCall(); call != nil { + return call, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static function call is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// StaticCall represents a function call that was extracted statically from +// an expression using ExprCall. +type StaticCall struct { + Name string + NameRange Range + Arguments []Expression + ArgsRange Range +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go new file mode 100644 index 0000000..d05cca0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go @@ -0,0 +1,37 @@ +package hcl + +// ExprList tests if the given expression is a static list construct and, +// if so, extracts the expressions that represent the list elements. 
+// If the given expression is not a static list, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprList that takes no arguments and returns +// []Expression. This method should return nil if a static list cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprList(expr Expression) ([]Expression, Diagnostics) { + type exprList interface { + ExprList() []Expression + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprList) + return supported + }) + + if exL, supported := physExpr.(exprList); supported { + if list := exL.ExprList(); list != nil { + return list, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static list expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go new file mode 100644 index 0000000..96d1ce4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go @@ -0,0 +1,44 @@ +package hcl + +// ExprMap tests if the given expression is a static map construct and, +// if so, extracts the expressions that represent the map elements. +// If the given expression is not a static map, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprMap that takes no arguments and returns +// []KeyValuePair. This method should return nil if a static map cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) { + type exprMap interface { + ExprMap() []KeyValuePair + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprMap) + return supported + }) + + if exM, supported := physExpr.(exprMap); supported { + if pairs := exM.ExprMap(); pairs != nil { + return pairs, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static map expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// KeyValuePair represents a pair of expressions that serve as a single item +// within a map or object definition construct. +type KeyValuePair struct { + Key Expression + Value Expression +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go new file mode 100644 index 0000000..6d5d205 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go @@ -0,0 +1,68 @@ +package hcl + +type unwrapExpression interface { + UnwrapExpression() Expression +} + +// UnwrapExpression removes any "wrapper" expressions from the given expression, +// to recover the representation of the physical expression given in source +// code. +// +// Sometimes wrapping expressions are used to modify expression behavior, e.g. +// in extensions that need to make some local variables available to certain +// sub-trees of the configuration. This can make it difficult to reliably +// type-assert on the physical AST types used by the underlying syntax. 
+//
+// Unwrapping an expression may modify its behavior by stripping away any
+// additional constraints or capabilities being applied to the Value and
+// Variables methods, so this function should generally only be used prior
+// to operations that concern themselves with the static syntax of the input
+// configuration, and not with the effective value of the expression.
+//
+// Wrapper expression types must support unwrapping by implementing a method
+// called UnwrapExpression that takes no arguments and returns the embedded
+// Expression. Implementations of this method should peel away only one level
+// of wrapping, if multiple are present. This method may return nil to
+// indicate _dynamically_ that no wrapped expression is available, for
+// expression types that might only behave as wrappers in certain cases.
+func UnwrapExpression(expr Expression) Expression {
+	for {
+		unwrap, wrapped := expr.(unwrapExpression)
+		if !wrapped {
+			return expr
+		}
+		innerExpr := unwrap.UnwrapExpression()
+		if innerExpr == nil {
+			return expr
+		}
+		expr = innerExpr
+	}
+}
+
+// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the
+// caller an opportunity to test each level of unwrapping to see whether a
+// particular expression is accepted.
+//
+// This could be used, for example, to unwrap until a particular other
+// interface is satisfied, regardless of the wrapping level at which it is
+// satisfied.
+//
+// The given callback function must return false to continue unwrapping, or
+// true to accept and return the proposed expression given. If the callback
+// function rejects even the final, physical expression then the result of
+// this function is nil.
+func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression {
+	for {
+		if until(expr) {
+			return expr
+		}
+		unwrap, wrapped := expr.(unwrapExpression)
+		if !wrapped {
+			return nil
+		}
+		expr = unwrap.UnwrapExpression()
+		if expr == nil {
+			return nil
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go
new file mode 100644
index 0000000..ccc1c0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go
@@ -0,0 +1,24 @@
+package hclsyntax
+
+import (
+	"github.com/agext/levenshtein"
+)
+
+// nameSuggestion tries to find a name from the given slice of suggested names
+// that is close to the given name and returns it if found. If no suggestion
+// is close enough, returns the empty string.
+//
+// The suggestions are tried in order, so earlier suggestions take precedence
+// if the given string is similar to two or more suggestions.
+//
+// This function is intended to be used with a relatively-small number of
+// suggestions. It's not optimized for hundreds or thousands of them.
+func nameSuggestion(given string, suggestions []string) string {
+	for _, suggestion := range suggestions {
+		dist := levenshtein.Distance(given, suggestion, nil)
+		if dist < 3 { // threshold determined experimentally
+			return suggestion
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go
new file mode 100644
index 0000000..617bc29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go
@@ -0,0 +1,7 @@
+// Package hclsyntax contains the parser, AST, etc. for HCL's native language,
+// as opposed to the JSON variant. 
+//
+// In normal use applications should rarely depend on this package directly,
+// instead preferring the higher-level interface of the main hcl package and
+// its companion package hclparse.
+package hclsyntax
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
new file mode 100644
index 0000000..cfc7cd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
@@ -0,0 +1,1275 @@
+package hclsyntax
+
+import (
+    "fmt"
+
+    "github.com/hashicorp/hcl2/hcl"
+    "github.com/zclconf/go-cty/cty"
+    "github.com/zclconf/go-cty/cty/convert"
+    "github.com/zclconf/go-cty/cty/function"
+)
+
+// Expression is the abstract type for nodes that behave as HCL expressions.
+type Expression interface {
+    Node
+
+    // The hcl.Expression methods are duplicated here, rather than simply
+    // embedded, because both Node and hcl.Expression have a Range method
+    // and so they conflict.
+
+    Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
+    Variables() []hcl.Traversal
+    StartRange() hcl.Range
+}
+
+// Assert that Expression implements hcl.Expression
+var assertExprImplExpr hcl.Expression = Expression(nil)
+
+// LiteralValueExpr is an expression that just always returns a given value.
+type LiteralValueExpr struct {
+    Val      cty.Value
+    SrcRange hcl.Range
+}
+
+func (e *LiteralValueExpr) walkChildNodes(w internalWalkFunc) {
+    // Literal values have no child nodes
+}
+
+func (e *LiteralValueExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    return e.Val, nil
+}
+
+func (e *LiteralValueExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *LiteralValueExpr) StartRange() hcl.Range {
+    return e.SrcRange
+}
+
+// Implementation for hcl.AbsTraversalForExpr.
+func (e *LiteralValueExpr) AsTraversal() hcl.Traversal {
+    // This one's a little weird: the contract for AsTraversal is to interpret
+    // an expression as if it were traversal syntax, and traversal syntax
+    // doesn't have the special keywords "null", "true", and "false" so these
+    // are expected to be treated like variables in that case.
+    // Since our parser already turned them into LiteralValueExpr by the time
+    // we get here, we need to undo this and infer the name that would've
+    // originally led to our value.
+    // We don't do anything for any other values, since they don't overlap
+    // with traversal roots.
+
+    if e.Val.IsNull() {
+        // In practice the parser only generates null values of the dynamic
+        // pseudo-type for literals, so we can safely assume that any null
+        // was originally the keyword "null".
+        return hcl.Traversal{
+            hcl.TraverseRoot{
+                Name:     "null",
+                SrcRange: e.SrcRange,
+            },
+        }
+    }
+
+    switch e.Val {
+    case cty.True:
+        return hcl.Traversal{
+            hcl.TraverseRoot{
+                Name:     "true",
+                SrcRange: e.SrcRange,
+            },
+        }
+    case cty.False:
+        return hcl.Traversal{
+            hcl.TraverseRoot{
+                Name:     "false",
+                SrcRange: e.SrcRange,
+            },
+        }
+    default:
+        // No traversal is possible for any other value.
+        return nil
+    }
+}
+
+// ScopeTraversalExpr is an Expression that retrieves a value from the scope
+// using a traversal.
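+//
+// For example, the source expression foo.bar[0] becomes a single
+// ScopeTraversalExpr whose Traversal starts at the root name "foo" and then
+// applies the .bar attribute and [0] index steps in order.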
+type ScopeTraversalExpr struct {
+    Traversal hcl.Traversal
+    SrcRange  hcl.Range
+}
+
+func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) {
+    // Scope traversals have no child nodes
+}
+
+func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    return e.Traversal.TraverseAbs(ctx)
+}
+
+func (e *ScopeTraversalExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *ScopeTraversalExpr) StartRange() hcl.Range {
+    return e.SrcRange
+}
+
+// Implementation for hcl.AbsTraversalForExpr.
+func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal {
+    return e.Traversal
+}
+
+// RelativeTraversalExpr is an Expression that retrieves a value from another
+// value using a _relative_ traversal.
+type RelativeTraversalExpr struct {
+    Source    Expression
+    Traversal hcl.Traversal
+    SrcRange  hcl.Range
+}
+
+func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) {
+    // Relative traversals have a single child node: the source expression.
+    e.Source = w(e.Source).(Expression)
+}
+
+func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    src, diags := e.Source.Value(ctx)
+    ret, travDiags := e.Traversal.TraverseRel(src)
+    diags = append(diags, travDiags...)
+    return ret, diags
+}
+
+func (e *RelativeTraversalExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *RelativeTraversalExpr) StartRange() hcl.Range {
+    return e.SrcRange
+}
+
+// Implementation for hcl.AbsTraversalForExpr.
+func (e *RelativeTraversalExpr) AsTraversal() hcl.Traversal {
+    // We can produce a traversal only if our source can.
+    st, diags := hcl.AbsTraversalForExpr(e.Source)
+    if diags.HasErrors() {
+        return nil
+    }
+
+    ret := make(hcl.Traversal, len(st)+len(e.Traversal))
+    copy(ret, st)
+    copy(ret[len(st):], e.Traversal)
+    return ret
+}
+
+// FunctionCallExpr is an Expression that calls a function from the EvalContext
+// and returns its result.
+type FunctionCallExpr struct {
+    Name string
+    Args []Expression
+
+    // If true, the final argument should be a tuple, list or set which will
+    // expand to be one argument per element.
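+    // For example, with ExpandFinal set, min(nums...) passes each element
+    // of nums (a hypothetical list) to min as a separate argument.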
+ ExpandFinal bool + + NameRange hcl.Range + OpenParenRange hcl.Range + CloseParenRange hcl.Range +} + +func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) { + for i, arg := range e.Args { + e.Args[i] = w(arg).(Expression) + } +} + +func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var f function.Function + exists := false + hasNonNilMap := false + thisCtx := ctx + for thisCtx != nil { + if thisCtx.Functions == nil { + thisCtx = thisCtx.Parent() + continue + } + hasNonNilMap = true + f, exists = thisCtx.Functions[e.Name] + if exists { + break + } + thisCtx = thisCtx.Parent() + } + + if !exists { + if !hasNonNilMap { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Function calls not allowed", + Detail: "Functions may not be called here.", + Subject: e.Range().Ptr(), + }, + } + } + + avail := make([]string, 0, len(ctx.Functions)) + for name := range ctx.Functions { + avail = append(avail, name) + } + suggestion := nameSuggestion(e.Name, avail) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Call to unknown function", + Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion), + Subject: &e.NameRange, + Context: e.Range().Ptr(), + }, + } + } + + params := f.Params() + varParam := f.VarParam() + + args := e.Args + if e.ExpandFinal { + if len(args) < 1 { + // should never happen if the parser is behaving + panic("ExpandFinal set on function call with no arguments") + } + expandExpr := args[len(args)-1] + expandVal, expandDiags := expandExpr.Value(ctx) + diags = append(diags, expandDiags...) + if expandDiags.HasErrors() { + return cty.DynamicVal, diags + } + + switch { + case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Context: expandExpr.Range().Ptr(), + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + if !expandVal.IsKnown() { + return cty.DynamicVal, diags + } + + newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt()) + newArgs = append(newArgs, args[:len(args)-1]...) + it := expandVal.ElementIterator() + for it.Next() { + _, val := it.Element() + newArgs = append(newArgs, &LiteralValueExpr{ + Val: val, + SrcRange: expandExpr.Range(), + }) + } + args = newArgs + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.", + Context: expandExpr.Range().Ptr(), + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + } + + if len(args) < len(params) { + missing := params[len(args)] + qual := "" + if varParam != nil { + qual = " at least" + } + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Not enough function arguments", + Detail: fmt.Sprintf( + "Function %q expects%s %d argument(s). 
Missing value for %q.", + e.Name, qual, len(params), missing.Name, + ), + Subject: &e.CloseParenRange, + Context: e.Range().Ptr(), + }, + } + } + + if varParam == nil && len(args) > len(params) { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Too many function arguments", + Detail: fmt.Sprintf( + "Function %q expects only %d argument(s).", + e.Name, len(params), + ), + Subject: args[len(params)].StartRange().Ptr(), + Context: e.Range().Ptr(), + }, + } + } + + argVals := make([]cty.Value, len(args)) + + for i, argExpr := range args { + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + + val, argDiags := argExpr.Value(ctx) + if len(argDiags) > 0 { + diags = append(diags, argDiags...) + } + + // Try to convert our value to the parameter type + val, err := convert.Convert(val, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + } + + argVals[i] = val + } + + if diags.HasErrors() { + // Don't try to execute the function if we already have errors with + // the arguments, because the result will probably be a confusing + // error message. + return cty.DynamicVal, diags + } + + resultVal, err := f.Call(argVals) + if err != nil { + switch terr := err.(type) { + case function.ArgError: + i := terr.Index + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + argExpr := e.Args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + } + + return cty.DynamicVal, diags + } + + return resultVal, diags +} + +func (e *FunctionCallExpr) Range() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.CloseParenRange) +} + +func (e *FunctionCallExpr) StartRange() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.OpenParenRange) +} + +// Implementation for hcl.ExprCall. +func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { + ret := &hcl.StaticCall{ + Name: e.Name, + NameRange: e.NameRange, + Arguments: make([]hcl.Expression, len(e.Args)), + ArgsRange: hcl.RangeBetween(e.OpenParenRange, e.CloseParenRange), + } + // Need to convert our own Expression objects into hcl.Expression. 
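+    // A plain copy loop is needed because []Expression does not convert
+    // to []hcl.Expression directly, even though each element satisfies
+    // the hcl.Expression interface.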
+ for i, arg := range e.Args { + ret.Arguments[i] = arg + } + return ret +} + +type ConditionalExpr struct { + Condition Expression + TrueResult Expression + FalseResult Expression + + SrcRange hcl.Range +} + +func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) { + e.Condition = w(e.Condition).(Expression) + e.TrueResult = w(e.TrueResult).(Expression) + e.FalseResult = w(e.FalseResult).(Expression) +} + +func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + trueResult, trueDiags := e.TrueResult.Value(ctx) + falseResult, falseDiags := e.FalseResult.Value(ctx) + var diags hcl.Diagnostics + + // Try to find a type that both results can be converted to. + resultType, convs := convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()}) + if resultType == cty.NilType { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + // FIXME: Need a helper function for showing natural-language type diffs, + // since this will generate some useless messages in some cases, like + // "These expressions are object and object respectively" if the + // object types don't exactly match. + "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", + trueResult.Type(), falseResult.Type(), + ), + Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), + Context: &e.SrcRange, + }, + } + } + + condResult, condDiags := e.Condition.Value(ctx) + diags = append(diags, condDiags...) + if condResult.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null condition", + Detail: "The condition value is null. Conditions must either be true or false.", + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.UnknownVal(resultType), diags + } + if !condResult.IsKnown() { + return cty.UnknownVal(resultType), diags + } + condResult, err := convert.Convert(condResult, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect condition type", + Detail: fmt.Sprintf("The condition expression must be of type bool."), + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.UnknownVal(resultType), diags + } + + if condResult.True() { + diags = append(diags, trueDiags...) + if convs[0] != nil { + var err error + trueResult, err = convs[0](trueResult) + if err != nil { + // Unsafe conversion failed with the concrete result value + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + "The true result value has the wrong type: %s.", + err.Error(), + ), + Subject: e.TrueResult.Range().Ptr(), + Context: &e.SrcRange, + }) + trueResult = cty.UnknownVal(resultType) + } + } + return trueResult, diags + } else { + diags = append(diags, falseDiags...) 
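+        // As with the true result above, apply the unification conversion
+        // (if any) before returning the false result.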
+        if convs[1] != nil {
+            var err error
+            falseResult, err = convs[1](falseResult)
+            if err != nil {
+                // Unsafe conversion failed with the concrete result value
+                diags = append(diags, &hcl.Diagnostic{
+                    Severity: hcl.DiagError,
+                    Summary:  "Inconsistent conditional result types",
+                    Detail: fmt.Sprintf(
+                        "The false result value has the wrong type: %s.",
+                        err.Error(),
+                    ),
+                    Subject: e.FalseResult.Range().Ptr(),
+                    Context: &e.SrcRange,
+                })
+                falseResult = cty.UnknownVal(resultType)
+            }
+        }
+        return falseResult, diags
+    }
+}
+
+func (e *ConditionalExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *ConditionalExpr) StartRange() hcl.Range {
+    return e.Condition.StartRange()
+}
+
+type IndexExpr struct {
+    Collection Expression
+    Key        Expression
+
+    SrcRange  hcl.Range
+    OpenRange hcl.Range
+}
+
+func (e *IndexExpr) walkChildNodes(w internalWalkFunc) {
+    e.Collection = w(e.Collection).(Expression)
+    e.Key = w(e.Key).(Expression)
+}
+
+func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    var diags hcl.Diagnostics
+    coll, collDiags := e.Collection.Value(ctx)
+    key, keyDiags := e.Key.Value(ctx)
+    diags = append(diags, collDiags...)
+    diags = append(diags, keyDiags...)
+
+    val, indexDiags := hcl.Index(coll, key, &e.SrcRange)
+    diags = append(diags, indexDiags...)
+    return val, diags
+}
+
+func (e *IndexExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *IndexExpr) StartRange() hcl.Range {
+    return e.OpenRange
+}
+
+type TupleConsExpr struct {
+    Exprs []Expression
+
+    SrcRange  hcl.Range
+    OpenRange hcl.Range
+}
+
+func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) {
+    for i, expr := range e.Exprs {
+        e.Exprs[i] = w(expr).(Expression)
+    }
+}
+
+func (e *TupleConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    var vals []cty.Value
+    var diags hcl.Diagnostics
+
+    vals = make([]cty.Value, len(e.Exprs))
+    for i, expr := range e.Exprs {
+        val, valDiags := expr.Value(ctx)
+        vals[i] = val
+        diags = append(diags, valDiags...)
+    }
+
+    return cty.TupleVal(vals), diags
+}
+
+func (e *TupleConsExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *TupleConsExpr) StartRange() hcl.Range {
+    return e.OpenRange
+}
+
+// Implementation for hcl.ExprList
+func (e *TupleConsExpr) ExprList() []hcl.Expression {
+    ret := make([]hcl.Expression, len(e.Exprs))
+    for i, expr := range e.Exprs {
+        ret[i] = expr
+    }
+    return ret
+}
+
+type ObjectConsExpr struct {
+    Items []ObjectConsItem
+
+    SrcRange  hcl.Range
+    OpenRange hcl.Range
+}
+
+type ObjectConsItem struct {
+    KeyExpr   Expression
+    ValueExpr Expression
+}
+
+func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) {
+    for i, item := range e.Items {
+        e.Items[i].KeyExpr = w(item.KeyExpr).(Expression)
+        e.Items[i].ValueExpr = w(item.ValueExpr).(Expression)
+    }
+}
+
+func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    var vals map[string]cty.Value
+    var diags hcl.Diagnostics
+
+    // This will get set to false if we fail to produce any of our keys,
+    // either because they are actually unknown or if the evaluation produces
+    // errors. In all of these cases we must return DynamicPseudoType because
+    // we're unable to know the full set of keys our object has, and thus
+    // we can't produce a complete value of the intended type.
+    //
+    // We still evaluate all of the item keys and values to make sure that we
+    // get as complete as possible a set of diagnostics.
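+    //
+    // For example, { (unknownKey) = "a" } cannot commit to any object type,
+    // so the whole construct must collapse to cty.DynamicVal.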
+ known := true + + vals = make(map[string]cty.Value, len(e.Items)) + for _, item := range e.Items { + key, keyDiags := item.KeyExpr.Value(ctx) + diags = append(diags, keyDiags...) + + val, valDiags := item.ValueExpr.Value(ctx) + diags = append(diags, valDiags...) + + if keyDiags.HasErrors() { + known = false + continue + } + + if key.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null value as key", + Detail: "Can't use a null value as a key.", + Subject: item.ValueExpr.Range().Ptr(), + }) + known = false + continue + } + + var err error + key, err = convert.Convert(key, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect key type", + Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), + Subject: item.ValueExpr.Range().Ptr(), + }) + known = false + continue + } + + if !key.IsKnown() { + known = false + continue + } + + keyStr := key.AsString() + + vals[keyStr] = val + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.ObjectVal(vals), diags +} + +func (e *ObjectConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ObjectConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// Implementation for hcl.ExprMap +func (e *ObjectConsExpr) ExprMap() []hcl.KeyValuePair { + ret := make([]hcl.KeyValuePair, len(e.Items)) + for i, item := range e.Items { + ret[i] = hcl.KeyValuePair{ + Key: item.KeyExpr, + Value: item.ValueExpr, + } + } + return ret +} + +// ObjectConsKeyExpr is a special wrapper used only for ObjectConsExpr keys, +// which deals with the special case that a naked identifier in that position +// must be interpreted as a literal string rather than evaluated directly. +type ObjectConsKeyExpr struct { + Wrapped Expression +} + +func (e *ObjectConsKeyExpr) literalName() string { + // This is our logic for deciding whether to behave like a literal string. + // We lean on our AbsTraversalForExpr implementation here, which already + // deals with some awkward cases like the expression being the result + // of the keywords "null", "true" and "false" which we'd want to interpret + // as keys here too. + return hcl.ExprAsKeyword(e.Wrapped) +} + +func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) { + // We only treat our wrapped expression as a real expression if we're + // not going to interpret it as a literal. + if e.literalName() == "" { + e.Wrapped = w(e.Wrapped).(Expression) + } +} + +func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if ln := e.literalName(); ln != "" { + return cty.StringVal(ln), nil + } + return e.Wrapped.Value(ctx) +} + +func (e *ObjectConsKeyExpr) Range() hcl.Range { + return e.Wrapped.Range() +} + +func (e *ObjectConsKeyExpr) StartRange() hcl.Range { + return e.Wrapped.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *ObjectConsKeyExpr) AsTraversal() hcl.Traversal { + // We can produce a traversal only if our wrappee can. 
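+    // (A bare key like foo qualifies; a computed key like upper(k) does not.)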
+ st, diags := hcl.AbsTraversalForExpr(e.Wrapped) + if diags.HasErrors() { + return nil + } + + return st +} + +func (e *ObjectConsKeyExpr) UnwrapExpression() Expression { + return e.Wrapped +} + +// ForExpr represents iteration constructs: +// +// tuple = [for i, v in list: upper(v) if i > 2] +// object = {for k, v in map: k => upper(v)} +// object_of_tuples = {for v in list: v.key: v...} +type ForExpr struct { + KeyVar string // empty if ignoring the key + ValVar string + + CollExpr Expression + + KeyExpr Expression // nil when producing a tuple + ValExpr Expression + CondExpr Expression // null if no "if" clause is present + + Group bool // set if the ellipsis is used on the value in an object for + + SrcRange hcl.Range + OpenRange hcl.Range + CloseRange hcl.Range +} + +func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + collVal, collDiags := e.CollExpr.Value(ctx) + diags = append(diags, collDiags...) + + if collVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over null value", + Detail: "A null value cannot be used as the collection in a 'for' expression.", + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if collVal.Type() == cty.DynamicPseudoType { + return cty.DynamicVal, diags + } + if !collVal.CanIterateElements() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over non-iterable value", + Detail: fmt.Sprintf( + "A value of type %s cannot be used as the collection in a 'for' expression.", + collVal.Type().FriendlyName(), + ), + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if !collVal.IsKnown() { + return cty.DynamicVal, diags + } + + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + + // Before we start we'll do an early check to see if any CondExpr we've + // been given is of the wrong type. This isn't 100% reliable (it may + // be DynamicVal until real values are given) but it should catch some + // straightforward cases and prevent a barrage of repeated errors. + if e.CondExpr != nil { + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = cty.DynamicVal + } + childCtx.Variables[e.ValVar] = cty.DynamicVal + + result, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) 
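+        // With placeholder values bound, a condition that is definitely null
+        // or definitely not convertible to bool can be rejected once here,
+        // rather than once per element below.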
+ if result.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + _, err := convert.Convert(result, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if condDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + + if e.KeyExpr != nil { + // Producing an object + var vals map[string]cty.Value + var groupVals map[string][]cty.Value + if e.Group { + groupVals = map[string][]cty.Value{} + } else { + vals = map[string]cty.Value{} + } + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !include.IsKnown() { + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + keyRaw, keyDiags := e.KeyExpr.Value(childCtx) + diags = append(diags, keyDiags...) + if keyRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: "Key expression in 'for' expression must not produce a null value.", + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !keyRaw.IsKnown() { + known = false + continue + } + + key, err := convert.Convert(keyRaw, cty.String) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + + if e.Group { + k := key.AsString() + groupVals[k] = append(groupVals[k], val) + } else { + k := key.AsString() + if _, exists := vals[k]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object key", + Detail: fmt.Sprintf( + "Two different items produced the key %q in this for expression. If duplicates are expected, use the ellipsis (...) 
after the value expression to enable grouping by key.", + k, + ), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } else { + vals[key.AsString()] = val + } + } + } + + if !known { + return cty.DynamicVal, diags + } + + if e.Group { + vals = map[string]cty.Value{} + for k, gvs := range groupVals { + vals[k] = cty.TupleVal(gvs) + } + } + + return cty.ObjectVal(vals), diags + + } else { + // Producing a tuple + vals := []cty.Value{} + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !includeRaw.IsKnown() { + // We will eventually return DynamicVal, but we'll continue + // iterating in case there are other diagnostics to gather + // for later elements. + known = false + continue + } + + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + vals = append(vals, val) + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags + } +} + +func (e *ForExpr) walkChildNodes(w internalWalkFunc) { + e.CollExpr = w(e.CollExpr).(Expression) + + scopeNames := map[string]struct{}{} + if e.KeyVar != "" { + scopeNames[e.KeyVar] = struct{}{} + } + if e.ValVar != "" { + scopeNames[e.ValVar] = struct{}{} + } + + if e.KeyExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.KeyExpr, + }) + } + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.ValExpr, + }) + if e.CondExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.CondExpr, + }) + } +} + +func (e *ForExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ForExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type SplatExpr struct { + Source Expression + Each Expression + Item *AnonSymbolExpr + + SrcRange hcl.Range + MarkerRange hcl.Range +} + +func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + sourceVal, diags := e.Source.Value(ctx) + if diags.HasErrors() { + // We'll evaluate our "Each" expression here just to see if it + // produces any more diagnostics we can report. Since we're not + // assigning a value to our AnonSymbolExpr here it will return + // DynamicVal, which should short-circuit any use of it. + _, itemDiags := e.Item.Value(ctx) + diags = append(diags, itemDiags...) 
+ return cty.DynamicVal, diags + } + + if sourceVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Splat of null value", + Detail: "Splat expressions (with the * symbol) cannot be applied to null values.", + Subject: e.Source.Range().Ptr(), + Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), + }) + return cty.DynamicVal, diags + } + if !sourceVal.IsKnown() { + return cty.DynamicVal, diags + } + + // A "special power" of splat expressions is that they can be applied + // both to tuples/lists and to other values, and in the latter case + // the value will be treated as an implicit single-value list. We'll + // deal with that here first. + if !(sourceVal.Type().IsTupleType() || sourceVal.Type().IsListType()) { + sourceVal = cty.ListVal([]cty.Value{sourceVal}) + } + + vals := make([]cty.Value, 0, sourceVal.LengthInt()) + it := sourceVal.ElementIterator() + if ctx == nil { + // we need a context to use our AnonSymbolExpr, so we'll just + // make an empty one here to use as a placeholder. + ctx = ctx.NewChild() + } + isKnown := true + for it.Next() { + _, sourceItem := it.Element() + e.Item.setValue(ctx, sourceItem) + newItem, itemDiags := e.Each.Value(ctx) + diags = append(diags, itemDiags...) + if itemDiags.HasErrors() { + isKnown = false + } + vals = append(vals, newItem) + } + e.Item.clearValue(ctx) // clean up our temporary value + + if !isKnown { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags +} + +func (e *SplatExpr) walkChildNodes(w internalWalkFunc) { + e.Source = w(e.Source).(Expression) + e.Each = w(e.Each).(Expression) +} + +func (e *SplatExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *SplatExpr) StartRange() hcl.Range { + return e.MarkerRange +} + +// AnonSymbolExpr is used as a placeholder for a value in an expression that +// can be applied dynamically to any value at runtime. +// +// This is a rather odd, synthetic expression. It is used as part of the +// representation of splat expressions as a placeholder for the current item +// being visited in the splat evaluation. +// +// AnonSymbolExpr cannot be evaluated in isolation. If its Value is called +// directly then cty.DynamicVal will be returned. Instead, it is evaluated +// in terms of another node (i.e. a splat expression) which temporarily +// assigns it a value. +type AnonSymbolExpr struct { + SrcRange hcl.Range + values map[*hcl.EvalContext]cty.Value +} + +func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if ctx == nil { + return cty.DynamicVal, nil + } + val, exists := e.values[ctx] + if !exists { + return cty.DynamicVal, nil + } + return val, nil +} + +// setValue sets a temporary local value for the expression when evaluated +// in the given context, which must be non-nil. 
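+// Each splat evaluation brackets its iteration with setValue and clearValue,
+// so evaluations in distinct EvalContexts don't collide with each other.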
+func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { + if e.values == nil { + e.values = make(map[*hcl.EvalContext]cty.Value) + } + if ctx == nil { + panic("can't setValue for a nil EvalContext") + } + e.values[ctx] = val +} + +func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) { + if e.values == nil { + return + } + if ctx == nil { + panic("can't clearValue for a nil EvalContext") + } + delete(e.values, ctx) +} + +func (e *AnonSymbolExpr) walkChildNodes(w internalWalkFunc) { + // AnonSymbolExpr is a leaf node in the tree +} + +func (e *AnonSymbolExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *AnonSymbolExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go new file mode 100644 index 0000000..9a5da04 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go @@ -0,0 +1,258 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +type Operation struct { + Impl function.Function + Type cty.Type +} + +var ( + OpLogicalOr = &Operation{ + Impl: stdlib.OrFunc, + Type: cty.Bool, + } + OpLogicalAnd = &Operation{ + Impl: stdlib.AndFunc, + Type: cty.Bool, + } + OpLogicalNot = &Operation{ + Impl: stdlib.NotFunc, + Type: cty.Bool, + } + + OpEqual = &Operation{ + Impl: stdlib.EqualFunc, + Type: cty.Bool, + } + OpNotEqual = &Operation{ + Impl: stdlib.NotEqualFunc, + Type: cty.Bool, + } + + OpGreaterThan = &Operation{ + Impl: stdlib.GreaterThanFunc, + Type: cty.Bool, + } + OpGreaterThanOrEqual = &Operation{ + Impl: stdlib.GreaterThanOrEqualToFunc, + Type: cty.Bool, + } + OpLessThan = &Operation{ + Impl: stdlib.LessThanFunc, + Type: cty.Bool, + } + OpLessThanOrEqual = &Operation{ + Impl: stdlib.LessThanOrEqualToFunc, + Type: cty.Bool, + } + + OpAdd = &Operation{ + Impl: stdlib.AddFunc, + Type: cty.Number, + } + OpSubtract = &Operation{ + Impl: stdlib.SubtractFunc, + Type: cty.Number, + } + OpMultiply = &Operation{ + Impl: stdlib.MultiplyFunc, + Type: cty.Number, + } + OpDivide = &Operation{ + Impl: stdlib.DivideFunc, + Type: cty.Number, + } + OpModulo = &Operation{ + Impl: stdlib.ModuloFunc, + Type: cty.Number, + } + OpNegate = &Operation{ + Impl: stdlib.NegateFunc, + Type: cty.Number, + } +) + +var binaryOps []map[TokenType]*Operation + +func init() { + // This operation table maps from the operator's token type + // to the AST operation type. All expressions produced from + // binary operators are BinaryOp nodes. + // + // Binary operator groups are listed in order of precedence, with + // the *lowest* precedence first. Operators within the same group + // have left-to-right associativity. 
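+    //
+    // For example, a < b + c parses as a < (b + c), because the comparison
+    // group appears earlier in this table (binds more loosely) than the
+    // additive group.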
+    binaryOps = []map[TokenType]*Operation{
+        {
+            TokenOr: OpLogicalOr,
+        },
+        {
+            TokenAnd: OpLogicalAnd,
+        },
+        {
+            TokenEqualOp:  OpEqual,
+            TokenNotEqual: OpNotEqual,
+        },
+        {
+            TokenGreaterThan:   OpGreaterThan,
+            TokenGreaterThanEq: OpGreaterThanOrEqual,
+            TokenLessThan:      OpLessThan,
+            TokenLessThanEq:    OpLessThanOrEqual,
+        },
+        {
+            TokenPlus:  OpAdd,
+            TokenMinus: OpSubtract,
+        },
+        {
+            TokenStar:    OpMultiply,
+            TokenSlash:   OpDivide,
+            TokenPercent: OpModulo,
+        },
+    }
+}
+
+type BinaryOpExpr struct {
+    LHS Expression
+    Op  *Operation
+    RHS Expression
+
+    SrcRange hcl.Range
+}
+
+func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) {
+    e.LHS = w(e.LHS).(Expression)
+    e.RHS = w(e.RHS).(Expression)
+}
+
+func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    impl := e.Op.Impl // assumed to be a function taking exactly two arguments
+    params := impl.Params()
+    lhsParam := params[0]
+    rhsParam := params[1]
+
+    var diags hcl.Diagnostics
+
+    givenLHSVal, lhsDiags := e.LHS.Value(ctx)
+    givenRHSVal, rhsDiags := e.RHS.Value(ctx)
+    diags = append(diags, lhsDiags...)
+    diags = append(diags, rhsDiags...)
+
+    lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type)
+    if err != nil {
+        diags = append(diags, &hcl.Diagnostic{
+            Severity: hcl.DiagError,
+            Summary:  "Invalid operand",
+            Detail:   fmt.Sprintf("Unsuitable value for left operand: %s.", err),
+            Subject:  e.LHS.Range().Ptr(),
+            Context:  &e.SrcRange,
+        })
+    }
+    rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type)
+    if err != nil {
+        diags = append(diags, &hcl.Diagnostic{
+            Severity: hcl.DiagError,
+            Summary:  "Invalid operand",
+            Detail:   fmt.Sprintf("Unsuitable value for right operand: %s.", err),
+            Subject:  e.RHS.Range().Ptr(),
+            Context:  &e.SrcRange,
+        })
+    }
+
+    if diags.HasErrors() {
+        // Don't actually try the call if we have errors already, since
+        // this will probably just produce a confusing duplicative diagnostic.
+        return cty.UnknownVal(e.Op.Type), diags
+    }
+
+    args := []cty.Value{lhsVal, rhsVal}
+    result, err := impl.Call(args)
+    if err != nil {
+        diags = append(diags, &hcl.Diagnostic{
+            // FIXME: This diagnostic is useless.
+            Severity: hcl.DiagError,
+            Summary:  "Operation failed",
+            Detail:   fmt.Sprintf("Error during operation: %s.", err),
+            Subject:  &e.SrcRange,
+        })
+        return cty.UnknownVal(e.Op.Type), diags
+    }
+
+    return result, diags
+}
+
+func (e *BinaryOpExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *BinaryOpExpr) StartRange() hcl.Range {
+    return e.LHS.StartRange()
+}
+
+type UnaryOpExpr struct {
+    Op  *Operation
+    Val Expression
+
+    SrcRange    hcl.Range
+    SymbolRange hcl.Range
+}
+
+func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) {
+    e.Val = w(e.Val).(Expression)
+}
+
+func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    impl := e.Op.Impl // assumed to be a function taking exactly one argument
+    params := impl.Params()
+    param := params[0]
+
+    givenVal, diags := e.Val.Value(ctx)
+
+    val, err := convert.Convert(givenVal, param.Type)
+    if err != nil {
+        diags = append(diags, &hcl.Diagnostic{
+            Severity: hcl.DiagError,
+            Summary:  "Invalid operand",
+            Detail:   fmt.Sprintf("Unsuitable value for unary operand: %s.", err),
+            Subject:  e.Val.Range().Ptr(),
+            Context:  &e.SrcRange,
+        })
+    }
+
+    if diags.HasErrors() {
+        // Don't actually try the call if we have errors already, since
+        // this will probably just produce a confusing duplicative diagnostic.
+        return cty.UnknownVal(e.Op.Type), diags
+    }
+
+    args := []cty.Value{val}
+    result, err := impl.Call(args)
+    if err != nil {
+        diags = append(diags, &hcl.Diagnostic{
+            // FIXME: This diagnostic is useless.
+            Severity: hcl.DiagError,
+            Summary:  "Operation failed",
+            Detail:   fmt.Sprintf("Error during operation: %s.", err),
+            Subject:  &e.SrcRange,
+        })
+        return cty.UnknownVal(e.Op.Type), diags
+    }
+
+    return result, diags
+}
+
+func (e *UnaryOpExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *UnaryOpExpr) StartRange() hcl.Range {
+    return e.SymbolRange
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
new file mode 100644
index 0000000..a1c4727
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
@@ -0,0 +1,192 @@
+package hclsyntax
+
+import (
+    "bytes"
+    "fmt"
+
+    "github.com/hashicorp/hcl2/hcl"
+    "github.com/zclconf/go-cty/cty"
+    "github.com/zclconf/go-cty/cty/convert"
+)
+
+type TemplateExpr struct {
+    Parts []Expression
+
+    SrcRange hcl.Range
+}
+
+func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) {
+    for i, part := range e.Parts {
+        e.Parts[i] = w(part).(Expression)
+    }
+}
+
+func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    buf := &bytes.Buffer{}
+    var diags hcl.Diagnostics
+    isKnown := true
+
+    for _, part := range e.Parts {
+        partVal, partDiags := part.Value(ctx)
+        diags = append(diags, partDiags...)
+
+        if partVal.IsNull() {
+            diags = append(diags, &hcl.Diagnostic{
+                Severity: hcl.DiagError,
+                Summary:  "Invalid template interpolation value",
+                Detail: fmt.Sprintf(
+                    "The expression result is null. Cannot include a null value in a string template.",
+                ),
+                Subject: part.Range().Ptr(),
+                Context: &e.SrcRange,
+            })
+            continue
+        }
+
+        if !partVal.IsKnown() {
+            // If any part is unknown then the result as a whole must be
+            // unknown too. We'll keep on processing the rest of the parts
+            // anyway, because we want to still emit any diagnostics resulting
+            // from evaluating those.
+            isKnown = false
+            continue
+        }
+
+        strVal, err := convert.Convert(partVal, cty.String)
+        if err != nil {
+            diags = append(diags, &hcl.Diagnostic{
+                Severity: hcl.DiagError,
+                Summary:  "Invalid template interpolation value",
+                Detail: fmt.Sprintf(
+                    "Cannot include the given value in a string template: %s.",
+                    err.Error(),
+                ),
+                Subject: part.Range().Ptr(),
+                Context: &e.SrcRange,
+            })
+            continue
+        }
+
+        buf.WriteString(strVal.AsString())
+    }
+
+    if !isKnown {
+        return cty.UnknownVal(cty.String), diags
+    }
+
+    return cty.StringVal(buf.String()), diags
+}
+
+func (e *TemplateExpr) Range() hcl.Range {
+    return e.SrcRange
+}
+
+func (e *TemplateExpr) StartRange() hcl.Range {
+    return e.Parts[0].StartRange()
+}
+
+// TemplateJoinExpr is used to convert tuples of strings produced by template
+// constructs (i.e. for loops) into flat strings, by converting the values
+// to strings and joining them. This AST node is not used directly; it's
+// produced as part of the AST of a "for" loop in a template.
+type TemplateJoinExpr struct {
+    Tuple Expression
+}
+
+func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) {
+    e.Tuple = w(e.Tuple).(Expression)
+}
+
+func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+    tuple, diags := e.Tuple.Value(ctx)
+
+    if tuple.IsNull() {
+        // This indicates a bug in the code that constructed the AST.
+ panic("TemplateJoinExpr got null tuple") + } + if tuple.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + if !tuple.Type().IsTupleType() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got non-tuple tuple") + } + if !tuple.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf := &bytes.Buffer{} + it := tuple.ElementIterator() + for it.Next() { + _, val := it.Element() + + if val.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "An iteration result is null. Cannot include a null value in a string template.", + ), + Subject: e.Range().Ptr(), + }) + continue + } + if val.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + strVal, err := convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include one of the interpolation results into the string template: %s.", + err.Error(), + ), + Subject: e.Range().Ptr(), + }) + continue + } + if !val.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf.WriteString(strVal.AsString()) + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateJoinExpr) Range() hcl.Range { + return e.Tuple.Range() +} + +func (e *TemplateJoinExpr) StartRange() hcl.Range { + return e.Tuple.StartRange() +} + +// TemplateWrapExpr is used instead of a TemplateExpr when a template +// consists _only_ of a single interpolation sequence. In that case, the +// template's result is the single interpolation's result, verbatim with +// no type conversions. +type TemplateWrapExpr struct { + Wrapped Expression + + SrcRange hcl.Range +} + +func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) { + e.Wrapped = w(e.Wrapped).(Expression) +} + +func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Wrapped.Value(ctx) +} + +func (e *TemplateWrapExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateWrapExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go new file mode 100644 index 0000000..9177092 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go @@ -0,0 +1,76 @@ +package hclsyntax + +// Generated by expression_vars_get.go. DO NOT EDIT. +// Run 'go generate' on this package to update the set of functions here. 
+ +import ( + "github.com/hashicorp/hcl2/hcl" +) + +func (e *AnonSymbolExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *BinaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ConditionalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ForExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *FunctionCallExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *IndexExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *LiteralValueExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *RelativeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ScopeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *SplatExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateJoinExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateWrapExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TupleConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *UnaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go new file mode 100644 index 0000000..88f1980 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go @@ -0,0 +1,99 @@ +// This is a 'go generate'-oriented program for producing the "Variables" +// method on every Expression implementation found within this package. +// All expressions share the same implementation for this method, which +// just wraps the package-level function "Variables" and uses an AST walk +// to do its work. + +// +build ignore + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "sort" +) + +func main() { + fs := token.NewFileSet() + pkgs, err := parser.ParseDir(fs, ".", nil, 0) + if err != nil { + fmt.Fprintf(os.Stderr, "error while parsing: %s\n", err) + os.Exit(1) + } + pkg := pkgs["hclsyntax"] + + // Walk all the files and collect the receivers of any "Value" methods + // that look like they are trying to implement Expression. + var recvs []string + for _, f := range pkg.Files { + for _, decl := range f.Decls { + fd, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + if fd.Name.Name != "Value" { + continue + } + results := fd.Type.Results.List + if len(results) != 2 { + continue + } + valResult := fd.Type.Results.List[0].Type.(*ast.SelectorExpr).X.(*ast.Ident) + diagsResult := fd.Type.Results.List[1].Type.(*ast.SelectorExpr).X.(*ast.Ident) + + if valResult.Name != "cty" && diagsResult.Name != "hcl" { + continue + } + + // If we have a method called Value and it returns something in + // "cty" followed by something in "hcl" then that's specific enough + // for now, even though this is not 100% exact as a correct + // implementation of Value. 
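+            //
+            // For example, for `func (e *SplatExpr) Value(...)`, the receiver
+            // type is an *ast.StarExpr whose X is the *ast.Ident "SplatExpr".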
+ + recvTy := fd.Recv.List[0].Type + + switch rtt := recvTy.(type) { + case *ast.StarExpr: + name := rtt.X.(*ast.Ident).Name + recvs = append(recvs, fmt.Sprintf("*%s", name)) + default: + fmt.Fprintf(os.Stderr, "don't know what to do with a %T receiver\n", recvTy) + } + + } + } + + sort.Strings(recvs) + + of, err := os.OpenFile("expression_vars.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to open output file: %s\n", err) + os.Exit(1) + } + + fmt.Fprint(of, outputPreamble) + for _, recv := range recvs { + fmt.Fprintf(of, outputMethodFmt, recv) + } + fmt.Fprint(of, "\n") + +} + +const outputPreamble = `package hclsyntax + +// Generated by expression_vars_get.go. DO NOT EDIT. +// Run 'go generate' on this package to update the set of functions here. + +import ( + "github.com/hashicorp/hcl2/hcl" +)` + +const outputMethodFmt = ` + +func (e %s) Variables() []hcl.Traversal { + return Variables(e) +}` diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go new file mode 100644 index 0000000..490c025 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go @@ -0,0 +1,20 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// File is the top-level object resulting from parsing a configuration file. +type File struct { + Body *Body + Bytes []byte +} + +func (f *File) AsHCLFile() *hcl.File { + return &hcl.File{ + Body: f.Body, + Bytes: f.Bytes, + + // TODO: The Nav object, once we have an implementation of it + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go new file mode 100644 index 0000000..841656a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go @@ -0,0 +1,9 @@ +package hclsyntax + +//go:generate go run expression_vars_gen.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl +//go:generate ragel -Z scan_tokens.rl +//go:generate gofmt -w scan_tokens.go +//go:generate ragel -Z scan_string_lit.rl +//go:generate gofmt -w scan_string_lit.go +//go:generate stringer -type TokenType -output token_type_string.go diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go new file mode 100644 index 0000000..eef8b96 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go @@ -0,0 +1,21 @@ +package hclsyntax + +import ( + "bytes" +) + +type Keyword []byte + +var forKeyword = Keyword([]byte{'f', 'o', 'r'}) +var inKeyword = Keyword([]byte{'i', 'n'}) +var ifKeyword = Keyword([]byte{'i', 'f'}) +var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'}) +var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'}) +var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'}) + +func (kw Keyword) TokenMatches(token Token) bool { + if token.Type != TokenIdent { + return false + } + return bytes.Equal([]byte(kw), token.Bytes) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go new file mode 100644 index 0000000..4d41b6b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go @@ -0,0 +1,41 @@ +package hclsyntax + +import ( + "bytes" + "fmt" +) + +type navigation struct { + root *Body +} + +// Implementation of 
hcled.ContextString +func (n navigation) ContextString(offset int) string { + // We will walk our top-level blocks until we find one that contains + // the given offset, and then construct a representation of the header + // of the block. + + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return "" + } + + if len(block.Labels) == 0 { + // Easy case! + return block.Type + } + + buf := &bytes.Buffer{} + buf.WriteString(block.Type) + for _, label := range block.Labels { + fmt.Fprintf(buf, " %q", label) + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go new file mode 100644 index 0000000..fd426d4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go @@ -0,0 +1,22 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Node is the abstract type that every AST node implements. +// +// This is a closed interface, so it cannot be implemented from outside of +// this package. +type Node interface { + // This is the mechanism by which the public-facing walk functions + // are implemented. Implementations should call the given function + // for each child node and then replace that node with its return value. + // The return value might just be the same node, for non-transforming + // walks. + walkChildNodes(w internalWalkFunc) + + Range() hcl.Range +} + +type internalWalkFunc func(Node) Node diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go new file mode 100644 index 0000000..002858f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go @@ -0,0 +1,1836 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "strconv" + "unicode/utf8" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type parser struct { + *peeker + + // set to true if any recovery is attempted. The parser can use this + // to attempt to reduce error noise by suppressing "bad token" errors + // in recovery mode, assuming that the recovery heuristics have failed + // in this case and left the peeker in a wrong place. + recovery bool +} + +func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) { + attrs := Attributes{} + blocks := Blocks{} + var diags hcl.Diagnostics + + startRange := p.PrevRange() + var endRange hcl.Range + +Token: + for { + next := p.Peek() + if next.Type == end { + endRange = p.NextRange() + p.Read() + break Token + } + + switch next.Type { + case TokenNewline: + p.Read() + continue + case TokenIdent: + item, itemDiags := p.ParseBodyItem() + diags = append(diags, itemDiags...) + switch titem := item.(type) { + case *Block: + blocks = append(blocks, titem) + case *Attribute: + if existing, exists := attrs[titem.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute redefined", + Detail: fmt.Sprintf( + "The attribute %q was already defined at %s. 
Each attribute may be defined only once.", + titem.Name, existing.NameRange.String(), + ), + Subject: &titem.NameRange, + }) + } else { + attrs[titem.Name] = titem + } + default: + // This should never happen for valid input, but may if a + // syntax error was detected in ParseBodyItem that prevented + // it from even producing a partially-broken item. In that + // case, it would've left at least one error in the diagnostics + // slice we already dealt with above. + // + // We'll assume ParseBodyItem attempted recovery to leave + // us in a reasonable position to try parsing the next item. + continue + } + default: + bad := p.Read() + if !p.recovery { + if bad.Type == TokenOQuote { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "Attribute names must not be quoted.", + Subject: &bad.Range, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here.", + Subject: &bad.Range, + }) + } + } + endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics + + p.recover(end) // attempt to recover to the token after the end of this body + break Token + } + } + + return &Body{ + Attributes: attrs, + Blocks: blocks, + + SrcRange: hcl.RangeBetween(startRange, endRange), + EndRange: hcl.Range{ + Filename: endRange.Filename, + Start: endRange.End, + End: endRange.End, + }, + }, diags +} + +func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + next := p.Peek() + + switch next.Type { + case TokenEqual: + return p.finishParsingBodyAttribute(ident) + case TokenOQuote, TokenOBrace, TokenIdent: + return p.finishParsingBodyBlock(ident) + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here. To define an attribute, use the equals sign \"=\" to introduce the attribute value.", + Subject: &ident.Range, + }, + } + } + + return nil, nil +} + +func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) { + eqTok := p.Read() // eat equals token + if eqTok.Type != TokenEqual { + // should never happen if caller behaves + panic("finishParsingBodyAttribute called with next not equals") + } + + var endRange hcl.Range + + expr, diags := p.ParseExpression() + if p.recovery && diags.HasErrors() { + // recovery within expressions tends to be tricky, so we've probably + // landed somewhere weird. We'll try to reset to the start of a body + // item so parsing can continue. 
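+        // PrevRange is the last token consumed before recovery began, which
+        // is the closest approximation we have for where the attribute ends.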
+ endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + end := p.Peek() + if end.Type != TokenNewline && end.Type != TokenEOF { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after attribute definition", + Detail: "An attribute definition must end with a newline.", + Subject: &end.Range, + Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), + }) + } + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + p.Read() // eat newline + } + } + + return &Attribute{ + Name: string(ident.Bytes), + Expr: expr, + + SrcRange: hcl.RangeBetween(ident.Range, endRange), + NameRange: ident.Range, + EqualsRange: eqTok.Range, + }, diags +} + +func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) { + var blockType = string(ident.Bytes) + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + + var oBrace Token + +Token: + for { + tok := p.Peek() + + switch tok.Type { + + case TokenOBrace: + oBrace = p.Read() + break Token + + case TokenOQuote: + label, labelRange, labelDiags := p.parseQuotedStringLiteral() + diags = append(diags, labelDiags...) + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + if labelDiags.HasErrors() { + p.recoverAfterBodyItem() + return &Block{ + Type: blockType, + Labels: labels, + Body: nil, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + + case TokenIdent: + tok = p.Read() // eat token + label, labelRange := string(tok.Bytes), tok.Range + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + + default: + switch tok.Type { + case TokenEqual: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "The equals sign \"=\" indicates an attribute definition, and must not be used when defining a block.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + case TokenNewline: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "Either a quoted string block label or an opening brace (\"{\") is expected here.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + } + } + + p.recoverAfterBodyItem() + + return &Block{ + Type: blockType, + Labels: labels, + Body: nil, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + } + + // Once we fall out here, the peeker is pointed just after our opening + // brace, so we can begin our nested body parsing. + body, bodyDiags := p.ParseBody(TokenCBrace) + diags = append(diags, bodyDiags...) 
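Once the opening brace is consumed, the block's content is just another body parsed recursively with TokenCBrace as its terminator, so arbitrary nesting falls out of ParseBody itself. A quick way to see the result, assuming the hclparse entry point and the import paths used elsewhere in this vendor tree:

```go
// Minimal sketch of driving this parser through the public hclparse API
// and inspecting the block header that finishParsingBodyBlock produced.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/hashicorp/hcl2/hclparse"
)

func main() {
	src := []byte(`
resource "aws_instance" "web" {
  ami = "ami-12345"
}
`)
	p := hclparse.NewParser()
	f, diags := p.ParseHCL(src, "example.tf")
	if diags.HasErrors() {
		fmt.Println(diags)
		return
	}
	body := f.Body.(*hclsyntax.Body)
	for _, b := range body.Blocks {
		fmt.Println(b.Type, b.Labels) // resource [aws_instance web]
	}
}
```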
+ cBraceRange := p.PrevRange() + + eol := p.Peek() + if eol.Type == TokenNewline || eol.Type == TokenEOF { + p.Read() // eat newline + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after block definition", + Detail: "A block definition must end with a newline.", + Subject: &eol.Range, + Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), + }) + } + p.recoverAfterBodyItem() + } + + return &Block{ + Type: blockType, + Labels: labels, + Body: body, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: oBrace.Range, + CloseBraceRange: cBraceRange, + }, diags +} + +func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) { + return p.parseTernaryConditional() +} + +func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) { + // The ternary conditional operator (.. ? .. : ..) behaves somewhat + // like a binary operator except that the "symbol" is itself + // an expression enclosed in two punctuation characters. + // The middle expression is parsed as if the ? and : symbols + // were parentheses. The "rhs" (the "false expression") is then + // treated right-associatively so it behaves similarly to the + // middle in terms of precedence. + + startRange := p.NextRange() + var condExpr, trueExpr, falseExpr Expression + var diags hcl.Diagnostics + + condExpr, condDiags := p.parseBinaryOps(binaryOps) + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + return condExpr, diags + } + + questionMark := p.Peek() + if questionMark.Type != TokenQuestion { + return condExpr, diags + } + + p.Read() // eat question mark + + trueExpr, trueDiags := p.ParseExpression() + diags = append(diags, trueDiags...) + if p.recovery && trueDiags.HasErrors() { + return condExpr, diags + } + + colon := p.Peek() + if colon.Type != TokenColon { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing false expression in conditional", + Detail: "The conditional operator (...?...:...) requires a false expression, delimited by a colon.", + Subject: &colon.Range, + Context: hcl.RangeBetween(startRange, colon.Range).Ptr(), + }) + return condExpr, diags + } + + p.Read() // eat colon + + falseExpr, falseDiags := p.ParseExpression() + diags = append(diags, falseDiags...) + if p.recovery && falseDiags.HasErrors() { + return condExpr, diags + } + + return &ConditionalExpr{ + Condition: condExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()), + }, diags +} + +// parseBinaryOps calls itself recursively to work through all of the +// operator precedence groups, and then eventually calls parseExpressionTerm +// for each operand. +func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) { + if len(ops) == 0 { + // We've run out of operators, so now we'll just try to parse a term. + return p.parseExpressionWithTraversals() + } + + thisLevel := ops[0] + remaining := ops[1:] + + var lhs, rhs Expression + var operation *Operation + var diags hcl.Diagnostics + + // Parse a term that might be the first operand of a binary + // operation or it might just be a standalone term. + // We won't know until we've parsed it and can look ahead + // to see if there's an operator token for this level. + lhs, lhsDiags := p.parseBinaryOps(remaining) + diags = append(diags, lhsDiags...) 
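The recursive structure of parseBinaryOps is worth seeing in isolation: each call handles one precedence tier, delegates its operands to the next tier, and folds repeated operators of the same tier left-associatively. A self-contained sketch with a toy two-level table (not the real hclsyntax Operation table):

```go
// Precedence-ladder sketch: levels[0] binds loosest, mirroring how the
// binaryOps table is ordered. Simplified to string "tokens".
package main

import "fmt"

type parser struct {
	toks []string
	pos  int
}

func (p *parser) peek() string {
	if p.pos < len(p.toks) {
		return p.toks[p.pos]
	}
	return ""
}

var levels = [][]string{{"+", "-"}, {"*", "/"}}

func (p *parser) parseOps(lvl int) string {
	if lvl == len(levels) {
		t := p.toks[p.pos] // out of operators: a bare term
		p.pos++
		return t
	}
	lhs := p.parseOps(lvl + 1)
	for contains(levels[lvl], p.peek()) {
		op := p.toks[p.pos]
		p.pos++
		rhs := p.parseOps(lvl + 1)
		lhs = "(" + lhs + op + rhs + ")" // left-associative fold
	}
	return lhs
}

func contains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	p := &parser{toks: []string{"a", "+", "b", "*", "c", "+", "d"}}
	fmt.Println(p.parseOps(0)) // ((a+(b*c))+d)
}
```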
+ if p.recovery && lhsDiags.HasErrors() { + return lhs, diags + } + + // We'll keep eating up operators until we run out, so that operators + // with the same precedence will combine in a left-associative manner: + // a+b+c => (a+b)+c, not a+(b+c) + // + // Should we later want to have right-associative operators, a way + // to achieve that would be to call back up to ParseExpression here + // instead of iteratively parsing only the remaining operators. + for { + next := p.Peek() + var newOp *Operation + var ok bool + if newOp, ok = thisLevel[next.Type]; !ok { + break + } + + // Are we extending an expression started on the previous iteration? + if operation != nil { + lhs = &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + } + } + + operation = newOp + p.Read() // eat operator token + var rhsDiags hcl.Diagnostics + rhs, rhsDiags = p.parseBinaryOps(remaining) + diags = append(diags, rhsDiags...) + if p.recovery && rhsDiags.HasErrors() { + return lhs, diags + } + } + + if operation == nil { + return lhs, diags + } + + return &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + }, diags +} + +func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) { + term, diags := p.parseExpressionTerm() + ret := term + +Traversal: + for { + next := p.Peek() + + switch next.Type { + case TokenDot: + // Attribute access or splat + dot := p.Read() + attrTok := p.Peek() + + switch attrTok.Type { + case TokenIdent: + attrTok = p.Read() // eat token + name := string(attrTok.Bytes) + rng := hcl.RangeBetween(dot.Range, attrTok.Range) + step := hcl.TraverseAttr{ + Name: name, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenNumberLit: + // This is a weird form we inherited from HIL, allowing numbers + // to be used as attributes as a weird way of writing [n]. + // This was never actually a first-class thing in HIL, but + // HIL tolerated sequences like .0. in its variable names and + // calling applications like Terraform exploited that to + // introduce indexing syntax where none existed. + numTok := p.Read() // eat token + attrTok = numTok + + // This syntax is ambiguous if multiple indices are used in + // succession, like foo.0.1.baz: that actually parses as + // a fractional number 0.1. Since we're only supporting this + // syntax for compatibility with legacy Terraform + // configurations, and Terraform does not tend to have lists + // of lists, we'll choose to reject that here with a helpful + // error message, rather than failing later because the index + // isn't a whole number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + break + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) 
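The legacy HIL-style index handling above turns foo.0 into the traversal foo[0], but must reject a chained form like foo.0.1, because the scanner lexes 0.1 as a single fractional number. A focused sketch of just that check:

```go
// Sketch of the chained-legacy-index detection: a dot inside the number
// token means two indices were fused into one fractional literal.
package main

import (
	"bytes"
	"fmt"
)

func legacyIndexHint(numTok []byte) string {
	if dotIdx := bytes.IndexByte(numTok, '.'); dotIdx >= 0 {
		first := numTok[:dotIdx]
		second := numTok[dotIdx+1:]
		return fmt.Sprintf("use [%s][%s] instead", first, second)
	}
	return fmt.Sprintf("index [%s]", numTok)
}

func main() {
	fmt.Println(legacyIndexHint([]byte("0")))   // index [0]
	fmt.Println(legacyIndexHint([]byte("0.1"))) // use [0][1] instead
}
```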
+ + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: numVal, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenStar: + // "Attribute-only" splat expression. + // (This is a kinda weird construct inherited from HIL, which + // behaves a bit like a [*] splat except that it is only able + // to do attribute traversals into each of its elements, + // whereas foo[*] can support _any_ traversal. + marker := p.Read() // eat star + trav := make(hcl.Traversal, 0, 1) + var firstRange, lastRange hcl.Range + firstRange = p.NextRange() + for p.Peek().Type == TokenDot { + dot := p.Read() + + if p.Peek().Type == TokenNumberLit { + // Continuing the "weird stuff inherited from HIL" + // theme, we also allow numbers as attribute names + // inside splats and interpret them as indexing + // into a list, for expressions like: + // foo.bar.*.baz.0.foo + numTok := p.Read() + + // Weird special case if the user writes something + // like foo.bar.*.baz.0.0.foo, where 0.0 parses + // as a number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + trav = append(trav, hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) 
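Semantically, the attribute-only splat being assembled here applies the same attribute traversal to every element of the source collection. A behavioral sketch of what foo.*.bar evaluates to (the real implementation builds a SplatExpr around an AnonSymbolExpr, as the code above shows):

```go
// Behavioral model of an attribute-only splat: map one attribute lookup
// over each element. Not the real AST machinery, just the semantics.
package main

import "fmt"

func splatAttr(items []map[string]string, name string) []string {
	out := make([]string, 0, len(items))
	for _, item := range items {
		out = append(out, item[name])
	}
	return out
}

func main() {
	foo := []map[string]string{{"bar": "a"}, {"bar": "b"}}
	fmt.Println(splatAttr(foo, "bar")) // [a b]
}
```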
+ trav = append(trav, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + if p.Peek().Type != TokenIdent { + if !p.recovery { + if p.Peek().Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Nested splat expression not allowed", + Detail: "A splat expression (*) cannot be used inside another attribute-only splat expression.", + Subject: p.Peek().Range.Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + } + } + p.setRecovery() + continue Traversal + } + + attrTok := p.Read() + trav = append(trav, hcl.TraverseAttr{ + Name: string(attrTok.Bytes), + SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range), + }) + lastRange = attrTok.Range + } + + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(dot.Range, marker.Range), + } + var travExpr Expression + if len(trav) == 0 { + travExpr = itemExpr + } else { + travExpr = &RelativeTraversalExpr{ + Source: itemExpr, + Traversal: trav, + SrcRange: hcl.RangeBetween(firstRange, lastRange), + } + } + + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(dot.Range, lastRange), + MarkerRange: hcl.RangeBetween(dot.Range, marker.Range), + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + // This leaves the peeker in a bad place, so following items + // will probably be misparsed until we hit something that + // allows us to re-sync. + // + // We will probably need to do something better here eventually + // in order to support autocomplete triggered by typing a + // period. + p.setRecovery() + } + + case TokenOBrack: + // Indexing of a collection. + // This may or may not be a hcl.Traverser, depending on whether + // the key value is something constant. + + open := p.Read() + // TODO: If we have a TokenStar inside our brackets, parse as + // a Splat expression: foo[*].baz[0]. + var close Token + p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets + keyExpr, keyDiags := p.ParseExpression() + diags = append(diags, keyDiags...) + if p.recovery && keyDiags.HasErrors() { + close = p.recover(TokenCBrack) + } else { + close = p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on index", + Detail: "The index operator must end with a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + } + p.PopIncludeNewlines() + + if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { + litKey, _ := lit.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else { + rng := hcl.RangeBetween(open.Range, close.Range) + ret = &IndexExpr{ + Collection: ret, + Key: keyExpr, + + SrcRange: rng, + OpenRange: open.Range, + } + } + + default: + break Traversal + } + } + + return ret, diags +} + +// makeRelativeTraversal takes an expression and a traverser and returns +// a traversal expression that combines the two. 
If the given expression +// is already a traversal, it is extended in place (mutating it) and +// returned. If it isn't, a new RelativeTraversalExpr is created and returned. +func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression { + switch texpr := expr.(type) { + case *ScopeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + case *RelativeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + default: + return &RelativeTraversalExpr{ + Source: expr, + Traversal: hcl.Traversal{next}, + SrcRange: rng, + } + } +} + +func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { + start := p.Peek() + + switch start.Type { + case TokenOParen: + p.Read() // eat open paren + + p.PushIncludeNewlines(false) + + expr, diags := p.ParseExpression() + if diags.HasErrors() { + // attempt to place the peeker after our closing paren + // before we return, so that the next parser has some + // chance of finding a valid expression. + p.recover(TokenCParen) + p.PopIncludeNewlines() + return expr, diags + } + + close := p.Peek() + if close.Type != TokenCParen { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unbalanced parentheses", + Detail: "Expected a closing parenthesis to terminate the expression.", + Subject: &close.Range, + Context: hcl.RangeBetween(start.Range, close.Range).Ptr(), + }) + p.setRecovery() + } + + p.Read() // eat closing paren + p.PopIncludeNewlines() + + return expr, diags + + case TokenNumberLit: + tok := p.Read() // eat number token + + numVal, diags := p.numberLitValue(tok) + return &LiteralValueExpr{ + Val: numVal, + SrcRange: tok.Range, + }, diags + + case TokenIdent: + tok := p.Read() // eat identifier token + + if p.Peek().Type == TokenOParen { + return p.finishParsingFunctionCall(tok) + } + + name := string(tok.Bytes) + switch name { + case "true": + return &LiteralValueExpr{ + Val: cty.True, + SrcRange: tok.Range, + }, nil + case "false": + return &LiteralValueExpr{ + Val: cty.False, + SrcRange: tok.Range, + }, nil + case "null": + return &LiteralValueExpr{ + Val: cty.NullVal(cty.DynamicPseudoType), + SrcRange: tok.Range, + }, nil + default: + return &ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{ + Name: name, + SrcRange: tok.Range, + }, + }, + SrcRange: tok.Range, + }, nil + } + + case TokenOQuote, TokenOHeredoc: + open := p.Read() // eat opening marker + closer := p.oppositeBracket(open.Type) + exprs, passthru, _, diags := p.parseTemplateInner(closer) + + closeRange := p.PrevRange() + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + + case TokenMinus: + tok := p.Read() // eat minus token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + // e.g. 
-46+5 should parse as (-46)+5, not -(46+5) + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpNegate, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenBang: + tok := p.Read() // eat bang token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpLogicalNot, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenOBrack: + return p.parseTupleCons() + + case TokenOBrace: + return p.parseObjectCons() + + default: + var diags hcl.Diagnostics + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expression", + Detail: "Expected the start of an expression, but found an invalid expression token.", + Subject: &start.Range, + }) + } + p.setRecovery() + + // Return a placeholder so that the AST is still structurally sound + // even in the presence of parse errors. + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: start.Range, + }, diags + } +} + +func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { + // We'll lean on the cty converter to do the conversion, to ensure that + // the behavior is the same as what would happen if converting a + // non-literal string to a number. + numStrVal := cty.StringVal(string(tok.Bytes)) + numVal, err := convert.Convert(numStrVal, cty.Number) + if err != nil { + ret := cty.UnknownVal(cty.Number) + return ret, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid number literal", + // FIXME: not a very good error message, but convert only + // gives us "a number is required", so not much help either. + Detail: "Failed to recognize the value of this number literal.", + Subject: &tok.Range, + }, + } + } + return numVal, nil +} + +// finishParsingFunctionCall parses a function call assuming that the function +// name was already read, and so the peeker should be pointing at the opening +// parenthesis after the name. +func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) { + openTok := p.Read() + if openTok.Type != TokenOParen { + // should never happen if callers behave + panic("finishParsingFunctionCall called with non-parenthesis as next token") + } + + var args []Expression + var diags hcl.Diagnostics + var expandFinal bool + var closeTok Token + + // Arbitrary newlines are allowed inside the function call parentheses. + p.PushIncludeNewlines(false) + +Token: + for { + tok := p.Peek() + + if tok.Type == TokenCParen { + closeTok = p.Read() // eat closing paren + break Token + } + + arg, argDiags := p.ParseExpression() + args = append(args, arg) + diags = append(diags, argDiags...) + if p.recovery && argDiags.HasErrors() { + // if there was a parse error in the argument then we've + // probably been left in a weird place in the token stream, + // so we'll bail out with a partial argument list. 
+ p.recover(TokenCParen) + break Token + } + + sep := p.Read() + if sep.Type == TokenCParen { + closeTok = sep + break Token + } + + if sep.Type == TokenEllipsis { + expandFinal = true + + if p.Peek().Type != TokenCParen { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing closing parenthesis", + Detail: "An expanded function argument (with ...) must be immediately followed by closing parentheses.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + } + closeTok = p.recover(TokenCParen) + } else { + closeTok = p.Read() // eat closing paren + } + break Token + } + + if sep.Type != TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing argument separator", + Detail: "A comma is required to separate each function argument from the next.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + closeTok = p.recover(TokenCParen) + break Token + } + + if p.Peek().Type == TokenCParen { + // A trailing comma after the last argument gets us in here. + closeTok = p.Read() // eat closing paren + break Token + } + + } + + p.PopIncludeNewlines() + + return &FunctionCallExpr{ + Name: string(name.Bytes), + Args: args, + + ExpandFinal: expandFinal, + + NameRange: name.Range, + OpenParenRange: openTok.Range, + CloseParenRange: closeTok.Range, + }, diags +} + +func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrack { + // Should never happen if callers are behaving + panic("parseTupleCons called without peeker pointing to open bracket") + } + + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() + + if forKeyword.TokenMatches(p.Peek()) { + return p.finishParsingForExpr(open) + } + + var close Token + + var diags hcl.Diagnostics + var exprs []Expression + + for { + next := p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + expr, exprDiags := p.ParseExpression() + exprs = append(exprs, expr) + diags = append(diags, exprDiags...) + + if p.recovery && exprDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing bracket to allow parsing to continue. 
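parseTupleCons allows arbitrary newlines inside the brackets (via PushIncludeNewlines(false)) and accepts a trailing comma before the closer. A quick way to exercise it through the package's public expression parser, with import paths assumed from elsewhere in this vendor tree:

```go
// Sketch: newlines and a trailing comma are both fine inside a tuple
// constructor.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte("[1, 2,\n 3,]")
	expr, diags := hclsyntax.ParseExpression(src, "t.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags)
		return
	}
	val, _ := expr.Value(nil)
	fmt.Println(val.LengthInt()) // 3
}
```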
+ close = p.recover(TokenCBrack) + break + } + + next = p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrack) + break + } + + p.Read() // eat comma + + } + + return &TupleConsExpr{ + Exprs: exprs, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrace { + // Should never happen if callers are behaving + panic("parseObjectCons called without peeker pointing to open brace") + } + + p.PushIncludeNewlines(true) + defer p.PopIncludeNewlines() + + if forKeyword.TokenMatches(p.Peek()) { + return p.finishParsingForExpr(open) + } + + var close Token + + var diags hcl.Diagnostics + var items []ObjectConsItem + + for { + next := p.Peek() + if next.Type == TokenNewline { + p.Read() // eat newline + continue + } + + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + var key Expression + var keyDiags hcl.Diagnostics + key, keyDiags = p.ParseExpression() + diags = append(diags, keyDiags...) + + if p.recovery && keyDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing brace to allow parsing to continue. + close = p.recover(TokenCBrace) + break + } + + // We wrap up the key expression in a special wrapper that deals + // with our special case that naked identifiers as object keys + // are interpreted as literal strings. + key = &ObjectConsKeyExpr{Wrapped: key} + + next = p.Peek() + if next.Type != TokenEqual && next.Type != TokenColon { + if !p.recovery { + if next.Type == TokenNewline || next.Type == TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item value", + Detail: "Expected an item value, introduced by an equals sign (\"=\").", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing key/value separator", + Detail: "Expected an equals sign (\"=\") to mark the beginning of the item value.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat equals sign or colon + + value, valueDiags := p.ParseExpression() + diags = append(diags, valueDiags...) + + if p.recovery && valueDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing brace to allow parsing to continue. 
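parseObjectCons accepts either "=" or ":" between key and value, and the ObjectConsKeyExpr wrapper makes a naked identifier key behave as a literal string. A small end-to-end check, assuming the same public helpers as above:

```go
// Sketch: naked-identifier and quoted keys, with "=" and ":" separators,
// both land in the same object value.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte(`{ name = "a", "count": 2 }`)
	expr, diags := hclsyntax.ParseExpression(src, "o.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags)
		return
	}
	val, _ := expr.Value(nil)
	fmt.Println(val.GetAttr("name").AsString())  // a
	fmt.Println(val.GetAttr("count").GoString()) // cty.NumberIntVal(2)
}
```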
+ close = p.recover(TokenCBrace) + break + } + + items = append(items, ObjectConsItem{ + KeyExpr: key, + ValueExpr: value, + }) + + next = p.Peek() + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma && next.Type != TokenNewline { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a newline or comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat comma or newline + + } + + return &ObjectConsExpr{ + Items: items, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) { + introducer := p.Read() + if !forKeyword.TokenMatches(introducer) { + // Should never happen if callers are behaving + panic("finishParsingForExpr called without peeker pointing to 'for' identifier") + } + + var makeObj bool + var closeType TokenType + switch open.Type { + case TokenOBrace: + makeObj = true + closeType = TokenCBrace + case TokenOBrack: + makeObj = false // making a tuple + closeType = TokenCBrack + default: + // Should never happen if callers are behaving + panic("finishParsingForExpr called with invalid open token") + } + + var diags hcl.Diagnostics + var keyName, valName string + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
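finishParsingForExpr handles both bracket forms: [for ...] builds a tuple and must not have a key expression, while {for ...} builds an object and requires one. A quick check of that rule via the public parser (import paths assumed; the expressions reference free variables, which is fine since nothing is evaluated):

```go
// Sketch: all three inputs parse to a ForExpr; only the third also
// produces a diagnostic, because a key expression is invalid in the
// tuple form.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func parse(src string) {
	expr, diags := hclsyntax.ParseExpression([]byte(src), "f.hcl", hcl.Pos{Line: 1, Column: 1})
	_, isFor := expr.(*hclsyntax.ForExpr)
	fmt.Println(isFor, diags.HasErrors())
}

func main() {
	parse(`[for v in coll : upper(v)]`)   // true false: tuple form
	parse(`{for k, v in coll : k => v}`)  // true false: object form
	parse(`[for k, v in coll : k => v]`)  // true true: key expr rejected
}
```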
+ if p.recovery && collDiags.HasErrors() { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + if p.Peek().Type != TokenColon { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires colon after collection expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat colon + + var keyExpr, valExpr Expression + var keyDiags, valDiags hcl.Diagnostics + valExpr, valDiags = p.ParseExpression() + if p.Peek().Type == TokenFatArrow { + // What we just parsed was actually keyExpr + p.Read() // eat the fat arrow + keyExpr, keyDiags = valExpr, valDiags + + valExpr, valDiags = p.ParseExpression() + } + diags = append(diags, keyDiags...) + diags = append(diags, valDiags...) + if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + group := false + var ellipsis Token + if p.Peek().Type == TokenEllipsis { + ellipsis = p.Read() + group = true + } + + var condExpr Expression + var condDiags hcl.Diagnostics + if ifKeyword.TokenMatches(p.Peek()) { + p.Read() // eat "if" + condExpr, condDiags = p.ParseExpression() + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + close := p.recover(p.oppositeBracket(open.Type)) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + } + + var close Token + if p.Peek().Type == closeType { + close = p.Read() + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Extra characters after the end of the 'for' expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close = p.recover(closeType) + } + + if !makeObj { + if keyExpr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is not valid when building a tuple.", + Subject: keyExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + if group { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Grouping ellipsis (...) 
cannot be used when building a tuple.", + Subject: &ellipsis.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } else { + if keyExpr == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is required when building an object.", + Subject: valExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } + + return &ForExpr{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + KeyExpr: keyExpr, + ValExpr: valExpr, + CondExpr: condExpr, + Group: group, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +// parseQuotedStringLiteral is a helper for parsing quoted strings that +// aren't allowed to contain any interpolations, such as block labels. +func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) { + oQuote := p.Read() + if oQuote.Type != TokenOQuote { + return "", oQuote.Range, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "A quoted string is required here.", + Subject: &oQuote.Range, + }, + } + } + + var diags hcl.Diagnostics + ret := &bytes.Buffer{} + var cQuote Token + +Token: + for { + tok := p.Read() + switch tok.Type { + + case TokenCQuote: + cQuote = tok + break Token + + case TokenQuotedLit: + s, sDiags := p.decodeStringLit(tok) + diags = append(diags, sDiags...) + ret.WriteString(s) + + case TokenTemplateControl, TokenTemplateInterp: + which := "$" + if tok.Type == TokenTemplateControl { + which = "!" + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: fmt.Sprintf( + "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.", + which, which, which, + ), + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenTemplateSeqEnd) + + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated string literal", + Detail: "Unable to find the closing quote mark before the end of the file.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + break Token + + default: + // Should never happen, as long as the scanner is behaving itself + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "This item is not valid in a string literal.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenOQuote) + break Token + + } + + } + + return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags +} + +// decodeStringLit processes the given token, which must be either a +// TokenQuotedLit or a TokenStringLit, returning the string resulting from +// resolving any escape sequences. +// +// If any error diagnostics are returned, the returned string may be incomplete +// or otherwise invalid. 
+func (p *parser) decodeStringLit(tok Token) (string, hcl.Diagnostics) { + var quoted bool + switch tok.Type { + case TokenQuotedLit: + quoted = true + case TokenStringLit: + quoted = false + default: + panic("decodeQuotedLit can only be used with TokenStringLit and TokenQuotedLit tokens") + } + var diags hcl.Diagnostics + + ret := make([]byte, 0, len(tok.Bytes)) + slices := scanStringLit(tok.Bytes, quoted) + + // We will mutate rng constantly as we walk through our token slices below. + // Any diagnostics must take a copy of this rng rather than simply pointing + // to it, e.g. by using rng.Ptr() rather than &rng. + rng := tok.Range + rng.End = rng.Start + +Slices: + for _, slice := range slices { + if len(slice) == 0 { + continue + } + + // Advance the start of our range to where the previous token ended + rng.Start = rng.End + + // Advance the end of our range to after our token. + b := slice + for len(b) > 0 { + adv, ch, _ := textseg.ScanGraphemeClusters(b, true) + rng.End.Byte += adv + switch ch[0] { + case '\r', '\n': + rng.End.Line++ + rng.End.Column = 1 + default: + rng.End.Column++ + } + b = b[adv:] + } + + TokenType: + switch slice[0] { + case '\\': + if !quoted { + // If we're not in quoted mode then just treat this token as + // normal. (Slices can still start with backslash even if we're + // not specifically looking for backslash sequences.) + break TokenType + } + if len(slice) < 2 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "Backslash must be followed by an escape sequence selector character.", + Subject: rng.Ptr(), + }) + break TokenType + } + + switch slice[1] { + + case 'n': + ret = append(ret, '\n') + continue Slices + case 'r': + ret = append(ret, '\r') + continue Slices + case 't': + ret = append(ret, '\t') + continue Slices + case '"': + ret = append(ret, '"') + continue Slices + case '\\': + ret = append(ret, '\\') + continue Slices + case 'u', 'U': + if slice[1] == 'u' && len(slice) != 6 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\u escape sequence must be followed by four hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } else if slice[1] == 'U' && len(slice) != 10 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\U escape sequence must be followed by eight hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } + + numHex := string(slice[2:]) + num, err := strconv.ParseUint(numHex, 16, 32) + if err != nil { + // Should never happen because the scanner won't match + // a sequence of digits that isn't valid. + panic(err) + } + + r := rune(num) + l := utf8.RuneLen(r) + if l == -1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("Cannot encode character U+%04x in UTF-8.", num), + Subject: rng.Ptr(), + }) + break TokenType + } + for i := 0; i < l; i++ { + ret = append(ret, 0) + } + rb := ret[len(ret)-l:] + utf8.EncodeRune(rb, r) + + continue Slices + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("The symbol %q is not a valid escape sequence selector.", slice[1:]), + Subject: rng.Ptr(), + }) + ret = append(ret, slice[1:]...) 
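The \u and \U branches above delegate hex parsing to strconv and the encoding to unicode/utf8. A focused sketch of that path, with the length checks (which the real scanner guarantees) trimmed down:

```go
// Sketch of decoding a \uXXXX / \UXXXXXXXX escape body into UTF-8.
package main

import (
	"fmt"
	"strconv"
	"unicode/utf8"
)

func decodeUnicodeEscape(hexDigits string) (string, error) {
	num, err := strconv.ParseUint(hexDigits, 16, 32)
	if err != nil {
		return "", err
	}
	r := rune(num)
	if utf8.RuneLen(r) == -1 {
		return "", fmt.Errorf("cannot encode U+%04x in UTF-8", num)
	}
	return string(r), nil
}

func main() {
	s, _ := decodeUnicodeEscape("2764") // the body of "\u2764"
	fmt.Println(s)                      // ❤
}
```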
+ continue Slices + } + + case '$', '%': + if len(slice) != 3 { + // Not long enough to be our escape sequence, so it's literal. + break TokenType + } + + if slice[1] == slice[0] && slice[2] == '{' { + ret = append(ret, slice[0]) + ret = append(ret, '{') + continue Slices + } + + break TokenType + } + + // If we fall out here or break out of here from the switch above + // then this slice is just a literal. + ret = append(ret, slice...) + } + + return string(ret), diags +} + +// setRecovery turns on recovery mode without actually doing any recovery. +// This can be used when a parser knowingly leaves the peeker in a useless +// place and wants to suppress errors that might result from that decision. +func (p *parser) setRecovery() { + p.recovery = true +} + +// recover seeks forward in the token stream until it finds TokenType "end", +// then returns with the peeker pointed at the following token. +// +// If the given token type is a bracketer, this function will additionally +// count nested instances of the brackets to try to leave the peeker at +// the end of the _current_ instance of that bracketer, skipping over any +// nested instances. This is a best-effort operation and may have +// unpredictable results on input with bad bracketer nesting. +func (p *parser) recover(end TokenType) Token { + start := p.oppositeBracket(end) + p.recovery = true + + nest := 0 + for { + tok := p.Read() + ty := tok.Type + if end == TokenTemplateSeqEnd && ty == TokenTemplateControl { + // normalize so that our matching behavior can work, since + // TokenTemplateControl/TokenTemplateInterp are asymmetrical + // with TokenTemplateSeqEnd and thus we need to count both + // openers if that's the closer we're looking for. + ty = TokenTemplateInterp + } + + switch ty { + case start: + nest++ + case end: + if nest < 1 { + return tok + } + + nest-- + case TokenEOF: + return tok + } + } +} + +// recoverOver seeks forward in the token stream until it finds a block +// starting with TokenType "start", then finds the corresponding end token, +// leaving the peeker pointed at the token after that end token. +// +// The given token type _must_ be a bracketer. For example, if the given +// start token is TokenOBrace then the parser will be left at the _end_ of +// the next brace-delimited block encountered, or at EOF if no such block +// is found or it is unclosed. +func (p *parser) recoverOver(start TokenType) { + end := p.oppositeBracket(start) + + // find the opening bracket first +Token: + for { + tok := p.Read() + switch tok.Type { + case start, TokenEOF: + break Token + } + } + + // Now use our existing recover function to locate the _end_ of the + // container we've found. 
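recover's bracket counting is what keeps it from stopping at a nested closer. A generic sketch of the same nesting-aware skip, with tokens simplified to runes:

```go
// Sketch of recover's nest counting: an opener increments the depth, a
// closer at depth zero ends the skip; a nested closer merely decrements.
package main

import "fmt"

func recoverTo(toks []rune, open, close rune) int {
	nest := 0
	for i, t := range toks {
		switch t {
		case open:
			nest++
		case close:
			if nest < 1 {
				return i + 1 // index just after the matching closer
			}
			nest--
		}
	}
	return len(toks) // hit EOF without finding the closer
}

func main() {
	toks := []rune("ab{cd}e}f")
	fmt.Println(recoverTo(toks, '{', '}')) // 8: skips the nested {cd}
}
```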
+ p.recover(end) +} + +func (p *parser) recoverAfterBodyItem() { + p.recovery = true + var open []TokenType + +Token: + for { + tok := p.Read() + + switch tok.Type { + + case TokenNewline: + if len(open) == 0 { + break Token + } + + case TokenEOF: + break Token + + case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl: + open = append(open, tok.Type) + + case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc: + opener := p.oppositeBracket(tok.Type) + for len(open) > 0 && open[len(open)-1] != opener { + open = open[:len(open)-1] + } + if len(open) > 0 { + open = open[:len(open)-1] + } + + case TokenTemplateSeqEnd: + for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl { + open = open[:len(open)-1] + } + if len(open) > 0 { + open = open[:len(open)-1] + } + + } + } +} + +// oppositeBracket finds the bracket that opposes the given bracketer, or +// NilToken if the given token isn't a bracketer. +// +// "Bracketer", for the sake of this function, is one end of a matching +// open/close set of tokens that establish a bracketing context. +func (p *parser) oppositeBracket(ty TokenType) TokenType { + switch ty { + + case TokenOBrace: + return TokenCBrace + case TokenOBrack: + return TokenCBrack + case TokenOParen: + return TokenCParen + case TokenOQuote: + return TokenCQuote + case TokenOHeredoc: + return TokenCHeredoc + + case TokenCBrace: + return TokenOBrace + case TokenCBrack: + return TokenOBrack + case TokenCParen: + return TokenOParen + case TokenCQuote: + return TokenOQuote + case TokenCHeredoc: + return TokenOHeredoc + + case TokenTemplateControl: + return TokenTemplateSeqEnd + case TokenTemplateInterp: + return TokenTemplateSeqEnd + case TokenTemplateSeqEnd: + // This is ambigous, but we return Interp here because that's + // what's assumed by the "recover" method. + return TokenTemplateInterp + + default: + return TokenNil + } +} + +func errPlaceholderExpr(rng hcl.Range) Expression { + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: rng, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go new file mode 100644 index 0000000..3711067 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go @@ -0,0 +1,728 @@ +package hclsyntax + +import ( + "fmt" + "strings" + "unicode" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { + return p.parseTemplate(TokenEOF) +} + +func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) { + exprs, passthru, rng, diags := p.parseTemplateInner(end) + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: rng, + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: rng, + }, diags +} + +func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { + parts, diags := p.parseTemplateParts(end) + tp := templateParser{ + Tokens: parts.Tokens, + SrcRange: parts.SrcRange, + } + exprs, exprsDiags := tp.parseRoot() + diags = append(diags, exprsDiags...) 
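The passthru case detected here is what makes "${foo}" yield the value of foo directly rather than its string rendering. It can be observed through the public template parser (import paths assumed as elsewhere in this tree):

```go
// Sketch: a template that is exactly one interpolation becomes a
// TemplateWrapExpr; any mixed content stays a TemplateExpr.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, _ := hclsyntax.ParseTemplate([]byte("${num}"), "t.hcl", hcl.Pos{Line: 1, Column: 1})
	_, wrapped := expr.(*hclsyntax.TemplateWrapExpr)
	fmt.Println(wrapped) // true: value passes through unconverted

	expr, _ = hclsyntax.ParseTemplate([]byte("n=${num}"), "t.hcl", hcl.Pos{Line: 1, Column: 1})
	_, wrapped = expr.(*hclsyntax.TemplateWrapExpr)
	fmt.Println(wrapped) // false: mixed content renders to a string
}
```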
+ + passthru := false + if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token + if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp { + passthru = true + } + } + + return exprs, passthru, parts.SrcRange, diags +} + +type templateParser struct { + Tokens []templateToken + SrcRange hcl.Range + + pos int +} + +func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) { + var exprs []Expression + var diags hcl.Diagnostics + + for { + next := p.Peek() + if _, isEnd := next.(*templateEndToken); isEnd { + break + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) + exprs = append(exprs, expr) + } + + return exprs, diags +} + +func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) { + next := p.Peek() + switch tok := next.(type) { + + case *templateLiteralToken: + p.Read() // eat literal + return &LiteralValueExpr{ + Val: cty.StringVal(tok.Val), + SrcRange: tok.SrcRange, + }, nil + + case *templateInterpToken: + p.Read() // eat interp + return tok.Expr, nil + + case *templateIfToken: + return p.parseIf() + + case *templateForToken: + return p.parseFor() + + case *templateEndToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + // This is a particularly unhelpful diagnostic, so callers + // should attempt to pre-empt it and produce a more helpful + // diagnostic that is context-aware. + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + case *templateEndCtrlToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()), + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + default: + // should never happen, because above should be exhaustive + panic(fmt.Sprintf("unhandled template token type %T", next)) + } +} + +func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) { + open := p.Read() + openIf, isIf := open.(*templateIfToken) + if !isIf { + // should never happen if caller is behaving + panic("parseIf called with peeker not pointing at if token") + } + + var ifExprs, elseExprs []Expression + var diags hcl.Diagnostics + var endifRange hcl.Range + + currentExprs := &ifExprs +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The if directive at %s is missing its corresponding endif directive.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + if currentExprs == &ifExprs { + currentExprs = &elseExprs + continue Token + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: fmt.Sprintf( + "Already in the else clause for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + + case templateEndIf: + endifRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + 
Detail: fmt.Sprintf( + "Expecting an endif directive for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) + *currentExprs = append(*currentExprs, expr) + } + + if len(ifExprs) == 0 { + ifExprs = append(ifExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openIf.SrcRange.Filename, + Start: openIf.SrcRange.End, + End: openIf.SrcRange.End, + }, + }) + } + if len(elseExprs) == 0 { + elseExprs = append(elseExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: endifRange.Filename, + Start: endifRange.Start, + End: endifRange.Start, + }, + }) + } + + trueExpr := &TemplateExpr{ + Parts: ifExprs, + SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()), + } + falseExpr := &TemplateExpr{ + Parts: elseExprs, + SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()), + } + + return &ConditionalExpr{ + Condition: openIf.CondExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange), + }, diags +} + +func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) { + open := p.Read() + openFor, isFor := open.(*templateForToken) + if !isFor { + // should never happen if caller is behaving + panic("parseFor called with peeker not pointing at for token") + } + + var contentExprs []Expression + var diags hcl.Diagnostics + var endforRange hcl.Range + +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The for directive at %s is missing its corresponding endfor directive.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: "An else clause is not expected for a for directive.", + Subject: &end.SrcRange, + }) + + case templateEndFor: + endforRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endfor directive corresponding to the for directive at %s.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
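The if/else directive handling reduces to an ordinary ConditionalExpr whose two branches are themselves templates, with missing branches filled by empty-string literals. A small end-to-end check, assuming the same public helpers and the go-cty types used throughout this tree:

```go
// Sketch: parse a %{ if } template and evaluate it with a variable bound
// in an EvalContext.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte("%{ if ok }yes%{ else }no%{ endif }")
	expr, diags := hclsyntax.ParseTemplate(src, "t.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags)
		return
	}
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{"ok": cty.True},
	}
	val, _ := expr.Value(ctx)
	fmt.Println(val.AsString()) // yes
}
```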
+ contentExprs = append(contentExprs, expr) + } + + if len(contentExprs) == 0 { + contentExprs = append(contentExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openFor.SrcRange.Filename, + Start: openFor.SrcRange.End, + End: openFor.SrcRange.End, + }, + }) + } + + contentExpr := &TemplateExpr{ + Parts: contentExprs, + SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()), + } + + forExpr := &ForExpr{ + KeyVar: openFor.KeyVar, + ValVar: openFor.ValVar, + + CollExpr: openFor.CollExpr, + ValExpr: contentExpr, + + SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange), + OpenRange: openFor.SrcRange, + CloseRange: endforRange, + } + + return &TemplateJoinExpr{ + Tuple: forExpr, + }, diags +} + +func (p *templateParser) Peek() templateToken { + return p.Tokens[p.pos] +} + +func (p *templateParser) Read() templateToken { + ret := p.Peek() + if _, end := ret.(*templateEndToken); !end { + p.pos++ + } + return ret +} + +// parseTemplateParts produces a flat sequence of "template tokens", which are +// either literal values (with any "trimming" already applied), interpolation +// sequences, or control flow markers. +// +// A further pass is required on the result to turn it into an AST. +func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) { + var parts []templateToken + var diags hcl.Diagnostics + + startRange := p.NextRange() + ltrimNext := false + nextCanTrimPrev := false + var endRange hcl.Range + +Token: + for { + next := p.Read() + if next.Type == end { + // all done! + endRange = next.Range + break + } + + ltrim := ltrimNext + ltrimNext = false + canTrimPrev := nextCanTrimPrev + nextCanTrimPrev = false + + switch next.Type { + case TokenStringLit, TokenQuotedLit: + str, strDiags := p.decodeStringLit(next) + diags = append(diags, strDiags...) + + if ltrim { + str = strings.TrimLeftFunc(str, unicode.IsSpace) + } + + parts = append(parts, &templateLiteralToken{ + Val: str, + SrcRange: next.Range, + }) + nextCanTrimPrev = true + + case TokenTemplateInterp: + // if the opener is ${~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + + p.PushIncludeNewlines(false) + expr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. 
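The ${~ and ~} strip markers are handled purely by trimming the neighboring literal tokens, as the code above shows. The same stdlib helpers in isolation:

```go
// Sketch of the strip-marker behavior: "${~" trims trailing whitespace
// from the preceding literal; "~}" schedules trimming of the next one.
package main

import (
	"fmt"
	"strings"
	"unicode"
)

func main() {
	prevLiteral := "hello   "
	opener := []byte("${~")
	if len(opener) == 3 && opener[2] == '~' {
		prevLiteral = strings.TrimRightFunc(prevLiteral, unicode.IsSpace)
	}
	fmt.Printf("%q\n", prevLiteral) // "hello"

	nextLiteral := "   world"
	closer := []byte("~}")
	if len(closer) == 2 && closer[0] == '~' {
		nextLiteral = strings.TrimLeftFunc(nextLiteral, unicode.IsSpace)
	}
	fmt.Printf("%q\n", nextLiteral) // "world"
}
```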
+ if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + parts = append(parts, &templateInterpToken{ + Expr: expr, + SrcRange: hcl.RangeBetween(next.Range, close.Range), + }) + + case TokenTemplateControl: + // if the opener is %{~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + p.PushIncludeNewlines(false) + + kw := p.Peek() + if kw.Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template directive", + Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.", + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat keyword token + + switch { + + case ifKeyword.TokenMatches(kw): + condExpr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + parts = append(parts, &templateIfToken{ + CondExpr: condExpr, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case elseKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateElse, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endifKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndIf, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case forKeyword.TokenMatches(kw): + var keyName, valName string + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
+ parts = append(parts, &templateForToken{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endforKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndFor, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + default: + if !p.recovery { + suggestions := []string{"if", "for", "else", "endif", "endfor"} + given := string(kw.Bytes) + suggestion := nameSuggestion(given, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template control keyword", + Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion), + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + + } + + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes), + Detail: "Expected a closing brace to end the sequence, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. + if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated template string", + Detail: "No closing marker was found for the string.", + Subject: &next.Range, + Context: hcl.RangeBetween(startRange, next.Range).Ptr(), + }) + } + final := p.recover(end) + endRange = final.Range + break Token + } + } + + if len(parts) == 0 { + // If a sequence has no content, we'll treat it as if it had an + // empty string in it because that's what the user probably means + // if they write "" in configuration. + parts = append(parts, &templateLiteralToken{ + Val: "", + SrcRange: hcl.Range{ + // Range is the zero-character span immediately after the + // opening quote. + Filename: startRange.Filename, + Start: startRange.End, + End: startRange.End, + }, + }) + } + + // Always end with an end token, so the parser can produce diagnostics + // about unclosed items with proper position information. + parts = append(parts, &templateEndToken{ + SrcRange: endRange, + }) + + ret := &templateParts{ + Tokens: parts, + SrcRange: hcl.RangeBetween(startRange, endRange), + } + + return ret, diags +} + +type templateParts struct { + Tokens []templateToken + SrcRange hcl.Range +} + +// templateToken is a higher-level token that represents a single atom within +// the template language. Our template parsing first raises the raw token +// stream to a sequence of templateToken, and then transforms the result into +// an expression tree. 
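+//
+// For example, a template like "hi %{ if x }${y}%{ endif }" first
+// becomes roughly the token sequence:
+//
+//     templateLiteralToken("hi "), templateIfToken(x),
+//     templateInterpToken(y), templateEndCtrlToken(endif),
+//     templateEndToken
+//
+// and only the second pass folds the if/endif pair into a conditional
+// expression within the resulting template expression.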
+type templateToken interface { + templateToken() templateToken +} + +type templateLiteralToken struct { + Val string + SrcRange hcl.Range + isTemplateToken +} + +type templateInterpToken struct { + Expr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateIfToken struct { + CondExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateForToken struct { + KeyVar string // empty if ignoring key + ValVar string + CollExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateEndCtrlType int + +const ( + templateEndIf templateEndCtrlType = iota + templateElse + templateEndFor +) + +type templateEndCtrlToken struct { + Type templateEndCtrlType + SrcRange hcl.Range + isTemplateToken +} + +func (t *templateEndCtrlToken) Name() string { + switch t.Type { + case templateEndIf: + return "endif" + case templateElse: + return "else" + case templateEndFor: + return "endfor" + default: + // should never happen + panic("invalid templateEndCtrlType") + } +} + +type templateEndToken struct { + SrcRange hcl.Range + isTemplateToken +} + +type isTemplateToken [0]int + +func (t isTemplateToken) templateToken() templateToken { + return t +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go new file mode 100644 index 0000000..2ff3ed6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go @@ -0,0 +1,159 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// ParseTraversalAbs parses an absolute traversal that is assumed to consume +// all of the remaining tokens in the peeker. The usual parser recovery +// behavior is not supported here because traversals are not expected to +// be parsed as part of a larger program. 
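+//
+// For example, given the tokens for:
+//
+//     foo.bar[0]["baz"]
+//
+// the result is a traversal with root "foo", an attribute step "bar",
+// and two index steps whose keys are the number 0 and the string
+// "baz" respectively.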
+func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) { + var ret hcl.Traversal + var diags hcl.Diagnostics + + // Absolute traversal must always begin with a variable name + varTok := p.Read() + if varTok.Type != TokenIdent { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Variable name required", + Detail: "Must begin with a variable name.", + Subject: &varTok.Range, + }) + return ret, diags + } + + varName := string(varTok.Bytes) + ret = append(ret, hcl.TraverseRoot{ + Name: varName, + SrcRange: varTok.Range, + }) + + for { + next := p.Peek() + + if next.Type == TokenEOF { + return ret, diags + } + + switch next.Type { + case TokenDot: + // Attribute access + dot := p.Read() // eat dot + nameTok := p.Read() + if nameTok.Type != TokenIdent { + if nameTok.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions (.*) may not be used here.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Dot must be followed by attribute name.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } + return ret, diags + } + + attrName := string(nameTok.Bytes) + ret = append(ret, hcl.TraverseAttr{ + Name: attrName, + SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range), + }) + case TokenOBrack: + // Index + open := p.Read() // eat open bracket + next := p.Peek() + + switch next.Type { + case TokenNumberLit: + tok := p.Read() // eat number + numVal, numDiags := p.numberLitValue(tok) + diags = append(diags, numDiags...) + + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + case TokenOQuote: + str, _, strDiags := p.parseQuotedStringLiteral() + diags = append(diags, strDiags...) 
+ + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: cty.StringVal(str), + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + default: + if next.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions ([*]) may not be used here.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Index value required", + Detail: "Index brackets must contain either a literal number or a literal string.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } + return ret, diags + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "Expected an attribute access or an index operator.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + return ret, diags + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go new file mode 100644 index 0000000..5a4b50e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go @@ -0,0 +1,212 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// This is set to true at init() time in tests, to enable more useful output +// if a stack discipline error is detected. It should not be enabled in +// normal mode since there is a performance penalty from accessing the +// runtime stack to produce the traces, but could be temporarily set to +// true for debugging if desired. +var tracePeekerNewlinesStack = false + +type peeker struct { + Tokens Tokens + NextIndex int + + IncludeComments bool + IncludeNewlinesStack []bool + + // used only when tracePeekerNewlinesStack is set + newlineStackChanges []peekerNewlineStackChange +} + +// for use in debugging the stack usage only +type peekerNewlineStackChange struct { + Pushing bool // if false, then popping + Frame runtime.Frame + Include bool +} + +func newPeeker(tokens Tokens, includeComments bool) *peeker { + return &peeker{ + Tokens: tokens, + IncludeComments: includeComments, + + IncludeNewlinesStack: []bool{true}, + } +} + +func (p *peeker) Peek() Token { + ret, _ := p.nextToken() + return ret +} + +func (p *peeker) Read() Token { + ret, nextIdx := p.nextToken() + p.NextIndex = nextIdx + return ret +} + +func (p *peeker) NextRange() hcl.Range { + return p.Peek().Range +} + +func (p *peeker) PrevRange() hcl.Range { + if p.NextIndex == 0 { + return p.NextRange() + } + + return p.Tokens[p.NextIndex-1].Range +} + +func (p *peeker) nextToken() (Token, int) { + for i := p.NextIndex; i < len(p.Tokens); i++ { + tok := p.Tokens[i] + switch tok.Type { + case TokenComment: + if !p.IncludeComments { + // Single-line comment tokens, starting with # or //, absorb + // the trailing newline that terminates them as part of their + // bytes. 
When we're filtering out comments, we must as a
+ // special case transform these to newline tokens in order
+ // to properly parse newline-terminated block items.
+
+ if p.includingNewlines() {
+ if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+ fakeNewline := Token{
+ Type: TokenNewline,
+ Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)],
+
+ // We use the whole token range as the newline
+ // range, even though that's a little... weird,
+ // because otherwise we'd need to go count
+ // characters again in order to figure out the
+ // column of the newline, and that complexity
+ // isn't justified when ranges of newlines are
+ // so rarely printed anyway.
+ Range: tok.Range,
+ }
+ return fakeNewline, i + 1
+ }
+ }
+
+ continue
+ }
+ case TokenNewline:
+ if !p.includingNewlines() {
+ continue
+ }
+ }
+
+ return tok, i + 1
+ }
+
+ // if we fall out here then we'll return the EOF token, and leave
+ // our index pointed off the end of the array so we'll keep
+ // returning EOF in future too.
+ return p.Tokens[len(p.Tokens)-1], len(p.Tokens)
+}
+
+func (p *peeker) includingNewlines() bool {
+ return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1]
+}
+
+func (p *peeker) PushIncludeNewlines(include bool) {
+ if tracePeekerNewlinesStack {
+ // Record who called us so that we can more easily track down any
+ // mismanagement of the stack in the parser.
+ callers := []uintptr{0}
+ runtime.Callers(2, callers)
+ frames := runtime.CallersFrames(callers)
+ frame, _ := frames.Next()
+ p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
+ true, frame, include,
+ })
+ }
+
+ p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include)
+}
+
+func (p *peeker) PopIncludeNewlines() bool {
+ stack := p.IncludeNewlinesStack
+ remain, ret := stack[:len(stack)-1], stack[len(stack)-1]
+ p.IncludeNewlinesStack = remain
+
+ if tracePeekerNewlinesStack {
+ // Record who called us so that we can more easily track down any
+ // mismanagement of the stack in the parser.
+ callers := []uintptr{0}
+ runtime.Callers(2, callers)
+ frames := runtime.CallersFrames(callers)
+ frame, _ := frames.Next()
+ p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
+ false, frame, ret,
+ })
+ }
+
+ return ret
+}
+
+// AssertEmptyIncludeNewlinesStack checks whether the IncludeNewlinesStack is
+// empty, panicking if it is not. This can be used to catch stack
+// mismanagement that might otherwise just cause confusing downstream errors.
+//
+// This function is a no-op if the stack is empty when called.
+//
+// If newlines stack tracing is enabled by setting the global variable
+// tracePeekerNewlinesStack at init time, a full log of all of the push/pop
+// calls will be produced to help identify which caller in the parser is
+// misbehaving.
+func (p *peeker) AssertEmptyIncludeNewlinesStack() {
+ if len(p.IncludeNewlinesStack) != 1 {
+ // Should never happen; indicates mismanagement of the stack inside
+ // the parser.
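+ // Every PushIncludeNewlines call made during parsing must be
+ // paired with exactly one PopIncludeNewlines call, e.g.:
+ //
+ //     p.PushIncludeNewlines(false)
+ //     // ... parse a newline-insensitive construct ...
+ //     p.PopIncludeNewlines()
+ //
+ // An unmatched push leaves an extra entry on the stack and
+ // trips this assertion.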
+ if p.newlineStackChanges != nil { // only if tracePeekerNewlinesStack is enabled above
+ panic(fmt.Errorf(
+ "non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s",
+ len(p.IncludeNewlinesStack)-1,
+ formatPeekerNewlineStackChanges(p.newlineStackChanges),
+ ))
+ } else {
+ panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack))
+ }
+ }
+}
+
+func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string {
+ indent := 0
+ var buf bytes.Buffer
+ for _, change := range changes {
+ funcName := change.Frame.Function
+ if idx := strings.LastIndexByte(funcName, '.'); idx != -1 {
+ funcName = funcName[idx+1:]
+ }
+ filename := change.Frame.File
+ if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 {
+ filename = filename[idx+1:]
+ }
+
+ switch change.Pushing {
+
+ case true:
+ buf.WriteString(strings.Repeat(" ", indent))
+ fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
+ indent++
+
+ case false:
+ indent--
+ buf.WriteString(strings.Repeat(" ", indent))
+ fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
+
+ }
+ }
+ return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
new file mode 100644
index 0000000..cf0ee29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
@@ -0,0 +1,171 @@
+package hclsyntax
+
+import (
+ "github.com/hashicorp/hcl2/hcl"
+)
+
+// ParseConfig parses the given buffer as a whole HCL config file, returning
+// a *hcl.File representing its contents. If calling HasErrors on the returned
+// diagnostics returns true, the returned body is likely to be incomplete
+// and should therefore be used with care.
+//
+// The body in the returned file has dynamic type *hclsyntax.Body, so callers
+// may freely type-assert this to get access to the full hclsyntax API in
+// situations where detailed access is required. However, most common use cases
+// should be served using the hcl.Body interface to ensure compatibility with
+// other configuration syntaxes, such as JSON.
+func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) {
+ tokens, diags := LexConfig(src, filename, start)
+ peeker := newPeeker(tokens, false)
+ parser := &parser{peeker: peeker}
+ body, parseDiags := parser.ParseBody(TokenEOF)
+ diags = append(diags, parseDiags...)
+
+ // Panic if the parser uses incorrect stack discipline with the peeker's
+ // newlines stack, since otherwise it will produce confusing downstream
+ // errors.
+ peeker.AssertEmptyIncludeNewlinesStack()
+
+ return &hcl.File{
+ Body: body,
+ Bytes: src,
+
+ Nav: navigation{
+ root: body,
+ },
+ }, diags
+}
+
+// ParseExpression parses the given buffer as a standalone HCL expression,
+// returning it as an instance of Expression.
+func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
+ tokens, diags := LexExpression(src, filename, start)
+ peeker := newPeeker(tokens, false)
+ parser := &parser{peeker: peeker}
+
+ // Bare expressions are always parsed in "ignore newlines" mode, as if
+ // they were wrapped in parentheses.
+ parser.PushIncludeNewlines(false)
+
+ expr, parseDiags := parser.ParseExpression()
+ diags = append(diags, parseDiags...)
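+
+ // A standalone expression must consume the entire token stream; any
+ // token remaining after a successful parse, other than TokenEOF, is
+ // reported below as extra characters.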
+ next := parser.Peek()
+ if next.Type != TokenEOF && !parser.recovery {
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Extra characters after expression",
+ Detail: "An expression was successfully parsed, but extra characters were found after it.",
+ Subject: &next.Range,
+ })
+ }
+
+ parser.PopIncludeNewlines()
+
+ // Panic if the parser uses incorrect stack discipline with the peeker's
+ // newlines stack, since otherwise it will produce confusing downstream
+ // errors.
+ peeker.AssertEmptyIncludeNewlinesStack()
+
+ return expr, diags
+}
+
+// ParseTemplate parses the given buffer as a standalone HCL template,
+// returning it as an instance of Expression.
+func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
+ tokens, diags := LexTemplate(src, filename, start)
+ peeker := newPeeker(tokens, false)
+ parser := &parser{peeker: peeker}
+ expr, parseDiags := parser.ParseTemplate()
+ diags = append(diags, parseDiags...)
+
+ // Panic if the parser uses incorrect stack discipline with the peeker's
+ // newlines stack, since otherwise it will produce confusing downstream
+ // errors.
+ peeker.AssertEmptyIncludeNewlinesStack()
+
+ return expr, diags
+}
+
+// ParseTraversalAbs parses the given buffer as a standalone absolute traversal.
+//
+// Parsing as a traversal is more limited than parsing as an expression since
+// it allows only attribute and indexing operations on variables. Traversals
+// are useful as a syntax for referring to objects without necessarily
+// evaluating them.
+func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
+ tokens, diags := LexExpression(src, filename, start)
+ peeker := newPeeker(tokens, false)
+ parser := &parser{peeker: peeker}
+
+ // Bare traversals are always parsed in "ignore newlines" mode, as if
+ // they were wrapped in parentheses.
+ parser.PushIncludeNewlines(false)
+
+ expr, parseDiags := parser.ParseTraversalAbs()
+ diags = append(diags, parseDiags...)
+
+ parser.PopIncludeNewlines()
+
+ // Panic if the parser uses incorrect stack discipline with the peeker's
+ // newlines stack, since otherwise it will produce confusing downstream
+ // errors.
+ peeker.AssertEmptyIncludeNewlinesStack()
+
+ return expr, diags
+}
+
+// LexConfig performs lexical analysis on the given buffer, treating it as a
+// whole HCL config file, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
+func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
+ tokens := scanTokens(src, filename, start, scanNormal)
+ diags := checkInvalidTokens(tokens)
+ return tokens, diags
+}
+
+// LexExpression performs lexical analysis on the given buffer, treating it as
+// a standalone HCL expression, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
+func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
+ // This is actually just the same thing as LexConfig, since configs
+ // and expressions lex in the same way.
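+ // (For example, LexExpression([]byte("a + b"), "x.hcl", hcl.Pos{Line: 1, Column: 1})
+ // yields roughly the tokens TokenIdent, TokenPlus, TokenIdent, TokenEOF.)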
+ tokens := scanTokens(src, filename, start, scanNormal) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// LexTemplate performs lexical analysis on the given buffer, treating it as a +// standalone HCL template, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + tokens := scanTokens(src, filename, start, scanTemplate) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// ValidIdentifier tests if the given string could be a valid identifier in +// a native syntax expression. +// +// This is useful when accepting names from the user that will be used as +// variable or attribute names in the scope, to ensure that any name chosen +// will be traversable using the variable or attribute traversal syntax. +func ValidIdentifier(s string) bool { + // This is a kinda-expensive way to do something pretty simple, but it + // is easiest to do with our existing scanner-related infrastructure here + // and nobody should be validating identifiers in a tight loop. + tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly) + return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go new file mode 100644 index 0000000..de1f524 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go @@ -0,0 +1,301 @@ +// line 1 "scan_string_lit.rl" + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. 
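+
+// The scanStringLit function defined below, after the generated state
+// tables, splits the body of a string literal into a sequence of byte
+// slices, alternating between literal runs and "interesting" sequences
+// such as escapes, template introducers, and newlines. For example,
+// scanStringLit([]byte(`hello\nworld`), true) returns roughly the three
+// slices "hello", `\n`, and "world".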
+ +// line 9 "scan_string_lit.go" +var _hclstrtok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 2, 1, 0, +} + +var _hclstrtok_key_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 10, 14, 18, + 22, 27, 31, 36, 41, 46, 51, 57, + 62, 74, 85, 96, 107, 118, 129, 140, + 151, +} + +var _hclstrtok_trans_keys []byte = []byte{ + 128, 191, 128, 191, 128, 191, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 123, 10, 13, 36, 37, 10, + 13, 36, 37, 92, 10, 13, 36, 37, + 92, 10, 13, 36, 37, 92, 10, 13, + 36, 37, 92, 10, 13, 36, 37, 92, + 123, 10, 13, 36, 37, 92, 85, 117, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 36, 37, 92, 48, + 57, 65, 70, 97, 102, 10, 13, 36, + 37, 92, 48, 57, 65, 70, 97, 102, + 10, 13, 36, 37, 92, 48, 57, 65, + 70, 97, 102, 10, 13, 36, 37, 92, + 48, 57, 65, 70, 97, 102, 10, 13, + 36, 37, 92, 48, 57, 65, 70, 97, + 102, 10, 13, 36, 37, 92, 48, 57, + 65, 70, 97, 102, 10, 13, 36, 37, + 92, 48, 57, 65, 70, 97, 102, 10, + 13, 36, 37, 92, 48, 57, 65, 70, + 97, 102, +} + +var _hclstrtok_single_lengths []byte = []byte{ + 0, 0, 0, 0, 4, 4, 4, 4, + 5, 4, 5, 5, 5, 5, 6, 5, + 2, 5, 5, 5, 5, 5, 5, 5, + 5, +} + +var _hclstrtok_range_lengths []byte = []byte{ + 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 5, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +var _hclstrtok_index_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 11, 16, 21, + 26, 32, 37, 43, 49, 55, 61, 68, + 74, 82, 91, 100, 109, 118, 127, 136, + 145, +} + +var _hclstrtok_indicies []byte = []byte{ + 0, 1, 2, 1, 3, 1, 5, 6, + 7, 8, 4, 10, 11, 12, 13, 9, + 14, 11, 12, 13, 9, 10, 11, 15, + 13, 9, 10, 11, 12, 13, 14, 9, + 10, 11, 12, 15, 9, 17, 18, 19, + 20, 21, 16, 23, 24, 25, 26, 27, + 22, 0, 24, 25, 26, 27, 22, 23, + 24, 28, 26, 27, 22, 23, 24, 25, + 26, 27, 0, 22, 23, 24, 25, 28, + 27, 22, 29, 30, 22, 2, 3, 31, + 22, 0, 23, 24, 25, 26, 27, 32, + 32, 32, 22, 23, 24, 25, 26, 27, + 33, 33, 33, 22, 23, 24, 25, 26, + 27, 34, 34, 34, 22, 23, 24, 25, + 26, 27, 30, 30, 30, 22, 23, 24, + 25, 26, 27, 35, 35, 35, 22, 23, + 24, 25, 26, 27, 36, 36, 36, 22, + 23, 24, 25, 26, 27, 37, 37, 37, + 22, 23, 24, 25, 26, 27, 0, 0, + 0, 22, +} + +var _hclstrtok_trans_targs []byte = []byte{ + 11, 0, 1, 2, 4, 5, 6, 7, + 9, 4, 5, 6, 7, 9, 5, 8, + 10, 11, 12, 13, 15, 16, 10, 11, + 12, 13, 15, 16, 14, 17, 21, 3, + 18, 19, 20, 22, 23, 24, +} + +var _hclstrtok_trans_actions []byte = []byte{ + 0, 0, 0, 0, 0, 1, 1, 1, + 1, 3, 5, 5, 5, 5, 0, 0, + 0, 1, 1, 1, 1, 1, 3, 5, + 5, 5, 5, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hclstrtok_eof_actions []byte = []byte{ + 0, 0, 0, 0, 0, 3, 3, 3, + 3, 3, 0, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +const hclstrtok_start int = 4 +const hclstrtok_first_final int = 4 +const hclstrtok_error int = 0 + +const hclstrtok_en_quoted int = 10 +const hclstrtok_en_unquoted int = 4 + +// line 10 "scan_string_lit.rl" + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + + // line 61 "scan_string_lit.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + eof := pe + + var cs int // current state + switch { + case quoted: + cs = hclstrtok_en_quoted + default: + cs = hclstrtok_en_unquoted + } + + // Make Go compiler happy + _ = ts + _ = eof + + /*token := func () { + ret = append(ret, data[ts:te]) + }*/ + + // line 154 "scan_string_lit.go" + { + } + + // line 158 "scan_string_lit.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 
{ + goto _out + } + _resume: + _keys = int(_hclstrtok_key_offsets[cs]) + _trans = int(_hclstrtok_index_offsets[cs]) + + _klen = int(_hclstrtok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _hclstrtok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hclstrtok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hclstrtok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hclstrtok_indicies[_trans]) + cs = int(_hclstrtok_trans_targs[_trans]) + + if _hclstrtok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hclstrtok_trans_actions[_trans]) + _nacts = uint(_hclstrtok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hclstrtok_actions[_acts-1] { + case 0: + // line 40 "scan_string_lit.rl" + + // If te is behind p then we've skipped over some literal + // characters which we must now return. + if te < p { + ret = append(ret, data[te:p]) + } + ts = p + + case 1: + // line 48 "scan_string_lit.rl" + + te = p + ret = append(ret, data[ts:te]) + + // line 255 "scan_string_lit.go" + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _hclstrtok_eof_actions[cs] + __nacts := uint(_hclstrtok_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _hclstrtok_actions[__acts-1] { + case 1: + // line 48 "scan_string_lit.rl" + + te = p + ret = append(ret, data[ts:te]) + + // line 281 "scan_string_lit.go" + } + } + } + + _out: + { + } + } + + // line 89 "scan_string_lit.rl" + + if te < p { + // Collect any leftover literal characters at the end of the input + ret = append(ret, data[te:p]) + } + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // tolerate it and let the caller deal with it. + if cs < hclstrtok_first_final { + ret = append(ret, data[p:len(data)]) + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl new file mode 100644 index 0000000..f8ac117 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl @@ -0,0 +1,105 @@ + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_string_lit.rl here, so edit away!) + + machine hclstrtok; + write data; +}%% + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + + %%{ + include UnicodeDerived "unicode_derived.rl"; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . 
UTF8Cont
+ );
+ BadUTF8 = any - AnyUTF8;
+
+ Hex = ('0'..'9' | 'a'..'f' | 'A'..'F');
+
+ # Our goal with these patterns is to capture user intent as faithfully
+ # as possible, even if the input is invalid. The caller will then verify
+ # whether each token is valid and generate suitable error messages
+ # if not.
+ UnicodeEscapeShort = "\\u" . Hex{0,4};
+ UnicodeEscapeLong = "\\U" . Hex{0,8};
+ UnicodeEscape = (UnicodeEscapeShort | UnicodeEscapeLong);
+ SimpleEscape = "\\" . (AnyUTF8 - ('U'|'u'))?;
+ TemplateEscape = ("$" . ("$" . ("{"?))?) | ("%" . ("%" . ("{"?))?);
+ Newline = ("\r\n" | "\r" | "\n");
+
+ action Begin {
+ // If te is behind p then we've skipped over some literal
+ // characters which we must now return.
+ if te < p {
+ ret = append(ret, data[te:p])
+ }
+ ts = p;
+ }
+ action End {
+ te = p;
+ ret = append(ret, data[ts:te]);
+ }
+
+ QuotedToken = (UnicodeEscape | SimpleEscape | TemplateEscape | Newline) >Begin %End;
+ UnquotedToken = (TemplateEscape | Newline) >Begin %End;
+ QuotedLiteral = (any - ("\\" | "$" | "%" | "\r" | "\n"));
+ UnquotedLiteral = (any - ("$" | "%" | "\r" | "\n"));
+
+ quoted := (QuotedToken | QuotedLiteral)**;
+ unquoted := (UnquotedToken | UnquotedLiteral)**;
+
+ }%%
+
+ // Ragel state
+ p := 0 // "Pointer" into data
+ pe := len(data) // End-of-data "pointer"
+ ts := 0
+ te := 0
+ eof := pe
+
+ var cs int // current state
+ switch {
+ case quoted:
+ cs = hclstrtok_en_quoted
+ default:
+ cs = hclstrtok_en_unquoted
+ }
+
+ // Make Go compiler happy
+ _ = ts
+ _ = eof
+
+ /*token := func () {
+ ret = append(ret, data[ts:te])
+ }*/
+
+ %%{
+ write init nocs;
+ write exec;
+ }%%
+
+ if te < p {
+ // Collect any leftover literal characters at the end of the input
+ ret = append(ret, data[te:p])
+ }
+
+ // If we fall out here without being in a final state then we've
+ // encountered something that the scanner can't match, which should
+ // be impossible (the scanner matches all bytes _somehow_) but we'll
+ // tolerate it and let the caller deal with it.
+ if cs < hclstrtok_first_final {
+ ret = append(ret, data[p:len(data)])
+ }
+
+ return ret
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
new file mode 100644
index 0000000..395e9c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
@@ -0,0 +1,5443 @@
+// line 1 "scan_tokens.rl"
+
+package hclsyntax
+
+import (
+ "bytes"
+
+ "github.com/hashicorp/hcl2/hcl"
+)
+
+// This file is generated from scan_tokens.rl. DO NOT EDIT.
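+
+// The state tables below drive scanTokens, which LexConfig and
+// LexExpression invoke with the scanNormal start condition, LexTemplate
+// with scanTemplate, and ValidIdentifier with scanIdentOnly.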
+ +// line 15 "scan_tokens.go" +var _hcltok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 2, 1, 3, + 1, 4, 1, 6, 1, 7, 1, 8, + 1, 9, 1, 10, 1, 11, 1, 12, + 1, 13, 1, 14, 1, 15, 1, 16, + 1, 17, 1, 18, 1, 19, 1, 22, + 1, 23, 1, 24, 1, 25, 1, 26, + 1, 27, 1, 28, 1, 29, 1, 30, + 1, 31, 1, 34, 1, 35, 1, 36, + 1, 37, 1, 38, 1, 39, 1, 40, + 1, 41, 1, 42, 1, 43, 1, 46, + 1, 47, 1, 48, 1, 49, 1, 50, + 1, 51, 1, 52, 1, 58, 1, 59, + 1, 60, 1, 61, 1, 62, 1, 63, + 1, 64, 1, 65, 1, 66, 1, 67, + 1, 68, 1, 69, 1, 70, 1, 71, + 1, 72, 1, 73, 1, 74, 1, 75, + 1, 76, 1, 77, 1, 78, 1, 79, + 1, 80, 1, 81, 1, 82, 1, 83, + 1, 84, 1, 85, 1, 86, 1, 87, + 2, 0, 15, 2, 1, 15, 2, 2, + 24, 2, 2, 28, 2, 3, 24, 2, + 3, 28, 2, 4, 5, 2, 7, 0, + 2, 7, 1, 2, 7, 20, 2, 7, + 21, 2, 7, 32, 2, 7, 33, 2, + 7, 44, 2, 7, 45, 2, 7, 53, + 2, 7, 54, 2, 7, 55, 2, 7, + 56, 2, 7, 57, 3, 7, 2, 20, + 3, 7, 3, 20, +} + +var _hcltok_key_offsets []int16 = []int16{ + 0, 0, 1, 2, 3, 5, 10, 14, + 16, 58, 99, 145, 146, 150, 156, 156, + 158, 160, 169, 175, 182, 183, 186, 187, + 191, 196, 205, 209, 213, 221, 223, 225, + 227, 230, 262, 264, 266, 270, 274, 277, + 288, 301, 320, 333, 349, 361, 377, 392, + 413, 423, 435, 446, 460, 475, 485, 497, + 506, 518, 520, 524, 545, 554, 564, 570, + 576, 577, 626, 628, 632, 634, 640, 647, + 655, 662, 665, 671, 675, 679, 681, 685, + 689, 693, 699, 707, 715, 721, 723, 727, + 729, 735, 739, 743, 747, 751, 756, 763, + 769, 771, 773, 777, 779, 785, 789, 793, + 803, 808, 822, 837, 839, 847, 849, 854, + 868, 873, 875, 879, 880, 884, 890, 896, + 906, 916, 927, 935, 938, 941, 945, 949, + 951, 954, 954, 957, 959, 989, 991, 993, + 997, 1002, 1006, 1011, 1013, 1015, 1017, 1026, + 1030, 1034, 1040, 1042, 1050, 1058, 1070, 1073, + 1079, 1083, 1085, 1089, 1109, 1111, 1113, 1124, + 1130, 1132, 1134, 1136, 1140, 1146, 1152, 1154, + 1159, 1163, 1165, 1173, 1191, 1231, 1241, 1245, + 1247, 1249, 1250, 1254, 1258, 1262, 1266, 1270, + 1275, 1279, 1283, 1287, 1289, 1291, 1295, 1305, + 1309, 1311, 1315, 1319, 1323, 1336, 1338, 1340, + 1344, 1346, 1350, 1352, 1354, 1384, 1388, 1392, + 1396, 1399, 1406, 1411, 1422, 1426, 1442, 1456, + 1460, 1465, 1469, 1473, 1479, 1481, 1487, 1489, + 1493, 1495, 1501, 1506, 1511, 1521, 1523, 1525, + 1529, 1533, 1535, 1548, 1550, 1554, 1558, 1566, + 1568, 1572, 1574, 1575, 1578, 1583, 1585, 1587, + 1591, 1593, 1597, 1603, 1623, 1629, 1635, 1637, + 1638, 1648, 1649, 1657, 1664, 1666, 1669, 1671, + 1673, 1675, 1680, 1684, 1688, 1693, 1703, 1713, + 1717, 1721, 1735, 1761, 1771, 1773, 1775, 1778, + 1780, 1783, 1785, 1789, 1791, 1792, 1796, 1798, + 1801, 1808, 1816, 1818, 1820, 1824, 1826, 1832, + 1843, 1846, 1848, 1852, 1857, 1887, 1892, 1894, + 1897, 1902, 1916, 1923, 1937, 1942, 1955, 1959, + 1972, 1977, 1995, 1996, 2005, 2009, 2021, 2026, + 2033, 2040, 2047, 2049, 2053, 2075, 2080, 2081, + 2085, 2087, 2137, 2140, 2151, 2155, 2157, 2163, + 2169, 2171, 2176, 2178, 2182, 2184, 2185, 2187, + 2189, 2195, 2197, 2199, 2203, 2209, 2222, 2224, + 2230, 2234, 2242, 2253, 2261, 2264, 2294, 2300, + 2303, 2308, 2310, 2314, 2318, 2322, 2324, 2331, + 2333, 2342, 2349, 2357, 2359, 2379, 2391, 2395, + 2397, 2415, 2454, 2456, 2460, 2462, 2469, 2473, + 2501, 2503, 2505, 2507, 2509, 2512, 2514, 2518, + 2522, 2524, 2527, 2529, 2531, 2534, 2536, 2538, + 2539, 2541, 2543, 2547, 2551, 2554, 2567, 2569, + 2575, 2579, 2581, 2585, 2589, 2603, 2606, 2615, + 2617, 2621, 2627, 2627, 2629, 2631, 2640, 2646, + 2653, 2654, 2657, 2658, 2662, 2667, 2676, 2680, + 2684, 2692, 2694, 2696, 2698, 2701, 2733, 2735, + 2737, 2741, 2745, 2748, 2759, 
2772, 2791, 2804, + 2820, 2832, 2848, 2863, 2884, 2894, 2906, 2917, + 2931, 2946, 2956, 2968, 2977, 2989, 2991, 2995, + 3016, 3025, 3035, 3041, 3047, 3048, 3097, 3099, + 3103, 3105, 3111, 3118, 3126, 3133, 3136, 3142, + 3146, 3150, 3152, 3156, 3160, 3164, 3170, 3178, + 3186, 3192, 3194, 3198, 3200, 3206, 3210, 3214, + 3218, 3222, 3227, 3234, 3240, 3242, 3244, 3248, + 3250, 3256, 3260, 3264, 3274, 3279, 3293, 3308, + 3310, 3318, 3320, 3325, 3339, 3344, 3346, 3350, + 3351, 3355, 3361, 3367, 3377, 3387, 3398, 3406, + 3409, 3412, 3416, 3420, 3422, 3425, 3425, 3428, + 3430, 3460, 3462, 3464, 3468, 3473, 3477, 3482, + 3484, 3486, 3488, 3497, 3501, 3505, 3511, 3513, + 3521, 3529, 3541, 3544, 3550, 3554, 3556, 3560, + 3580, 3582, 3584, 3595, 3601, 3603, 3605, 3607, + 3611, 3617, 3623, 3625, 3630, 3634, 3636, 3644, + 3662, 3702, 3712, 3716, 3718, 3720, 3721, 3725, + 3729, 3733, 3737, 3741, 3746, 3750, 3754, 3758, + 3760, 3762, 3766, 3776, 3780, 3782, 3786, 3790, + 3794, 3807, 3809, 3811, 3815, 3817, 3821, 3823, + 3825, 3855, 3859, 3863, 3867, 3870, 3877, 3882, + 3893, 3897, 3913, 3927, 3931, 3936, 3940, 3944, + 3950, 3952, 3958, 3960, 3964, 3966, 3972, 3977, + 3982, 3992, 3994, 3996, 4000, 4004, 4006, 4019, + 4021, 4025, 4029, 4037, 4039, 4043, 4045, 4046, + 4049, 4054, 4056, 4058, 4062, 4064, 4068, 4074, + 4094, 4100, 4106, 4108, 4109, 4119, 4120, 4128, + 4135, 4137, 4140, 4142, 4144, 4146, 4151, 4155, + 4159, 4164, 4174, 4184, 4188, 4192, 4206, 4232, + 4242, 4244, 4246, 4249, 4251, 4254, 4256, 4260, + 4262, 4263, 4267, 4269, 4271, 4278, 4282, 4289, + 4296, 4305, 4321, 4333, 4351, 4362, 4374, 4382, + 4400, 4408, 4438, 4441, 4451, 4461, 4473, 4484, + 4493, 4506, 4518, 4522, 4528, 4555, 4564, 4567, + 4572, 4578, 4583, 4604, 4608, 4614, 4614, 4621, + 4630, 4638, 4641, 4645, 4651, 4657, 4660, 4664, + 4671, 4677, 4686, 4695, 4699, 4703, 4707, 4711, + 4718, 4722, 4726, 4736, 4742, 4746, 4752, 4756, + 4759, 4765, 4771, 4783, 4787, 4791, 4801, 4805, + 4816, 4818, 4820, 4824, 4836, 4841, 4865, 4869, + 4875, 4897, 4906, 4910, 4913, 4914, 4922, 4930, + 4936, 4946, 4953, 4971, 4974, 4977, 4985, 4991, + 4995, 4999, 5003, 5009, 5017, 5022, 5028, 5032, + 5040, 5047, 5051, 5058, 5064, 5072, 5080, 5086, + 5092, 5103, 5107, 5119, 5128, 5145, 5162, 5165, + 5169, 5171, 5177, 5179, 5183, 5198, 5202, 5206, + 5210, 5214, 5218, 5220, 5226, 5231, 5235, 5241, + 5248, 5251, 5269, 5271, 5316, 5322, 5328, 5332, + 5336, 5342, 5346, 5352, 5358, 5365, 5367, 5373, + 5379, 5383, 5387, 5395, 5408, 5414, 5421, 5429, + 5435, 5444, 5450, 5454, 5459, 5463, 5471, 5475, + 5479, 5509, 5515, 5521, 5527, 5533, 5540, 5546, + 5553, 5558, 5568, 5572, 5579, 5585, 5589, 5596, + 5600, 5606, 5609, 5613, 5617, 5621, 5625, 5630, + 5635, 5639, 5650, 5654, 5658, 5664, 5672, 5676, + 5693, 5697, 5703, 5713, 5719, 5725, 5728, 5733, + 5742, 5746, 5750, 5756, 5760, 5766, 5774, 5792, + 5793, 5803, 5804, 5813, 5821, 5823, 5826, 5828, + 5830, 5832, 5837, 5850, 5854, 5869, 5898, 5909, + 5911, 5915, 5919, 5924, 5928, 5930, 5937, 5941, + 5949, 5953, 5954, 5955, 5957, 5959, 5961, 5963, + 5965, 5966, 5967, 5968, 5970, 5972, 5974, 5975, + 5976, 5977, 5978, 5980, 5982, 5984, 5985, 5986, + 5990, 5996, 5996, 5998, 6000, 6009, 6015, 6022, + 6023, 6026, 6027, 6031, 6036, 6045, 6049, 6053, + 6061, 6063, 6065, 6067, 6070, 6102, 6104, 6106, + 6110, 6114, 6117, 6128, 6141, 6160, 6173, 6189, + 6201, 6217, 6232, 6253, 6263, 6275, 6286, 6300, + 6315, 6325, 6337, 6346, 6358, 6360, 6364, 6385, + 6394, 6404, 6410, 6416, 6417, 6466, 6468, 6472, + 6474, 6480, 6487, 6495, 6502, 
6505, 6511, 6515, + 6519, 6521, 6525, 6529, 6533, 6539, 6547, 6555, + 6561, 6563, 6567, 6569, 6575, 6579, 6583, 6587, + 6591, 6596, 6603, 6609, 6611, 6613, 6617, 6619, + 6625, 6629, 6633, 6643, 6648, 6662, 6677, 6679, + 6687, 6689, 6694, 6708, 6713, 6715, 6719, 6720, + 6724, 6730, 6736, 6746, 6756, 6767, 6775, 6778, + 6781, 6785, 6789, 6791, 6794, 6794, 6797, 6799, + 6829, 6831, 6833, 6837, 6842, 6846, 6851, 6853, + 6855, 6857, 6866, 6870, 6874, 6880, 6882, 6890, + 6898, 6910, 6913, 6919, 6923, 6925, 6929, 6949, + 6951, 6953, 6964, 6970, 6972, 6974, 6976, 6980, + 6986, 6992, 6994, 6999, 7003, 7005, 7013, 7031, + 7071, 7081, 7085, 7087, 7089, 7090, 7094, 7098, + 7102, 7106, 7110, 7115, 7119, 7123, 7127, 7129, + 7131, 7135, 7145, 7149, 7151, 7155, 7159, 7163, + 7176, 7178, 7180, 7184, 7186, 7190, 7192, 7194, + 7224, 7228, 7232, 7236, 7239, 7246, 7251, 7262, + 7266, 7282, 7296, 7300, 7305, 7309, 7313, 7319, + 7321, 7327, 7329, 7333, 7335, 7341, 7346, 7351, + 7361, 7363, 7365, 7369, 7373, 7375, 7388, 7390, + 7394, 7398, 7406, 7408, 7412, 7414, 7415, 7418, + 7423, 7425, 7427, 7431, 7433, 7437, 7443, 7463, + 7469, 7475, 7477, 7478, 7488, 7489, 7497, 7504, + 7506, 7509, 7511, 7513, 7515, 7520, 7524, 7528, + 7533, 7543, 7553, 7557, 7561, 7575, 7601, 7611, + 7613, 7615, 7618, 7620, 7623, 7625, 7629, 7631, + 7632, 7636, 7638, 7640, 7647, 7651, 7658, 7665, + 7674, 7690, 7702, 7720, 7731, 7743, 7751, 7769, + 7777, 7807, 7810, 7820, 7830, 7842, 7853, 7862, + 7875, 7887, 7891, 7897, 7924, 7933, 7936, 7941, + 7947, 7952, 7973, 7977, 7983, 7983, 7990, 7999, + 8007, 8010, 8014, 8020, 8026, 8029, 8033, 8040, + 8046, 8055, 8064, 8068, 8072, 8076, 8080, 8087, + 8091, 8095, 8105, 8111, 8115, 8121, 8125, 8128, + 8134, 8140, 8152, 8156, 8160, 8170, 8174, 8185, + 8187, 8189, 8193, 8205, 8210, 8234, 8238, 8244, + 8266, 8275, 8279, 8282, 8283, 8291, 8299, 8305, + 8315, 8322, 8340, 8343, 8346, 8354, 8360, 8364, + 8368, 8372, 8378, 8386, 8391, 8397, 8401, 8409, + 8416, 8420, 8427, 8433, 8441, 8449, 8455, 8461, + 8472, 8476, 8488, 8497, 8514, 8531, 8534, 8538, + 8540, 8546, 8548, 8552, 8567, 8571, 8575, 8579, + 8583, 8587, 8589, 8595, 8600, 8604, 8610, 8617, + 8620, 8638, 8640, 8685, 8691, 8697, 8701, 8705, + 8711, 8715, 8721, 8727, 8734, 8736, 8742, 8748, + 8752, 8756, 8764, 8777, 8783, 8790, 8798, 8804, + 8813, 8819, 8823, 8828, 8832, 8840, 8844, 8848, + 8878, 8884, 8890, 8896, 8902, 8909, 8915, 8922, + 8927, 8937, 8941, 8948, 8954, 8958, 8965, 8969, + 8975, 8978, 8982, 8986, 8990, 8994, 8999, 9004, + 9008, 9019, 9023, 9027, 9033, 9041, 9045, 9062, + 9066, 9072, 9082, 9088, 9094, 9097, 9102, 9111, + 9115, 9119, 9125, 9129, 9135, 9143, 9161, 9162, + 9172, 9173, 9182, 9190, 9192, 9195, 9197, 9199, + 9201, 9206, 9219, 9223, 9238, 9267, 9278, 9280, + 9284, 9288, 9293, 9297, 9299, 9306, 9310, 9318, + 9322, 9398, 9400, 9401, 9402, 9403, 9404, 9405, + 9407, 9408, 9413, 9415, 9417, 9418, 9462, 9463, + 9464, 9466, 9471, 9475, 9475, 9477, 9479, 9490, + 9500, 9508, 9509, 9511, 9512, 9516, 9520, 9530, + 9534, 9541, 9552, 9559, 9563, 9569, 9580, 9612, + 9661, 9676, 9691, 9696, 9698, 9703, 9735, 9743, + 9745, 9767, 9789, 9791, 9807, 9823, 9839, 9855, + 9870, 9880, 9897, 9914, 9931, 9947, 9957, 9974, + 9990, 10006, 10022, 10038, 10054, 10070, 10086, 10087, + 10088, 10089, 10090, 10092, 10094, 10096, 10110, 10124, + 10138, 10152, 10153, 10154, 10156, 10158, 10160, 10174, + 10188, 10189, 10190, 10192, 10194, 10196, 10245, 10289, + 10291, 10296, 10300, 10300, 10302, 10304, 10315, 10325, + 10333, 10334, 10336, 10337, 10341, 10345, 
10355, 10359, + 10366, 10377, 10384, 10388, 10394, 10405, 10437, 10486, + 10501, 10516, 10521, 10523, 10528, 10560, 10568, 10570, + 10592, 10614, +} + +var _hcltok_trans_keys []byte = []byte{ + 10, 46, 42, 42, 47, 46, 69, 101, + 48, 57, 43, 45, 48, 57, 48, 57, + 45, 95, 194, 195, 198, 199, 203, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 239, 240, + 65, 90, 97, 122, 196, 202, 208, 218, + 229, 236, 95, 194, 195, 198, 199, 203, + 205, 206, 207, 210, 212, 213, 214, 215, + 216, 217, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 233, 234, 237, 239, + 240, 65, 90, 97, 122, 196, 202, 208, + 218, 229, 236, 10, 13, 45, 95, 194, + 195, 198, 199, 203, 204, 205, 206, 207, + 210, 212, 213, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 233, 234, 237, 239, 240, 243, 48, + 57, 65, 90, 97, 122, 196, 218, 229, + 236, 10, 170, 181, 183, 186, 128, 150, + 152, 182, 184, 255, 192, 255, 0, 127, + 173, 130, 133, 146, 159, 165, 171, 175, + 255, 181, 190, 184, 185, 192, 255, 140, + 134, 138, 142, 161, 163, 255, 182, 130, + 136, 137, 176, 151, 152, 154, 160, 190, + 136, 144, 192, 255, 135, 129, 130, 132, + 133, 144, 170, 176, 178, 144, 154, 160, + 191, 128, 169, 174, 255, 148, 169, 157, + 158, 189, 190, 192, 255, 144, 255, 139, + 140, 178, 255, 186, 128, 181, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 128, 173, + 128, 155, 160, 180, 182, 189, 148, 161, + 163, 255, 176, 164, 165, 132, 169, 177, + 141, 142, 145, 146, 179, 181, 186, 187, + 158, 133, 134, 137, 138, 143, 150, 152, + 155, 164, 165, 178, 255, 188, 129, 131, + 133, 138, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 182, 184, 185, 190, 255, + 157, 131, 134, 137, 138, 142, 144, 146, + 152, 159, 165, 182, 255, 129, 131, 133, + 141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 188, 255, 134, 138, 142, + 143, 145, 159, 164, 165, 176, 184, 186, + 255, 129, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 177, 128, 132, 135, 136, 139, 141, + 150, 151, 156, 157, 159, 163, 166, 175, + 156, 130, 131, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 190, 191, 144, 151, 128, + 130, 134, 136, 138, 141, 166, 175, 128, + 131, 133, 140, 142, 144, 146, 168, 170, + 185, 189, 255, 133, 137, 151, 142, 148, + 155, 159, 164, 165, 176, 255, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 179, + 181, 185, 188, 191, 158, 128, 132, 134, + 136, 138, 141, 149, 150, 160, 163, 166, + 175, 177, 178, 129, 131, 133, 140, 142, + 144, 146, 186, 189, 255, 133, 137, 143, + 147, 152, 158, 164, 165, 176, 185, 192, + 255, 189, 130, 131, 133, 150, 154, 177, + 179, 187, 138, 150, 128, 134, 143, 148, + 152, 159, 166, 175, 178, 179, 129, 186, + 128, 142, 144, 153, 132, 138, 141, 165, + 167, 129, 130, 135, 136, 148, 151, 153, + 159, 161, 163, 170, 171, 173, 185, 187, + 189, 134, 128, 132, 136, 141, 144, 153, + 156, 159, 128, 181, 183, 185, 152, 153, + 160, 169, 190, 191, 128, 135, 137, 172, + 177, 191, 128, 132, 134, 151, 153, 188, + 134, 128, 129, 130, 131, 137, 138, 139, + 140, 141, 142, 143, 144, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 173, + 175, 176, 177, 178, 179, 181, 182, 183, + 188, 189, 190, 191, 132, 152, 172, 184, + 185, 187, 128, 191, 128, 137, 144, 255, + 158, 159, 134, 187, 136, 140, 142, 143, + 137, 151, 153, 142, 143, 158, 159, 
137, + 177, 142, 143, 182, 183, 191, 255, 128, + 130, 133, 136, 150, 152, 255, 145, 150, + 151, 155, 156, 160, 168, 178, 255, 128, + 143, 160, 255, 182, 183, 190, 255, 129, + 255, 173, 174, 192, 255, 129, 154, 160, + 255, 171, 173, 185, 255, 128, 140, 142, + 148, 160, 180, 128, 147, 160, 172, 174, + 176, 178, 179, 148, 150, 152, 155, 158, + 159, 170, 255, 139, 141, 144, 153, 160, + 255, 184, 255, 128, 170, 176, 255, 182, + 255, 128, 158, 160, 171, 176, 187, 134, + 173, 176, 180, 128, 171, 176, 255, 138, + 143, 155, 255, 128, 155, 160, 255, 159, + 189, 190, 192, 255, 167, 128, 137, 144, + 153, 176, 189, 140, 143, 154, 170, 180, + 255, 180, 255, 128, 183, 128, 137, 141, + 189, 128, 136, 144, 146, 148, 182, 184, + 185, 128, 181, 187, 191, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 255, 190, 128, 180, 182, 188, + 130, 132, 134, 140, 144, 147, 150, 155, + 160, 172, 178, 180, 182, 188, 128, 129, + 130, 131, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 191, 255, 129, + 147, 149, 176, 178, 190, 192, 255, 144, + 156, 161, 144, 156, 165, 176, 130, 135, + 149, 164, 166, 168, 138, 147, 152, 157, + 170, 185, 188, 191, 142, 133, 137, 160, + 255, 137, 255, 128, 174, 176, 255, 159, + 165, 170, 180, 255, 167, 173, 128, 165, + 176, 255, 168, 174, 176, 190, 192, 255, + 128, 150, 160, 166, 168, 174, 176, 182, + 184, 190, 128, 134, 136, 142, 144, 150, + 152, 158, 160, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 145, 255, 133, + 135, 161, 175, 177, 181, 184, 188, 160, + 151, 152, 187, 192, 255, 133, 173, 177, + 255, 143, 159, 187, 255, 176, 191, 182, + 183, 184, 191, 192, 255, 150, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 255, 141, 255, 144, + 189, 141, 143, 172, 255, 191, 128, 175, + 180, 189, 151, 159, 162, 255, 175, 137, + 138, 184, 255, 183, 255, 168, 255, 128, + 179, 188, 134, 143, 154, 159, 184, 186, + 190, 255, 128, 173, 176, 255, 148, 159, + 189, 255, 129, 142, 154, 159, 191, 255, + 128, 182, 128, 141, 144, 153, 160, 182, + 186, 255, 128, 130, 155, 157, 160, 175, + 178, 182, 129, 134, 137, 142, 145, 150, + 160, 166, 168, 174, 176, 255, 155, 166, + 175, 128, 170, 172, 173, 176, 185, 158, + 159, 160, 255, 164, 175, 135, 138, 188, + 255, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 165, 186, 174, 175, 154, + 255, 190, 128, 134, 147, 151, 157, 168, + 170, 182, 184, 188, 128, 129, 131, 132, + 134, 255, 147, 255, 190, 255, 144, 145, + 136, 175, 188, 255, 128, 143, 160, 175, + 179, 180, 141, 143, 176, 180, 182, 255, + 189, 255, 191, 144, 153, 161, 186, 129, + 154, 166, 255, 191, 255, 130, 135, 138, + 143, 146, 151, 154, 156, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 135, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 153, 155, 178, 179, 128, + 139, 141, 166, 168, 186, 188, 189, 191, + 255, 142, 143, 158, 255, 187, 255, 128, + 180, 189, 128, 156, 160, 255, 145, 159, + 161, 255, 128, 159, 176, 255, 139, 143, + 187, 255, 128, 157, 160, 255, 144, 132, + 135, 150, 255, 158, 159, 170, 175, 148, + 151, 188, 255, 128, 167, 176, 255, 164, + 255, 183, 255, 128, 149, 160, 167, 136, + 188, 128, 133, 138, 181, 183, 184, 191, + 255, 150, 159, 183, 255, 128, 158, 160, + 178, 180, 181, 128, 149, 160, 185, 128, + 183, 190, 191, 191, 
128, 131, 133, 134, + 140, 147, 149, 151, 153, 179, 184, 186, + 160, 188, 128, 156, 128, 135, 137, 166, + 128, 181, 128, 149, 160, 178, 128, 145, + 128, 178, 129, 130, 131, 132, 133, 135, + 136, 138, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 155, 156, + 162, 163, 171, 176, 177, 178, 128, 134, + 135, 165, 176, 190, 144, 168, 176, 185, + 128, 180, 182, 191, 182, 144, 179, 155, + 133, 137, 141, 143, 157, 255, 190, 128, + 145, 147, 183, 136, 128, 134, 138, 141, + 143, 157, 159, 168, 176, 255, 171, 175, + 186, 255, 128, 131, 133, 140, 143, 144, + 147, 168, 170, 176, 178, 179, 181, 185, + 188, 191, 144, 151, 128, 132, 135, 136, + 139, 141, 157, 163, 166, 172, 176, 180, + 128, 138, 144, 153, 134, 136, 143, 154, + 255, 128, 181, 184, 255, 129, 151, 158, + 255, 129, 131, 133, 143, 154, 255, 128, + 137, 128, 153, 157, 171, 176, 185, 160, + 255, 170, 190, 192, 255, 128, 184, 128, + 136, 138, 182, 184, 191, 128, 144, 153, + 178, 255, 168, 144, 145, 183, 255, 128, + 142, 145, 149, 129, 141, 144, 146, 147, + 148, 175, 255, 132, 255, 128, 144, 129, + 143, 144, 153, 145, 152, 135, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 161, 167, 185, 255, 128, 158, + 160, 169, 144, 173, 176, 180, 128, 131, + 144, 153, 163, 183, 189, 255, 144, 255, + 133, 143, 191, 255, 143, 159, 160, 128, + 129, 255, 159, 160, 171, 172, 255, 173, + 255, 179, 255, 128, 176, 177, 178, 128, + 129, 171, 175, 189, 255, 128, 136, 144, + 153, 157, 158, 133, 134, 137, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 168, 169, 170, 150, 153, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 149, 157, 173, + 186, 188, 160, 161, 163, 164, 167, 168, + 132, 134, 149, 157, 186, 139, 140, 191, + 255, 134, 128, 132, 138, 144, 146, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 141, 192, 255, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 133, 143, 151, 255, 139, 143, 154, + 255, 164, 167, 185, 187, 128, 131, 133, + 159, 161, 162, 169, 178, 180, 183, 130, + 135, 137, 139, 148, 151, 153, 155, 157, + 159, 164, 190, 141, 143, 145, 146, 161, + 162, 167, 170, 172, 178, 180, 183, 185, + 188, 128, 137, 139, 155, 161, 163, 165, + 169, 171, 187, 155, 156, 151, 255, 156, + 157, 160, 181, 255, 186, 187, 255, 162, + 255, 160, 168, 161, 167, 158, 255, 160, + 132, 135, 133, 134, 176, 255, 170, 181, + 186, 191, 176, 180, 182, 183, 186, 189, + 134, 140, 136, 138, 142, 161, 163, 255, + 130, 137, 136, 255, 144, 170, 176, 178, + 160, 191, 128, 138, 174, 175, 177, 255, + 148, 150, 164, 167, 173, 176, 185, 189, + 190, 192, 255, 144, 146, 175, 141, 255, + 166, 176, 178, 255, 186, 138, 170, 180, + 181, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 184, + 186, 187, 188, 189, 190, 183, 185, 154, + 164, 168, 128, 149, 128, 152, 189, 132, + 185, 144, 152, 161, 177, 255, 169, 177, + 129, 132, 141, 142, 145, 146, 179, 181, + 186, 188, 190, 255, 142, 156, 157, 159, + 161, 176, 177, 133, 138, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 182, 184, + 185, 158, 153, 156, 178, 180, 189, 133, + 141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 144, 185, 160, 161, 189, + 133, 140, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 185, 177, 156, 157, 159, + 161, 131, 156, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 144, 189, 133, 140, 142, + 144, 
146, 168, 170, 185, 152, 154, 160, + 161, 128, 189, 133, 140, 142, 144, 146, + 168, 170, 179, 181, 185, 158, 160, 161, + 177, 178, 189, 133, 140, 142, 144, 146, + 186, 142, 148, 150, 159, 161, 186, 191, + 189, 133, 150, 154, 177, 179, 187, 128, + 134, 129, 176, 178, 179, 132, 138, 141, + 165, 167, 189, 129, 130, 135, 136, 148, + 151, 153, 159, 161, 163, 170, 171, 173, + 176, 178, 179, 134, 128, 132, 156, 159, + 128, 128, 135, 137, 172, 136, 140, 128, + 129, 130, 131, 137, 138, 139, 140, 141, + 142, 143, 144, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 184, 188, 189, 190, 191, 132, 152, 185, + 187, 191, 128, 170, 161, 144, 149, 154, + 157, 165, 166, 174, 176, 181, 255, 130, + 141, 143, 159, 155, 255, 128, 140, 142, + 145, 160, 177, 128, 145, 160, 172, 174, + 176, 151, 156, 170, 128, 168, 176, 255, + 138, 255, 128, 150, 160, 255, 149, 255, + 167, 133, 179, 133, 139, 131, 160, 174, + 175, 186, 255, 166, 255, 128, 163, 141, + 143, 154, 189, 169, 172, 174, 177, 181, + 182, 129, 130, 132, 133, 134, 176, 177, + 178, 179, 180, 181, 182, 183, 177, 191, + 165, 170, 175, 177, 180, 255, 168, 174, + 176, 255, 128, 134, 136, 142, 144, 150, + 152, 158, 128, 129, 130, 131, 132, 133, + 134, 135, 144, 145, 255, 133, 135, 161, + 169, 177, 181, 184, 188, 160, 151, 154, + 128, 146, 147, 148, 152, 153, 154, 155, + 156, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 129, 255, 141, 143, + 160, 169, 172, 255, 191, 128, 174, 130, + 134, 139, 163, 255, 130, 179, 187, 189, + 178, 183, 138, 165, 176, 255, 135, 159, + 189, 255, 132, 178, 143, 160, 164, 166, + 175, 186, 190, 128, 168, 186, 128, 130, + 132, 139, 160, 182, 190, 255, 176, 178, + 180, 183, 184, 190, 255, 128, 130, 155, + 157, 160, 170, 178, 180, 128, 162, 164, + 169, 171, 172, 173, 174, 175, 180, 181, + 182, 183, 185, 186, 187, 188, 189, 190, + 191, 165, 179, 157, 190, 128, 134, 147, + 151, 159, 168, 170, 182, 184, 188, 176, + 180, 182, 255, 161, 186, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 145, 255, + 139, 143, 182, 255, 158, 175, 128, 144, + 147, 149, 151, 153, 179, 128, 135, 137, + 164, 128, 130, 131, 132, 133, 134, 135, + 136, 138, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 162, + 163, 171, 176, 177, 178, 131, 183, 131, + 175, 144, 168, 131, 166, 182, 144, 178, + 131, 178, 154, 156, 129, 132, 128, 145, + 147, 171, 159, 255, 144, 157, 161, 135, + 138, 128, 175, 135, 132, 133, 128, 174, + 152, 155, 132, 128, 170, 128, 153, 160, + 190, 192, 255, 128, 136, 138, 174, 128, + 178, 255, 160, 168, 169, 171, 172, 173, + 174, 188, 189, 190, 191, 161, 167, 144, + 173, 128, 131, 163, 183, 189, 255, 133, + 143, 145, 255, 147, 159, 128, 176, 177, + 178, 128, 136, 144, 153, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 150, 153, 131, 140, 255, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 133, + 255, 170, 181, 183, 186, 128, 150, 152, + 182, 184, 255, 192, 255, 128, 255, 173, + 130, 133, 146, 159, 165, 171, 175, 255, + 181, 190, 184, 185, 192, 255, 140, 134, + 138, 142, 161, 163, 255, 182, 130, 136, + 137, 176, 151, 152, 154, 160, 190, 136, + 144, 192, 255, 135, 129, 130, 132, 133, + 144, 170, 176, 178, 144, 154, 
160, 191, + 128, 169, 174, 255, 148, 169, 157, 158, + 189, 190, 192, 255, 144, 255, 139, 140, + 178, 255, 186, 128, 181, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 128, 173, 128, + 155, 160, 180, 182, 189, 148, 161, 163, + 255, 176, 164, 165, 132, 169, 177, 141, + 142, 145, 146, 179, 181, 186, 187, 158, + 133, 134, 137, 138, 143, 150, 152, 155, + 164, 165, 178, 255, 188, 129, 131, 133, + 138, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 182, 184, 185, 190, 255, 157, + 131, 134, 137, 138, 142, 144, 146, 152, + 159, 165, 182, 255, 129, 131, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 255, 134, 138, 142, 143, + 145, 159, 164, 165, 176, 184, 186, 255, + 129, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 177, 128, 132, 135, 136, 139, 141, 150, + 151, 156, 157, 159, 163, 166, 175, 156, + 130, 131, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 190, 191, 144, 151, 128, 130, + 134, 136, 138, 141, 166, 175, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 185, + 189, 255, 133, 137, 151, 142, 148, 155, + 159, 164, 165, 176, 255, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 179, 181, + 185, 188, 191, 158, 128, 132, 134, 136, + 138, 141, 149, 150, 160, 163, 166, 175, + 177, 178, 129, 131, 133, 140, 142, 144, + 146, 186, 189, 255, 133, 137, 143, 147, + 152, 158, 164, 165, 176, 185, 192, 255, + 189, 130, 131, 133, 150, 154, 177, 179, + 187, 138, 150, 128, 134, 143, 148, 152, + 159, 166, 175, 178, 179, 129, 186, 128, + 142, 144, 153, 132, 138, 141, 165, 167, + 129, 130, 135, 136, 148, 151, 153, 159, + 161, 163, 170, 171, 173, 185, 187, 189, + 134, 128, 132, 136, 141, 144, 153, 156, + 159, 128, 181, 183, 185, 152, 153, 160, + 169, 190, 191, 128, 135, 137, 172, 177, + 191, 128, 132, 134, 151, 153, 188, 134, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 173, 175, + 176, 177, 178, 179, 181, 182, 183, 188, + 189, 190, 191, 132, 152, 172, 184, 185, + 187, 128, 191, 128, 137, 144, 255, 158, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 142, 143, 182, 183, 191, 255, 128, 130, + 133, 136, 150, 152, 255, 145, 150, 151, + 155, 156, 160, 168, 178, 255, 128, 143, + 160, 255, 182, 183, 190, 255, 129, 255, + 173, 174, 192, 255, 129, 154, 160, 255, + 171, 173, 185, 255, 128, 140, 142, 148, + 160, 180, 128, 147, 160, 172, 174, 176, + 178, 179, 148, 150, 152, 155, 158, 159, + 170, 255, 139, 141, 144, 153, 160, 255, + 184, 255, 128, 170, 176, 255, 182, 255, + 128, 158, 160, 171, 176, 187, 134, 173, + 176, 180, 128, 171, 176, 255, 138, 143, + 155, 255, 128, 155, 160, 255, 159, 189, + 190, 192, 255, 167, 128, 137, 144, 153, + 176, 189, 140, 143, 154, 170, 180, 255, + 180, 255, 128, 183, 128, 137, 141, 189, + 128, 136, 144, 146, 148, 182, 184, 185, + 128, 181, 187, 191, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 255, 190, 128, 180, 182, 188, 130, + 132, 134, 140, 144, 147, 150, 155, 160, + 172, 178, 180, 182, 188, 128, 129, 130, + 131, 132, 133, 134, 176, 177, 178, 179, + 180, 181, 182, 183, 191, 255, 129, 147, + 149, 176, 178, 190, 192, 255, 144, 156, + 161, 144, 156, 165, 176, 130, 135, 149, + 164, 166, 168, 138, 147, 152, 157, 170, + 185, 188, 191, 142, 133, 137, 160, 255, + 137, 255, 128, 174, 176, 255, 159, 165, + 170, 180, 255, 
167, 173, 128, 165, 176, + 255, 168, 174, 176, 190, 192, 255, 128, + 150, 160, 166, 168, 174, 176, 182, 184, + 190, 128, 134, 136, 142, 144, 150, 152, + 158, 160, 191, 128, 129, 130, 131, 132, + 133, 134, 135, 144, 145, 255, 133, 135, + 161, 175, 177, 181, 184, 188, 160, 151, + 152, 187, 192, 255, 133, 173, 177, 255, + 143, 159, 187, 255, 176, 191, 182, 183, + 184, 191, 192, 255, 150, 255, 128, 146, + 147, 148, 152, 153, 154, 155, 156, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 129, 255, 141, 255, 144, 189, + 141, 143, 172, 255, 191, 128, 175, 180, + 189, 151, 159, 162, 255, 175, 137, 138, + 184, 255, 183, 255, 168, 255, 128, 179, + 188, 134, 143, 154, 159, 184, 186, 190, + 255, 128, 173, 176, 255, 148, 159, 189, + 255, 129, 142, 154, 159, 191, 255, 128, + 182, 128, 141, 144, 153, 160, 182, 186, + 255, 128, 130, 155, 157, 160, 175, 178, + 182, 129, 134, 137, 142, 145, 150, 160, + 166, 168, 174, 176, 255, 155, 166, 175, + 128, 170, 172, 173, 176, 185, 158, 159, + 160, 255, 164, 175, 135, 138, 188, 255, + 164, 169, 171, 172, 173, 174, 175, 180, + 181, 182, 183, 184, 185, 187, 188, 189, + 190, 191, 165, 186, 174, 175, 154, 255, + 190, 128, 134, 147, 151, 157, 168, 170, + 182, 184, 188, 128, 129, 131, 132, 134, + 255, 147, 255, 190, 255, 144, 145, 136, + 175, 188, 255, 128, 143, 160, 175, 179, + 180, 141, 143, 176, 180, 182, 255, 189, + 255, 191, 144, 153, 161, 186, 129, 154, + 166, 255, 191, 255, 130, 135, 138, 143, + 146, 151, 154, 156, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 135, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 128, 139, + 141, 166, 168, 186, 188, 189, 191, 255, + 142, 143, 158, 255, 187, 255, 128, 180, + 189, 128, 156, 160, 255, 145, 159, 161, + 255, 128, 159, 176, 255, 139, 143, 187, + 255, 128, 157, 160, 255, 144, 132, 135, + 150, 255, 158, 159, 170, 175, 148, 151, + 188, 255, 128, 167, 176, 255, 164, 255, + 183, 255, 128, 149, 160, 167, 136, 188, + 128, 133, 138, 181, 183, 184, 191, 255, + 150, 159, 183, 255, 128, 158, 160, 178, + 180, 181, 128, 149, 160, 185, 128, 183, + 190, 191, 191, 128, 131, 133, 134, 140, + 147, 149, 151, 153, 179, 184, 186, 160, + 188, 128, 156, 128, 135, 137, 166, 128, + 181, 128, 149, 160, 178, 128, 145, 128, + 178, 129, 130, 131, 132, 133, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 155, 156, 162, + 163, 171, 176, 177, 178, 128, 134, 135, + 165, 176, 190, 144, 168, 176, 185, 128, + 180, 182, 191, 182, 144, 179, 155, 133, + 137, 141, 143, 157, 255, 190, 128, 145, + 147, 183, 136, 128, 134, 138, 141, 143, + 157, 159, 168, 176, 255, 171, 175, 186, + 255, 128, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 144, 151, 128, 132, 135, 136, 139, + 141, 157, 163, 166, 172, 176, 180, 128, + 138, 144, 153, 134, 136, 143, 154, 255, + 128, 181, 184, 255, 129, 151, 158, 255, + 129, 131, 133, 143, 154, 255, 128, 137, + 128, 153, 157, 171, 176, 185, 160, 255, + 170, 190, 192, 255, 128, 184, 128, 136, + 138, 182, 184, 191, 128, 144, 153, 178, + 255, 168, 144, 145, 183, 255, 128, 142, + 145, 149, 129, 141, 144, 146, 147, 148, + 175, 255, 132, 255, 128, 144, 129, 143, + 144, 153, 145, 152, 135, 255, 160, 168, + 169, 171, 172, 173, 174, 188, 189, 190, + 191, 161, 167, 185, 255, 128, 158, 160, + 169, 144, 173, 176, 180, 128, 131, 144, + 
153, 163, 183, 189, 255, 144, 255, 133, + 143, 191, 255, 143, 159, 160, 128, 129, + 255, 159, 160, 171, 172, 255, 173, 255, + 179, 255, 128, 176, 177, 178, 128, 129, + 171, 175, 189, 255, 128, 136, 144, 153, + 157, 158, 133, 134, 137, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 168, 169, 170, 150, 153, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 149, 157, 173, 186, + 188, 160, 161, 163, 164, 167, 168, 132, + 134, 149, 157, 186, 139, 140, 191, 255, + 134, 128, 132, 138, 144, 146, 255, 166, + 167, 129, 155, 187, 149, 181, 143, 175, + 137, 169, 131, 140, 141, 192, 255, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 133, 143, 151, 255, 139, 143, 154, 255, + 164, 167, 185, 187, 128, 131, 133, 159, + 161, 162, 169, 178, 180, 183, 130, 135, + 137, 139, 148, 151, 153, 155, 157, 159, + 164, 190, 141, 143, 145, 146, 161, 162, + 167, 170, 172, 178, 180, 183, 185, 188, + 128, 137, 139, 155, 161, 163, 165, 169, + 171, 187, 155, 156, 151, 255, 156, 157, + 160, 181, 255, 186, 187, 255, 162, 255, + 160, 168, 161, 167, 158, 255, 160, 132, + 135, 133, 134, 176, 255, 128, 191, 154, + 164, 168, 128, 149, 150, 191, 128, 152, + 153, 191, 181, 128, 159, 160, 189, 190, + 191, 189, 128, 131, 132, 185, 186, 191, + 144, 128, 151, 152, 161, 162, 176, 177, + 255, 169, 177, 129, 132, 141, 142, 145, + 146, 179, 181, 186, 188, 190, 191, 192, + 255, 142, 158, 128, 155, 156, 161, 162, + 175, 176, 177, 178, 191, 169, 177, 180, + 183, 128, 132, 133, 138, 139, 142, 143, + 144, 145, 146, 147, 185, 186, 191, 157, + 128, 152, 153, 158, 159, 177, 178, 180, + 181, 191, 142, 146, 169, 177, 180, 189, + 128, 132, 133, 185, 186, 191, 144, 185, + 128, 159, 160, 161, 162, 191, 169, 177, + 180, 189, 128, 132, 133, 140, 141, 142, + 143, 144, 145, 146, 147, 185, 186, 191, + 158, 177, 128, 155, 156, 161, 162, 191, + 131, 145, 155, 157, 128, 132, 133, 138, + 139, 141, 142, 149, 150, 152, 153, 159, + 160, 162, 163, 164, 165, 167, 168, 170, + 171, 173, 174, 185, 186, 191, 144, 128, + 191, 141, 145, 169, 189, 128, 132, 133, + 185, 186, 191, 128, 151, 152, 154, 155, + 159, 160, 161, 162, 191, 128, 141, 145, + 169, 180, 189, 129, 132, 133, 185, 186, + 191, 158, 128, 159, 160, 161, 162, 176, + 177, 178, 179, 191, 141, 145, 189, 128, + 132, 133, 186, 187, 191, 142, 128, 147, + 148, 150, 151, 158, 159, 161, 162, 185, + 186, 191, 178, 188, 128, 132, 133, 150, + 151, 153, 154, 189, 190, 191, 128, 134, + 135, 191, 128, 177, 129, 179, 180, 191, + 128, 131, 137, 141, 152, 160, 164, 166, + 172, 177, 189, 129, 132, 133, 134, 135, + 138, 139, 147, 148, 167, 168, 169, 170, + 179, 180, 191, 133, 128, 134, 135, 155, + 156, 159, 160, 191, 128, 129, 191, 136, + 128, 172, 173, 191, 128, 135, 136, 140, + 141, 191, 191, 128, 170, 171, 190, 161, + 128, 143, 144, 149, 150, 153, 154, 157, + 158, 164, 165, 166, 167, 173, 174, 176, + 177, 180, 181, 255, 130, 141, 143, 159, + 134, 187, 136, 140, 142, 143, 137, 151, + 153, 142, 143, 158, 159, 137, 177, 191, + 142, 143, 182, 183, 192, 255, 129, 151, + 128, 133, 134, 135, 136, 255, 145, 150, + 151, 155, 191, 192, 255, 128, 143, 144, + 159, 160, 255, 182, 183, 190, 191, 192, + 255, 128, 129, 255, 173, 174, 192, 255, + 128, 129, 154, 155, 159, 160, 255, 171, + 173, 185, 191, 192, 255, 141, 128, 145, + 146, 159, 160, 177, 178, 191, 173, 128, + 145, 146, 159, 160, 176, 177, 191, 128, + 179, 180, 191, 151, 156, 128, 191, 128, + 159, 160, 255, 184, 191, 
192, 255, 169, + 128, 170, 171, 175, 176, 255, 182, 191, + 192, 255, 128, 158, 159, 191, 128, 143, + 144, 173, 174, 175, 176, 180, 181, 191, + 128, 171, 172, 175, 176, 255, 138, 191, + 192, 255, 128, 150, 151, 159, 160, 255, + 149, 191, 192, 255, 167, 128, 191, 128, + 132, 133, 179, 180, 191, 128, 132, 133, + 139, 140, 191, 128, 130, 131, 160, 161, + 173, 174, 175, 176, 185, 186, 255, 166, + 191, 192, 255, 128, 163, 164, 191, 128, + 140, 141, 143, 144, 153, 154, 189, 190, + 191, 128, 136, 137, 191, 173, 128, 168, + 169, 177, 178, 180, 181, 182, 183, 191, + 0, 127, 192, 255, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 191, 192, 255, 181, 189, 191, 128, + 190, 133, 181, 128, 129, 130, 140, 141, + 143, 144, 147, 148, 149, 150, 155, 156, + 159, 160, 172, 173, 177, 178, 188, 189, + 191, 177, 191, 128, 190, 128, 143, 144, + 156, 157, 191, 130, 135, 148, 164, 166, + 168, 128, 137, 138, 149, 150, 151, 152, + 157, 158, 169, 170, 185, 186, 187, 188, + 191, 142, 128, 132, 133, 137, 138, 159, + 160, 255, 137, 191, 192, 255, 175, 128, + 255, 159, 165, 170, 175, 177, 180, 191, + 192, 255, 166, 173, 128, 167, 168, 175, + 176, 255, 168, 174, 176, 191, 192, 255, + 167, 175, 183, 191, 128, 150, 151, 159, + 160, 190, 135, 143, 151, 128, 158, 159, + 191, 128, 132, 133, 135, 136, 160, 161, + 169, 170, 176, 177, 181, 182, 183, 184, + 188, 189, 191, 160, 151, 154, 187, 192, + 255, 128, 132, 133, 173, 174, 176, 177, + 255, 143, 159, 187, 191, 192, 255, 128, + 175, 176, 191, 150, 191, 192, 255, 141, + 191, 192, 255, 128, 143, 144, 189, 190, + 191, 141, 143, 160, 169, 172, 191, 192, + 255, 191, 128, 174, 175, 190, 128, 157, + 158, 159, 160, 255, 176, 191, 192, 255, + 128, 150, 151, 159, 160, 161, 162, 255, + 175, 137, 138, 184, 191, 192, 255, 128, + 182, 183, 255, 130, 134, 139, 163, 191, + 192, 255, 128, 129, 130, 179, 180, 191, + 187, 189, 128, 177, 178, 183, 184, 191, + 128, 137, 138, 165, 166, 175, 176, 255, + 135, 159, 189, 191, 192, 255, 128, 131, + 132, 178, 179, 191, 143, 165, 191, 128, + 159, 160, 175, 176, 185, 186, 190, 128, + 168, 169, 191, 131, 186, 128, 139, 140, + 159, 160, 182, 183, 189, 190, 255, 176, + 178, 180, 183, 184, 190, 191, 192, 255, + 129, 128, 130, 131, 154, 155, 157, 158, + 159, 160, 170, 171, 177, 178, 180, 181, + 191, 128, 167, 175, 129, 134, 135, 136, + 137, 142, 143, 144, 145, 150, 151, 159, + 160, 255, 155, 166, 175, 128, 162, 163, + 191, 164, 175, 135, 138, 188, 191, 192, + 255, 174, 175, 154, 191, 192, 255, 157, + 169, 183, 189, 191, 128, 134, 135, 146, + 147, 151, 152, 158, 159, 190, 130, 133, + 128, 255, 178, 191, 192, 255, 128, 146, + 147, 255, 190, 191, 192, 255, 128, 143, + 144, 255, 144, 145, 136, 175, 188, 191, + 192, 255, 181, 128, 175, 176, 255, 189, + 191, 192, 255, 128, 160, 161, 186, 187, + 191, 128, 129, 154, 155, 165, 166, 255, + 191, 192, 255, 128, 129, 130, 135, 136, + 137, 138, 143, 144, 145, 146, 151, 152, + 153, 154, 156, 157, 191, 128, 191, 128, + 129, 130, 131, 133, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 132, 151, 153, 155, 158, 175, + 178, 179, 180, 191, 140, 167, 187, 190, + 128, 255, 142, 143, 158, 191, 192, 255, + 187, 191, 192, 255, 128, 180, 181, 191, + 128, 156, 157, 159, 160, 255, 145, 191, + 192, 255, 128, 159, 160, 175, 176, 255, + 139, 143, 182, 191, 192, 255, 144, 132, + 135, 150, 191, 192, 255, 158, 175, 148, + 151, 188, 191, 192, 255, 128, 167, 168, + 175, 176, 255, 164, 191, 192, 255, 183, + 191, 192, 
255, 128, 149, 150, 159, 160, + 167, 168, 191, 136, 182, 188, 128, 133, + 134, 137, 138, 184, 185, 190, 191, 255, + 150, 159, 183, 191, 192, 255, 179, 128, + 159, 160, 181, 182, 191, 128, 149, 150, + 159, 160, 185, 186, 191, 128, 183, 184, + 189, 190, 191, 128, 148, 152, 129, 143, + 144, 179, 180, 191, 128, 159, 160, 188, + 189, 191, 128, 156, 157, 191, 136, 128, + 164, 165, 191, 128, 181, 182, 191, 128, + 149, 150, 159, 160, 178, 179, 191, 128, + 145, 146, 191, 128, 178, 179, 191, 128, + 130, 131, 132, 133, 134, 135, 136, 138, + 139, 140, 141, 144, 145, 146, 147, 150, + 151, 152, 153, 154, 156, 162, 163, 171, + 176, 177, 178, 129, 191, 128, 130, 131, + 183, 184, 191, 128, 130, 131, 175, 176, + 191, 128, 143, 144, 168, 169, 191, 128, + 130, 131, 166, 167, 191, 182, 128, 143, + 144, 178, 179, 191, 128, 130, 131, 178, + 179, 191, 128, 154, 156, 129, 132, 133, + 191, 146, 128, 171, 172, 191, 135, 137, + 142, 158, 128, 168, 169, 175, 176, 255, + 159, 191, 192, 255, 144, 128, 156, 157, + 161, 162, 191, 128, 134, 135, 138, 139, + 191, 128, 175, 176, 191, 134, 128, 131, + 132, 135, 136, 191, 128, 174, 175, 191, + 128, 151, 152, 155, 156, 191, 132, 128, + 191, 128, 170, 171, 191, 128, 153, 154, + 191, 160, 190, 192, 255, 128, 184, 185, + 191, 137, 128, 174, 175, 191, 128, 129, + 177, 178, 255, 144, 191, 192, 255, 128, + 142, 143, 144, 145, 146, 149, 129, 148, + 150, 191, 175, 191, 192, 255, 132, 191, + 192, 255, 128, 144, 129, 143, 145, 191, + 144, 153, 128, 143, 145, 152, 154, 191, + 135, 191, 192, 255, 160, 168, 169, 171, + 172, 173, 174, 188, 189, 190, 191, 128, + 159, 161, 167, 170, 187, 185, 191, 192, + 255, 128, 143, 144, 173, 174, 191, 128, + 131, 132, 162, 163, 183, 184, 188, 189, + 255, 133, 143, 145, 191, 192, 255, 128, + 146, 147, 159, 160, 191, 160, 128, 191, + 128, 129, 191, 192, 255, 159, 160, 171, + 128, 170, 172, 191, 192, 255, 173, 191, + 192, 255, 179, 191, 192, 255, 128, 176, + 177, 178, 129, 191, 128, 129, 130, 191, + 171, 175, 189, 191, 192, 255, 128, 136, + 137, 143, 144, 153, 154, 191, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 128, 143, 150, 153, 160, 191, + 149, 157, 173, 186, 188, 160, 161, 163, + 164, 167, 168, 132, 134, 149, 157, 186, + 191, 139, 140, 192, 255, 133, 145, 128, + 134, 135, 137, 138, 255, 166, 167, 129, + 155, 187, 149, 181, 143, 175, 137, 169, + 131, 140, 191, 192, 255, 160, 163, 164, + 165, 184, 185, 186, 128, 159, 161, 162, + 166, 191, 133, 191, 192, 255, 132, 160, + 163, 167, 179, 184, 186, 128, 164, 165, + 168, 169, 187, 188, 191, 130, 135, 137, + 139, 144, 147, 151, 153, 155, 157, 159, + 163, 171, 179, 184, 189, 191, 128, 140, + 141, 148, 149, 160, 161, 164, 165, 166, + 167, 190, 138, 164, 170, 128, 155, 156, + 160, 161, 187, 188, 191, 128, 191, 155, + 156, 128, 191, 151, 191, 192, 255, 156, + 157, 160, 128, 191, 181, 191, 192, 255, + 158, 159, 186, 128, 185, 187, 191, 192, + 255, 162, 191, 192, 255, 160, 168, 128, + 159, 161, 167, 169, 191, 158, 191, 192, + 255, 123, 123, 128, 191, 128, 191, 128, + 191, 128, 191, 128, 191, 10, 123, 123, + 128, 191, 128, 191, 128, 191, 123, 123, + 10, 123, 128, 191, 128, 191, 128, 191, + 123, 123, 170, 181, 183, 186, 128, 150, + 152, 182, 184, 255, 192, 255, 128, 255, + 173, 130, 133, 146, 159, 165, 171, 175, + 255, 181, 190, 184, 185, 192, 255, 140, + 134, 138, 142, 161, 163, 255, 182, 130, + 136, 137, 176, 151, 152, 154, 160, 190, + 136, 144, 192, 255, 135, 129, 130, 132, + 133, 144, 170, 176, 178, 144, 154, 160, + 191, 128, 169, 174, 255, 148, 169, 157, + 158, 189, 190, 192, 255, 144, 255, 
139, + 140, 178, 255, 186, 128, 181, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 128, 173, + 128, 155, 160, 180, 182, 189, 148, 161, + 163, 255, 176, 164, 165, 132, 169, 177, + 141, 142, 145, 146, 179, 181, 186, 187, + 158, 133, 134, 137, 138, 143, 150, 152, + 155, 164, 165, 178, 255, 188, 129, 131, + 133, 138, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 182, 184, 185, 190, 255, + 157, 131, 134, 137, 138, 142, 144, 146, + 152, 159, 165, 182, 255, 129, 131, 133, + 141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 188, 255, 134, 138, 142, + 143, 145, 159, 164, 165, 176, 184, 186, + 255, 129, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 177, 128, 132, 135, 136, 139, 141, + 150, 151, 156, 157, 159, 163, 166, 175, + 156, 130, 131, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 190, 191, 144, 151, 128, + 130, 134, 136, 138, 141, 166, 175, 128, + 131, 133, 140, 142, 144, 146, 168, 170, + 185, 189, 255, 133, 137, 151, 142, 148, + 155, 159, 164, 165, 176, 255, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 179, + 181, 185, 188, 191, 158, 128, 132, 134, + 136, 138, 141, 149, 150, 160, 163, 166, + 175, 177, 178, 129, 131, 133, 140, 142, + 144, 146, 186, 189, 255, 133, 137, 143, + 147, 152, 158, 164, 165, 176, 185, 192, + 255, 189, 130, 131, 133, 150, 154, 177, + 179, 187, 138, 150, 128, 134, 143, 148, + 152, 159, 166, 175, 178, 179, 129, 186, + 128, 142, 144, 153, 132, 138, 141, 165, + 167, 129, 130, 135, 136, 148, 151, 153, + 159, 161, 163, 170, 171, 173, 185, 187, + 189, 134, 128, 132, 136, 141, 144, 153, + 156, 159, 128, 181, 183, 185, 152, 153, + 160, 169, 190, 191, 128, 135, 137, 172, + 177, 191, 128, 132, 134, 151, 153, 188, + 134, 128, 129, 130, 131, 137, 138, 139, + 140, 141, 142, 143, 144, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 173, + 175, 176, 177, 178, 179, 181, 182, 183, + 188, 189, 190, 191, 132, 152, 172, 184, + 185, 187, 128, 191, 128, 137, 144, 255, + 158, 159, 134, 187, 136, 140, 142, 143, + 137, 151, 153, 142, 143, 158, 159, 137, + 177, 142, 143, 182, 183, 191, 255, 128, + 130, 133, 136, 150, 152, 255, 145, 150, + 151, 155, 156, 160, 168, 178, 255, 128, + 143, 160, 255, 182, 183, 190, 255, 129, + 255, 173, 174, 192, 255, 129, 154, 160, + 255, 171, 173, 185, 255, 128, 140, 142, + 148, 160, 180, 128, 147, 160, 172, 174, + 176, 178, 179, 148, 150, 152, 155, 158, + 159, 170, 255, 139, 141, 144, 153, 160, + 255, 184, 255, 128, 170, 176, 255, 182, + 255, 128, 158, 160, 171, 176, 187, 134, + 173, 176, 180, 128, 171, 176, 255, 138, + 143, 155, 255, 128, 155, 160, 255, 159, + 189, 190, 192, 255, 167, 128, 137, 144, + 153, 176, 189, 140, 143, 154, 170, 180, + 255, 180, 255, 128, 183, 128, 137, 141, + 189, 128, 136, 144, 146, 148, 182, 184, + 185, 128, 181, 187, 191, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 255, 190, 128, 180, 182, 188, + 130, 132, 134, 140, 144, 147, 150, 155, + 160, 172, 178, 180, 182, 188, 128, 129, + 130, 131, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 191, 255, 129, + 147, 149, 176, 178, 190, 192, 255, 144, + 156, 161, 144, 156, 165, 176, 130, 135, + 149, 164, 166, 168, 138, 147, 152, 157, + 170, 185, 188, 191, 142, 133, 137, 160, + 255, 137, 255, 128, 174, 176, 255, 159, + 165, 170, 180, 255, 167, 173, 128, 165, + 176, 255, 168, 174, 176, 190, 192, 255, + 128, 150, 160, 166, 
168, 174, 176, 182, + 184, 190, 128, 134, 136, 142, 144, 150, + 152, 158, 160, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 145, 255, 133, + 135, 161, 175, 177, 181, 184, 188, 160, + 151, 152, 187, 192, 255, 133, 173, 177, + 255, 143, 159, 187, 255, 176, 191, 182, + 183, 184, 191, 192, 255, 150, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 255, 141, 255, 144, + 189, 141, 143, 172, 255, 191, 128, 175, + 180, 189, 151, 159, 162, 255, 175, 137, + 138, 184, 255, 183, 255, 168, 255, 128, + 179, 188, 134, 143, 154, 159, 184, 186, + 190, 255, 128, 173, 176, 255, 148, 159, + 189, 255, 129, 142, 154, 159, 191, 255, + 128, 182, 128, 141, 144, 153, 160, 182, + 186, 255, 128, 130, 155, 157, 160, 175, + 178, 182, 129, 134, 137, 142, 145, 150, + 160, 166, 168, 174, 176, 255, 155, 166, + 175, 128, 170, 172, 173, 176, 185, 158, + 159, 160, 255, 164, 175, 135, 138, 188, + 255, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 165, 186, 174, 175, 154, + 255, 190, 128, 134, 147, 151, 157, 168, + 170, 182, 184, 188, 128, 129, 131, 132, + 134, 255, 147, 255, 190, 255, 144, 145, + 136, 175, 188, 255, 128, 143, 160, 175, + 179, 180, 141, 143, 176, 180, 182, 255, + 189, 255, 191, 144, 153, 161, 186, 129, + 154, 166, 255, 191, 255, 130, 135, 138, + 143, 146, 151, 154, 156, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 135, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 153, 155, 178, 179, 128, + 139, 141, 166, 168, 186, 188, 189, 191, + 255, 142, 143, 158, 255, 187, 255, 128, + 180, 189, 128, 156, 160, 255, 145, 159, + 161, 255, 128, 159, 176, 255, 139, 143, + 187, 255, 128, 157, 160, 255, 144, 132, + 135, 150, 255, 158, 159, 170, 175, 148, + 151, 188, 255, 128, 167, 176, 255, 164, + 255, 183, 255, 128, 149, 160, 167, 136, + 188, 128, 133, 138, 181, 183, 184, 191, + 255, 150, 159, 183, 255, 128, 158, 160, + 178, 180, 181, 128, 149, 160, 185, 128, + 183, 190, 191, 191, 128, 131, 133, 134, + 140, 147, 149, 151, 153, 179, 184, 186, + 160, 188, 128, 156, 128, 135, 137, 166, + 128, 181, 128, 149, 160, 178, 128, 145, + 128, 178, 129, 130, 131, 132, 133, 135, + 136, 138, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 155, 156, + 162, 163, 171, 176, 177, 178, 128, 134, + 135, 165, 176, 190, 144, 168, 176, 185, + 128, 180, 182, 191, 182, 144, 179, 155, + 133, 137, 141, 143, 157, 255, 190, 128, + 145, 147, 183, 136, 128, 134, 138, 141, + 143, 157, 159, 168, 176, 255, 171, 175, + 186, 255, 128, 131, 133, 140, 143, 144, + 147, 168, 170, 176, 178, 179, 181, 185, + 188, 191, 144, 151, 128, 132, 135, 136, + 139, 141, 157, 163, 166, 172, 176, 180, + 128, 138, 144, 153, 134, 136, 143, 154, + 255, 128, 181, 184, 255, 129, 151, 158, + 255, 129, 131, 133, 143, 154, 255, 128, + 137, 128, 153, 157, 171, 176, 185, 160, + 255, 170, 190, 192, 255, 128, 184, 128, + 136, 138, 182, 184, 191, 128, 144, 153, + 178, 255, 168, 144, 145, 183, 255, 128, + 142, 145, 149, 129, 141, 144, 146, 147, + 148, 175, 255, 132, 255, 128, 144, 129, + 143, 144, 153, 145, 152, 135, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 161, 167, 185, 255, 128, 158, + 160, 169, 144, 173, 176, 180, 128, 131, + 144, 153, 163, 183, 189, 255, 144, 255, + 133, 143, 191, 255, 143, 159, 160, 128, + 129, 
255, 159, 160, 171, 172, 255, 173, + 255, 179, 255, 128, 176, 177, 178, 128, + 129, 171, 175, 189, 255, 128, 136, 144, + 153, 157, 158, 133, 134, 137, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 168, 169, 170, 150, 153, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 149, 157, 173, + 186, 188, 160, 161, 163, 164, 167, 168, + 132, 134, 149, 157, 186, 139, 140, 191, + 255, 134, 128, 132, 138, 144, 146, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 141, 192, 255, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 133, 143, 151, 255, 139, 143, 154, + 255, 164, 167, 185, 187, 128, 131, 133, + 159, 161, 162, 169, 178, 180, 183, 130, + 135, 137, 139, 148, 151, 153, 155, 157, + 159, 164, 190, 141, 143, 145, 146, 161, + 162, 167, 170, 172, 178, 180, 183, 185, + 188, 128, 137, 139, 155, 161, 163, 165, + 169, 171, 187, 155, 156, 151, 255, 156, + 157, 160, 181, 255, 186, 187, 255, 162, + 255, 160, 168, 161, 167, 158, 255, 160, + 132, 135, 133, 134, 176, 255, 128, 191, + 154, 164, 168, 128, 149, 150, 191, 128, + 152, 153, 191, 181, 128, 159, 160, 189, + 190, 191, 189, 128, 131, 132, 185, 186, + 191, 144, 128, 151, 152, 161, 162, 176, + 177, 255, 169, 177, 129, 132, 141, 142, + 145, 146, 179, 181, 186, 188, 190, 191, + 192, 255, 142, 158, 128, 155, 156, 161, + 162, 175, 176, 177, 178, 191, 169, 177, + 180, 183, 128, 132, 133, 138, 139, 142, + 143, 144, 145, 146, 147, 185, 186, 191, + 157, 128, 152, 153, 158, 159, 177, 178, + 180, 181, 191, 142, 146, 169, 177, 180, + 189, 128, 132, 133, 185, 186, 191, 144, + 185, 128, 159, 160, 161, 162, 191, 169, + 177, 180, 189, 128, 132, 133, 140, 141, + 142, 143, 144, 145, 146, 147, 185, 186, + 191, 158, 177, 128, 155, 156, 161, 162, + 191, 131, 145, 155, 157, 128, 132, 133, + 138, 139, 141, 142, 149, 150, 152, 153, + 159, 160, 162, 163, 164, 165, 167, 168, + 170, 171, 173, 174, 185, 186, 191, 144, + 128, 191, 141, 145, 169, 189, 128, 132, + 133, 185, 186, 191, 128, 151, 152, 154, + 155, 159, 160, 161, 162, 191, 128, 141, + 145, 169, 180, 189, 129, 132, 133, 185, + 186, 191, 158, 128, 159, 160, 161, 162, + 176, 177, 178, 179, 191, 141, 145, 189, + 128, 132, 133, 186, 187, 191, 142, 128, + 147, 148, 150, 151, 158, 159, 161, 162, + 185, 186, 191, 178, 188, 128, 132, 133, + 150, 151, 153, 154, 189, 190, 191, 128, + 134, 135, 191, 128, 177, 129, 179, 180, + 191, 128, 131, 137, 141, 152, 160, 164, + 166, 172, 177, 189, 129, 132, 133, 134, + 135, 138, 139, 147, 148, 167, 168, 169, + 170, 179, 180, 191, 133, 128, 134, 135, + 155, 156, 159, 160, 191, 128, 129, 191, + 136, 128, 172, 173, 191, 128, 135, 136, + 140, 141, 191, 191, 128, 170, 171, 190, + 161, 128, 143, 144, 149, 150, 153, 154, + 157, 158, 164, 165, 166, 167, 173, 174, + 176, 177, 180, 181, 255, 130, 141, 143, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 191, 142, 143, 182, 183, 192, 255, 129, + 151, 128, 133, 134, 135, 136, 255, 145, + 150, 151, 155, 191, 192, 255, 128, 143, + 144, 159, 160, 255, 182, 183, 190, 191, + 192, 255, 128, 129, 255, 173, 174, 192, + 255, 128, 129, 154, 155, 159, 160, 255, + 171, 173, 185, 191, 192, 255, 141, 128, + 145, 146, 159, 160, 177, 178, 191, 173, + 128, 145, 146, 159, 160, 176, 177, 191, + 128, 179, 180, 191, 151, 156, 128, 191, + 128, 159, 160, 255, 184, 191, 192, 255, + 169, 128, 170, 171, 175, 176, 255, 182, + 191, 192, 255, 128, 158, 159, 
191, 128, + 143, 144, 173, 174, 175, 176, 180, 181, + 191, 128, 171, 172, 175, 176, 255, 138, + 191, 192, 255, 128, 150, 151, 159, 160, + 255, 149, 191, 192, 255, 167, 128, 191, + 128, 132, 133, 179, 180, 191, 128, 132, + 133, 139, 140, 191, 128, 130, 131, 160, + 161, 173, 174, 175, 176, 185, 186, 255, + 166, 191, 192, 255, 128, 163, 164, 191, + 128, 140, 141, 143, 144, 153, 154, 189, + 190, 191, 128, 136, 137, 191, 173, 128, + 168, 169, 177, 178, 180, 181, 182, 183, + 191, 0, 127, 192, 255, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 191, 192, 255, 181, 189, 191, + 128, 190, 133, 181, 128, 129, 130, 140, + 141, 143, 144, 147, 148, 149, 150, 155, + 156, 159, 160, 172, 173, 177, 178, 188, + 189, 191, 177, 191, 128, 190, 128, 143, + 144, 156, 157, 191, 130, 135, 148, 164, + 166, 168, 128, 137, 138, 149, 150, 151, + 152, 157, 158, 169, 170, 185, 186, 187, + 188, 191, 142, 128, 132, 133, 137, 138, + 159, 160, 255, 137, 191, 192, 255, 175, + 128, 255, 159, 165, 170, 175, 177, 180, + 191, 192, 255, 166, 173, 128, 167, 168, + 175, 176, 255, 168, 174, 176, 191, 192, + 255, 167, 175, 183, 191, 128, 150, 151, + 159, 160, 190, 135, 143, 151, 128, 158, + 159, 191, 128, 132, 133, 135, 136, 160, + 161, 169, 170, 176, 177, 181, 182, 183, + 184, 188, 189, 191, 160, 151, 154, 187, + 192, 255, 128, 132, 133, 173, 174, 176, + 177, 255, 143, 159, 187, 191, 192, 255, + 128, 175, 176, 191, 150, 191, 192, 255, + 141, 191, 192, 255, 128, 143, 144, 189, + 190, 191, 141, 143, 160, 169, 172, 191, + 192, 255, 191, 128, 174, 175, 190, 128, + 157, 158, 159, 160, 255, 176, 191, 192, + 255, 128, 150, 151, 159, 160, 161, 162, + 255, 175, 137, 138, 184, 191, 192, 255, + 128, 182, 183, 255, 130, 134, 139, 163, + 191, 192, 255, 128, 129, 130, 179, 180, + 191, 187, 189, 128, 177, 178, 183, 184, + 191, 128, 137, 138, 165, 166, 175, 176, + 255, 135, 159, 189, 191, 192, 255, 128, + 131, 132, 178, 179, 191, 143, 165, 191, + 128, 159, 160, 175, 176, 185, 186, 190, + 128, 168, 169, 191, 131, 186, 128, 139, + 140, 159, 160, 182, 183, 189, 190, 255, + 176, 178, 180, 183, 184, 190, 191, 192, + 255, 129, 128, 130, 131, 154, 155, 157, + 158, 159, 160, 170, 171, 177, 178, 180, + 181, 191, 128, 167, 175, 129, 134, 135, + 136, 137, 142, 143, 144, 145, 150, 151, + 159, 160, 255, 155, 166, 175, 128, 162, + 163, 191, 164, 175, 135, 138, 188, 191, + 192, 255, 174, 175, 154, 191, 192, 255, + 157, 169, 183, 189, 191, 128, 134, 135, + 146, 147, 151, 152, 158, 159, 190, 130, + 133, 128, 255, 178, 191, 192, 255, 128, + 146, 147, 255, 190, 191, 192, 255, 128, + 143, 144, 255, 144, 145, 136, 175, 188, + 191, 192, 255, 181, 128, 175, 176, 255, + 189, 191, 192, 255, 128, 160, 161, 186, + 187, 191, 128, 129, 154, 155, 165, 166, + 255, 191, 192, 255, 128, 129, 130, 135, + 136, 137, 138, 143, 144, 145, 146, 151, + 152, 153, 154, 156, 157, 191, 128, 191, + 128, 129, 130, 131, 133, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 132, 151, 153, 155, 158, + 175, 178, 179, 180, 191, 140, 167, 187, + 190, 128, 255, 142, 143, 158, 191, 192, + 255, 187, 191, 192, 255, 128, 180, 181, + 191, 128, 156, 157, 159, 160, 255, 145, + 191, 192, 255, 128, 159, 160, 175, 176, + 255, 139, 143, 182, 191, 192, 255, 144, + 132, 135, 150, 191, 192, 255, 158, 175, + 148, 151, 188, 191, 192, 255, 128, 167, + 168, 175, 176, 255, 164, 191, 192, 255, + 183, 191, 192, 255, 128, 149, 150, 159, + 160, 167, 168, 191, 136, 182, 188, 128, + 133, 134, 137, 
138, 184, 185, 190, 191, + 255, 150, 159, 183, 191, 192, 255, 179, + 128, 159, 160, 181, 182, 191, 128, 149, + 150, 159, 160, 185, 186, 191, 128, 183, + 184, 189, 190, 191, 128, 148, 152, 129, + 143, 144, 179, 180, 191, 128, 159, 160, + 188, 189, 191, 128, 156, 157, 191, 136, + 128, 164, 165, 191, 128, 181, 182, 191, + 128, 149, 150, 159, 160, 178, 179, 191, + 128, 145, 146, 191, 128, 178, 179, 191, + 128, 130, 131, 132, 133, 134, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 162, 163, + 171, 176, 177, 178, 129, 191, 128, 130, + 131, 183, 184, 191, 128, 130, 131, 175, + 176, 191, 128, 143, 144, 168, 169, 191, + 128, 130, 131, 166, 167, 191, 182, 128, + 143, 144, 178, 179, 191, 128, 130, 131, + 178, 179, 191, 128, 154, 156, 129, 132, + 133, 191, 146, 128, 171, 172, 191, 135, + 137, 142, 158, 128, 168, 169, 175, 176, + 255, 159, 191, 192, 255, 144, 128, 156, + 157, 161, 162, 191, 128, 134, 135, 138, + 139, 191, 128, 175, 176, 191, 134, 128, + 131, 132, 135, 136, 191, 128, 174, 175, + 191, 128, 151, 152, 155, 156, 191, 132, + 128, 191, 128, 170, 171, 191, 128, 153, + 154, 191, 160, 190, 192, 255, 128, 184, + 185, 191, 137, 128, 174, 175, 191, 128, + 129, 177, 178, 255, 144, 191, 192, 255, + 128, 142, 143, 144, 145, 146, 149, 129, + 148, 150, 191, 175, 191, 192, 255, 132, + 191, 192, 255, 128, 144, 129, 143, 145, + 191, 144, 153, 128, 143, 145, 152, 154, + 191, 135, 191, 192, 255, 160, 168, 169, + 171, 172, 173, 174, 188, 189, 190, 191, + 128, 159, 161, 167, 170, 187, 185, 191, + 192, 255, 128, 143, 144, 173, 174, 191, + 128, 131, 132, 162, 163, 183, 184, 188, + 189, 255, 133, 143, 145, 191, 192, 255, + 128, 146, 147, 159, 160, 191, 160, 128, + 191, 128, 129, 191, 192, 255, 159, 160, + 171, 128, 170, 172, 191, 192, 255, 173, + 191, 192, 255, 179, 191, 192, 255, 128, + 176, 177, 178, 129, 191, 128, 129, 130, + 191, 171, 175, 189, 191, 192, 255, 128, + 136, 137, 143, 144, 153, 154, 191, 144, + 145, 146, 147, 148, 149, 154, 155, 156, + 157, 158, 159, 128, 143, 150, 153, 160, + 191, 149, 157, 173, 186, 188, 160, 161, + 163, 164, 167, 168, 132, 134, 149, 157, + 186, 191, 139, 140, 192, 255, 133, 145, + 128, 134, 135, 137, 138, 255, 166, 167, + 129, 155, 187, 149, 181, 143, 175, 137, + 169, 131, 140, 191, 192, 255, 160, 163, + 164, 165, 184, 185, 186, 128, 159, 161, + 162, 166, 191, 133, 191, 192, 255, 132, + 160, 163, 167, 179, 184, 186, 128, 164, + 165, 168, 169, 187, 188, 191, 130, 135, + 137, 139, 144, 147, 151, 153, 155, 157, + 159, 163, 171, 179, 184, 189, 191, 128, + 140, 141, 148, 149, 160, 161, 164, 165, + 166, 167, 190, 138, 164, 170, 128, 155, + 156, 160, 161, 187, 188, 191, 128, 191, + 155, 156, 128, 191, 151, 191, 192, 255, + 156, 157, 160, 128, 191, 181, 191, 192, + 255, 158, 159, 186, 128, 185, 187, 191, + 192, 255, 162, 191, 192, 255, 160, 168, + 128, 159, 161, 167, 169, 191, 158, 191, + 192, 255, 9, 10, 13, 32, 33, 34, + 35, 37, 38, 46, 47, 60, 61, 62, + 64, 92, 95, 123, 124, 125, 126, 127, + 194, 195, 198, 199, 203, 204, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 238, 239, 240, + 0, 39, 40, 45, 48, 57, 58, 63, + 65, 90, 91, 96, 97, 122, 192, 193, + 196, 218, 229, 236, 241, 247, 9, 32, + 10, 61, 10, 38, 46, 42, 47, 42, + 46, 69, 101, 48, 57, 60, 61, 61, + 62, 61, 45, 95, 194, 195, 198, 199, + 203, 204, 205, 206, 207, 210, 212, 213, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 233, 234, + 237, 239, 240, 243, 48, 57, 65, 90, + 97, 122, 196, 
218, 229, 236, 124, 125, + 128, 191, 170, 181, 186, 128, 191, 151, + 183, 128, 255, 192, 255, 0, 127, 173, + 130, 133, 146, 159, 165, 171, 175, 191, + 192, 255, 181, 190, 128, 175, 176, 183, + 184, 185, 186, 191, 134, 139, 141, 162, + 128, 135, 136, 255, 182, 130, 137, 176, + 151, 152, 154, 160, 136, 191, 192, 255, + 128, 143, 144, 170, 171, 175, 176, 178, + 179, 191, 128, 159, 160, 191, 176, 128, + 138, 139, 173, 174, 255, 148, 150, 164, + 167, 173, 176, 185, 189, 190, 192, 255, + 144, 128, 145, 146, 175, 176, 191, 128, + 140, 141, 255, 166, 176, 178, 191, 192, + 255, 186, 128, 137, 138, 170, 171, 179, + 180, 181, 182, 191, 160, 161, 162, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 128, 191, 128, 129, 130, 131, + 137, 138, 139, 140, 141, 142, 143, 144, + 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 182, 183, 184, 188, + 189, 190, 191, 132, 187, 129, 130, 132, + 133, 134, 176, 177, 178, 179, 180, 181, + 182, 183, 128, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 136, 143, 145, + 191, 192, 255, 182, 183, 184, 128, 191, + 128, 191, 191, 128, 190, 192, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 191, 192, 255, 158, + 159, 128, 157, 160, 191, 192, 255, 128, + 191, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 128, 163, 165, 186, 144, + 145, 146, 147, 148, 150, 151, 152, 155, + 157, 158, 160, 170, 171, 172, 175, 128, + 159, 161, 169, 173, 191, 128, 191, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 36, 37, + 92, 123, 192, 223, 224, 239, 240, 247, + 10, 13, 34, 36, 37, 92, 123, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 34, 36, 37, 92, 123, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 34, 36, 37, 92, + 123, 128, 191, 192, 223, 224, 239, 240, + 247, 248, 255, 10, 13, 34, 36, 37, + 92, 128, 191, 192, 223, 224, 239, 240, + 247, 248, 255, 36, 37, 92, 123, 192, + 223, 224, 239, 240, 247, 10, 13, 34, + 36, 37, 92, 123, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 123, 126, + 123, 126, 128, 191, 128, 191, 128, 191, + 10, 13, 36, 37, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 36, 37, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 36, 37, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 36, 37, 128, 191, + 192, 223, 224, 239, 240, 247, 248, 255, + 126, 126, 128, 191, 128, 191, 128, 191, 
+ 10, 13, 36, 37, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 36, 37, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 126, 126, 128, 191, + 128, 191, 128, 191, 95, 194, 195, 198, + 199, 203, 204, 205, 206, 207, 210, 212, + 213, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 233, + 234, 237, 238, 239, 240, 65, 90, 97, + 122, 128, 191, 192, 193, 196, 218, 229, + 236, 241, 247, 248, 255, 45, 95, 194, + 195, 198, 199, 203, 204, 205, 206, 207, + 210, 212, 213, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 233, 234, 237, 239, 240, 243, 48, + 57, 65, 90, 97, 122, 196, 218, 229, + 236, 128, 191, 170, 181, 186, 128, 191, + 151, 183, 128, 255, 192, 255, 0, 127, + 173, 130, 133, 146, 159, 165, 171, 175, + 191, 192, 255, 181, 190, 128, 175, 176, + 183, 184, 185, 186, 191, 134, 139, 141, + 162, 128, 135, 136, 255, 182, 130, 137, + 176, 151, 152, 154, 160, 136, 191, 192, + 255, 128, 143, 144, 170, 171, 175, 176, + 178, 179, 191, 128, 159, 160, 191, 176, + 128, 138, 139, 173, 174, 255, 148, 150, + 164, 167, 173, 176, 185, 189, 190, 192, + 255, 144, 128, 145, 146, 175, 176, 191, + 128, 140, 141, 255, 166, 176, 178, 191, + 192, 255, 186, 128, 137, 138, 170, 171, + 179, 180, 181, 182, 191, 160, 161, 162, + 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 128, 191, 128, 129, 130, + 131, 137, 138, 139, 140, 141, 142, 143, + 144, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 182, 183, 184, + 188, 189, 190, 191, 132, 187, 129, 130, + 132, 133, 134, 176, 177, 178, 179, 180, + 181, 182, 183, 128, 191, 128, 129, 130, + 131, 132, 133, 134, 135, 144, 136, 143, + 145, 191, 192, 255, 182, 183, 184, 128, + 191, 128, 191, 191, 128, 190, 192, 255, + 128, 146, 147, 148, 152, 153, 154, 155, + 156, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 129, 191, 192, 255, + 158, 159, 128, 157, 160, 191, 192, 255, + 128, 191, 164, 169, 171, 172, 173, 174, + 175, 180, 181, 182, 183, 184, 185, 187, + 188, 189, 190, 191, 128, 163, 165, 186, + 144, 145, 146, 147, 148, 150, 151, 152, + 155, 157, 158, 160, 170, 171, 172, 175, + 128, 159, 161, 169, 173, 191, 128, 191, +} + +var _hcltok_single_lengths []byte = []byte{ + 0, 1, 1, 1, 2, 3, 2, 0, + 32, 31, 36, 1, 4, 0, 0, 0, + 0, 1, 2, 1, 1, 1, 1, 0, + 1, 1, 0, 0, 2, 0, 0, 0, + 1, 32, 0, 0, 0, 0, 1, 3, + 1, 1, 1, 0, 2, 0, 1, 1, + 2, 0, 3, 0, 1, 0, 2, 1, + 2, 0, 0, 5, 1, 4, 0, 0, + 1, 43, 0, 0, 0, 2, 3, 2, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 4, + 1, 0, 15, 0, 0, 0, 1, 6, + 1, 0, 0, 1, 0, 2, 0, 0, + 0, 9, 0, 1, 1, 0, 0, 0, + 3, 0, 1, 0, 28, 0, 0, 0, + 1, 0, 1, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 2, 0, 0, 18, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 16, 36, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 28, 0, 0, 0, + 1, 1, 1, 1, 0, 0, 2, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 4, 0, 0, 2, + 2, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 3, 0, 0, 4, + 0, 0, 0, 18, 0, 0, 0, 1, + 4, 1, 4, 1, 0, 3, 2, 2, + 2, 1, 0, 0, 1, 8, 0, 0, + 0, 4, 12, 0, 2, 0, 3, 0, + 1, 0, 2, 0, 1, 2, 0, 3, + 1, 2, 0, 0, 0, 0, 0, 1, + 1, 0, 0, 1, 28, 3, 0, 1, + 1, 2, 1, 0, 1, 1, 2, 1, + 1, 2, 1, 1, 0, 2, 1, 1, + 1, 1, 0, 0, 6, 1, 1, 0, + 0, 46, 1, 1, 0, 0, 0, 0, + 2, 1, 0, 0, 0, 1, 0, 
0, + 0, 0, 0, 0, 0, 13, 2, 0, + 0, 0, 9, 0, 1, 28, 0, 1, + 3, 0, 2, 0, 0, 0, 1, 0, + 1, 1, 2, 0, 18, 2, 0, 0, + 16, 35, 0, 0, 0, 1, 0, 28, + 0, 0, 0, 0, 1, 0, 2, 0, + 0, 1, 0, 0, 1, 0, 0, 1, + 0, 0, 0, 0, 1, 11, 0, 0, + 0, 0, 4, 0, 12, 1, 7, 0, + 4, 0, 0, 0, 0, 1, 2, 1, + 1, 1, 1, 0, 1, 1, 0, 0, + 2, 0, 0, 0, 1, 32, 0, 0, + 0, 0, 1, 3, 1, 1, 1, 0, + 2, 0, 1, 1, 2, 0, 3, 0, + 1, 0, 2, 1, 2, 0, 0, 5, + 1, 4, 0, 0, 1, 43, 0, 0, + 0, 2, 3, 2, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 4, 1, 0, 15, 0, + 0, 0, 1, 6, 1, 0, 0, 1, + 0, 2, 0, 0, 0, 9, 0, 1, + 1, 0, 0, 0, 3, 0, 1, 0, + 28, 0, 0, 0, 1, 0, 1, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 2, 0, 0, 18, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 16, + 36, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 2, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 28, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 2, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, + 4, 0, 0, 2, 2, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 1, 1, + 3, 0, 0, 4, 0, 0, 0, 18, + 0, 0, 0, 1, 4, 1, 4, 1, + 0, 3, 2, 2, 2, 1, 0, 0, + 1, 8, 0, 0, 0, 4, 12, 0, + 2, 0, 3, 0, 1, 0, 2, 0, + 1, 2, 0, 0, 3, 0, 1, 1, + 1, 2, 2, 4, 1, 6, 2, 4, + 2, 4, 1, 4, 0, 6, 1, 3, + 1, 2, 0, 2, 11, 1, 1, 1, + 0, 1, 1, 0, 2, 0, 3, 3, + 2, 1, 0, 0, 0, 1, 0, 1, + 0, 1, 1, 0, 2, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 4, 3, 2, 2, 0, + 6, 1, 0, 1, 1, 0, 2, 0, + 4, 3, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 1, 0, 3, 0, 2, 0, 0, 0, + 3, 0, 2, 1, 1, 3, 1, 0, + 0, 0, 0, 0, 5, 2, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 1, + 1, 0, 0, 35, 4, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 3, 0, 1, 0, 0, + 3, 0, 0, 1, 0, 0, 0, 0, + 28, 0, 0, 0, 0, 1, 0, 3, + 1, 4, 0, 1, 0, 0, 1, 0, + 0, 1, 0, 0, 0, 0, 1, 1, + 0, 7, 0, 0, 2, 2, 0, 11, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 12, 1, + 4, 1, 5, 2, 0, 3, 2, 2, + 2, 1, 7, 0, 7, 17, 3, 0, + 2, 0, 3, 0, 0, 1, 0, 2, + 0, 1, 1, 0, 0, 0, 0, 0, + 1, 1, 1, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 1, 1, 4, + 0, 0, 0, 0, 1, 2, 1, 1, + 1, 1, 0, 1, 1, 0, 0, 2, + 0, 0, 0, 1, 32, 0, 0, 0, + 0, 1, 3, 1, 1, 1, 0, 2, + 0, 1, 1, 2, 0, 3, 0, 1, + 0, 2, 1, 2, 0, 0, 5, 1, + 4, 0, 0, 1, 43, 0, 0, 0, + 2, 3, 2, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 4, 1, 0, 15, 0, 0, + 0, 1, 6, 1, 0, 0, 1, 0, + 2, 0, 0, 0, 9, 0, 1, 1, + 0, 0, 0, 3, 0, 1, 0, 28, + 0, 0, 0, 1, 0, 1, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 2, 0, 0, 18, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 16, 36, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 2, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 28, + 0, 0, 0, 1, 1, 1, 1, 0, + 0, 2, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 4, + 0, 0, 2, 2, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 18, 0, + 0, 0, 1, 4, 1, 4, 1, 0, + 3, 2, 2, 2, 1, 0, 0, 1, + 8, 0, 0, 0, 4, 12, 0, 2, + 0, 3, 0, 1, 0, 2, 0, 1, + 2, 0, 0, 3, 0, 1, 1, 1, + 2, 2, 4, 1, 6, 2, 4, 2, + 4, 1, 4, 0, 6, 1, 3, 1, + 2, 0, 2, 11, 1, 1, 1, 0, + 1, 1, 0, 2, 0, 3, 3, 2, + 1, 0, 0, 0, 1, 0, 1, 0, + 1, 1, 0, 2, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 4, 3, 2, 2, 0, 6, + 1, 0, 1, 1, 0, 2, 0, 4, + 3, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 1, + 0, 3, 0, 2, 0, 0, 0, 3, + 0, 2, 1, 1, 3, 1, 0, 0, + 0, 0, 0, 5, 2, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 1, 1, + 0, 0, 35, 4, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 3, 0, 1, 0, 0, 3, + 0, 0, 1, 0, 0, 0, 0, 28, + 0, 0, 0, 0, 1, 0, 3, 1, + 4, 0, 1, 0, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 1, 0, + 7, 0, 0, 2, 2, 0, 11, 0, + 
0, 0, 0, 0, 1, 1, 3, 0, + 0, 4, 0, 0, 0, 12, 1, 4, + 1, 5, 2, 0, 3, 2, 2, 2, + 1, 7, 0, 7, 17, 3, 0, 2, + 0, 3, 0, 0, 1, 0, 2, 0, + 54, 2, 1, 1, 1, 1, 1, 2, + 1, 3, 2, 2, 1, 34, 1, 1, + 0, 3, 2, 0, 0, 0, 1, 2, + 4, 1, 0, 1, 0, 0, 0, 0, + 1, 1, 1, 0, 0, 1, 30, 47, + 13, 9, 3, 0, 1, 28, 2, 0, + 18, 16, 0, 6, 6, 6, 6, 5, + 4, 7, 7, 7, 6, 4, 7, 6, + 6, 6, 6, 6, 6, 6, 1, 1, + 1, 1, 0, 0, 0, 4, 4, 4, + 4, 1, 1, 0, 0, 0, 4, 2, + 1, 1, 0, 0, 0, 33, 34, 0, + 3, 2, 0, 0, 0, 1, 2, 4, + 1, 0, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 1, 30, 47, 13, + 9, 3, 0, 1, 28, 2, 0, 18, + 16, 0, +} + +var _hcltok_range_lengths []byte = []byte{ + 0, 0, 0, 0, 0, 1, 1, 1, + 5, 5, 5, 0, 0, 3, 0, 1, + 1, 4, 2, 3, 0, 1, 0, 2, + 2, 4, 2, 2, 3, 1, 1, 1, + 1, 0, 1, 1, 2, 2, 1, 4, + 6, 9, 6, 8, 5, 8, 7, 10, + 4, 6, 4, 7, 7, 5, 5, 4, + 5, 1, 2, 8, 4, 3, 3, 3, + 0, 3, 1, 2, 1, 2, 2, 3, + 3, 1, 3, 2, 2, 1, 2, 2, + 2, 3, 4, 4, 3, 1, 2, 1, + 3, 2, 2, 2, 2, 2, 3, 3, + 1, 1, 2, 1, 3, 2, 2, 3, + 2, 7, 0, 1, 4, 1, 2, 4, + 2, 1, 2, 0, 2, 2, 3, 5, + 5, 1, 4, 1, 1, 2, 2, 1, + 0, 0, 1, 1, 1, 1, 1, 2, + 2, 2, 2, 1, 1, 1, 4, 2, + 2, 3, 1, 4, 4, 6, 1, 3, + 1, 1, 2, 1, 1, 1, 5, 3, + 1, 1, 1, 2, 3, 3, 1, 2, + 2, 1, 4, 1, 2, 5, 2, 1, + 1, 0, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 1, 1, 2, 4, 2, + 1, 2, 2, 2, 6, 1, 1, 2, + 1, 2, 1, 1, 1, 2, 2, 2, + 1, 3, 2, 5, 2, 8, 6, 2, + 2, 2, 2, 3, 1, 3, 1, 2, + 1, 3, 2, 2, 3, 1, 1, 1, + 1, 1, 1, 1, 2, 2, 4, 1, + 2, 1, 0, 1, 1, 1, 1, 0, + 1, 2, 3, 1, 3, 3, 1, 0, + 3, 0, 2, 3, 1, 0, 0, 0, + 0, 2, 2, 2, 2, 1, 5, 2, + 2, 5, 7, 5, 0, 1, 0, 1, + 1, 1, 1, 1, 0, 1, 1, 0, + 3, 3, 1, 1, 2, 1, 3, 5, + 1, 1, 2, 2, 1, 1, 1, 1, + 2, 6, 3, 7, 2, 6, 1, 6, + 2, 8, 0, 4, 2, 5, 2, 3, + 3, 3, 1, 2, 8, 2, 0, 2, + 1, 2, 1, 5, 2, 1, 3, 3, + 0, 2, 1, 2, 1, 0, 1, 1, + 3, 1, 1, 2, 3, 0, 0, 3, + 2, 4, 1, 4, 1, 1, 3, 1, + 1, 1, 1, 2, 2, 1, 3, 1, + 4, 3, 3, 1, 1, 5, 2, 1, + 1, 2, 1, 2, 1, 3, 2, 0, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 1, 0, + 1, 1, 2, 2, 1, 1, 1, 3, + 2, 1, 0, 2, 1, 1, 1, 1, + 0, 3, 0, 1, 1, 4, 2, 3, + 0, 1, 0, 2, 2, 4, 2, 2, + 3, 1, 1, 1, 1, 0, 1, 1, + 2, 2, 1, 4, 6, 9, 6, 8, + 5, 8, 7, 10, 4, 6, 4, 7, + 7, 5, 5, 4, 5, 1, 2, 8, + 4, 3, 3, 3, 0, 3, 1, 2, + 1, 2, 2, 3, 3, 1, 3, 2, + 2, 1, 2, 2, 2, 3, 4, 4, + 3, 1, 2, 1, 3, 2, 2, 2, + 2, 2, 3, 3, 1, 1, 2, 1, + 3, 2, 2, 3, 2, 7, 0, 1, + 4, 1, 2, 4, 2, 1, 2, 0, + 2, 2, 3, 5, 5, 1, 4, 1, + 1, 2, 2, 1, 0, 0, 1, 1, + 1, 1, 1, 2, 2, 2, 2, 1, + 1, 1, 4, 2, 2, 3, 1, 4, + 4, 6, 1, 3, 1, 1, 2, 1, + 1, 1, 5, 3, 1, 1, 1, 2, + 3, 3, 1, 2, 2, 1, 4, 1, + 2, 5, 2, 1, 1, 0, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 1, + 1, 2, 4, 2, 1, 2, 2, 2, + 6, 1, 1, 2, 1, 2, 1, 1, + 1, 2, 2, 2, 1, 3, 2, 5, + 2, 8, 6, 2, 2, 2, 2, 3, + 1, 3, 1, 2, 1, 3, 2, 2, + 3, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 4, 1, 2, 1, 0, 1, + 1, 1, 1, 0, 1, 2, 3, 1, + 3, 3, 1, 0, 3, 0, 2, 3, + 1, 0, 0, 0, 0, 2, 2, 2, + 2, 1, 5, 2, 2, 5, 7, 5, + 0, 1, 0, 1, 1, 1, 1, 1, + 0, 1, 1, 1, 2, 2, 3, 3, + 4, 7, 5, 7, 5, 3, 3, 7, + 3, 13, 1, 3, 5, 3, 5, 3, + 6, 5, 2, 2, 8, 4, 1, 2, + 3, 2, 10, 2, 2, 0, 2, 3, + 3, 1, 2, 3, 3, 1, 2, 3, + 3, 4, 4, 2, 1, 2, 2, 3, + 2, 2, 5, 3, 2, 3, 2, 1, + 3, 3, 6, 2, 2, 5, 2, 5, + 1, 1, 2, 4, 1, 11, 1, 3, + 8, 4, 2, 1, 0, 4, 3, 3, + 3, 2, 9, 1, 1, 4, 3, 2, + 2, 2, 3, 4, 2, 3, 2, 4, + 3, 2, 2, 3, 3, 4, 3, 3, + 4, 2, 5, 4, 8, 7, 1, 2, + 1, 3, 1, 2, 5, 1, 2, 2, + 2, 2, 1, 3, 2, 2, 3, 3, + 1, 9, 1, 5, 1, 3, 2, 2, + 3, 2, 3, 3, 3, 1, 3, 3, + 2, 2, 4, 5, 3, 3, 4, 3, + 3, 3, 2, 2, 2, 4, 2, 2, + 1, 3, 3, 3, 3, 3, 3, 2, + 2, 3, 2, 3, 3, 2, 3, 2, + 3, 1, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 3, 2, 3, + 2, 3, 5, 3, 3, 1, 2, 3, + 2, 2, 1, 2, 3, 
4, 3, 0, + 3, 0, 2, 3, 1, 0, 0, 0, + 0, 2, 3, 2, 4, 6, 4, 1, + 1, 2, 1, 2, 1, 3, 2, 3, + 2, 0, 0, 1, 1, 1, 1, 1, + 0, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 0, + 3, 0, 1, 1, 4, 2, 3, 0, + 1, 0, 2, 2, 4, 2, 2, 3, + 1, 1, 1, 1, 0, 1, 1, 2, + 2, 1, 4, 6, 9, 6, 8, 5, + 8, 7, 10, 4, 6, 4, 7, 7, + 5, 5, 4, 5, 1, 2, 8, 4, + 3, 3, 3, 0, 3, 1, 2, 1, + 2, 2, 3, 3, 1, 3, 2, 2, + 1, 2, 2, 2, 3, 4, 4, 3, + 1, 2, 1, 3, 2, 2, 2, 2, + 2, 3, 3, 1, 1, 2, 1, 3, + 2, 2, 3, 2, 7, 0, 1, 4, + 1, 2, 4, 2, 1, 2, 0, 2, + 2, 3, 5, 5, 1, 4, 1, 1, + 2, 2, 1, 0, 0, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 1, 1, + 1, 4, 2, 2, 3, 1, 4, 4, + 6, 1, 3, 1, 1, 2, 1, 1, + 1, 5, 3, 1, 1, 1, 2, 3, + 3, 1, 2, 2, 1, 4, 1, 2, + 5, 2, 1, 1, 0, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 1, + 2, 4, 2, 1, 2, 2, 2, 6, + 1, 1, 2, 1, 2, 1, 1, 1, + 2, 2, 2, 1, 3, 2, 5, 2, + 8, 6, 2, 2, 2, 2, 3, 1, + 3, 1, 2, 1, 3, 2, 2, 3, + 1, 1, 1, 1, 1, 1, 1, 2, + 2, 4, 1, 2, 1, 0, 1, 1, + 1, 1, 0, 1, 2, 3, 1, 3, + 3, 1, 0, 3, 0, 2, 3, 1, + 0, 0, 0, 0, 2, 2, 2, 2, + 1, 5, 2, 2, 5, 7, 5, 0, + 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 2, 3, 3, 4, + 7, 5, 7, 5, 3, 3, 7, 3, + 13, 1, 3, 5, 3, 5, 3, 6, + 5, 2, 2, 8, 4, 1, 2, 3, + 2, 10, 2, 2, 0, 2, 3, 3, + 1, 2, 3, 3, 1, 2, 3, 3, + 4, 4, 2, 1, 2, 2, 3, 2, + 2, 5, 3, 2, 3, 2, 1, 3, + 3, 6, 2, 2, 5, 2, 5, 1, + 1, 2, 4, 1, 11, 1, 3, 8, + 4, 2, 1, 0, 4, 3, 3, 3, + 2, 9, 1, 1, 4, 3, 2, 2, + 2, 3, 4, 2, 3, 2, 4, 3, + 2, 2, 3, 3, 4, 3, 3, 4, + 2, 5, 4, 8, 7, 1, 2, 1, + 3, 1, 2, 5, 1, 2, 2, 2, + 2, 1, 3, 2, 2, 3, 3, 1, + 9, 1, 5, 1, 3, 2, 2, 3, + 2, 3, 3, 3, 1, 3, 3, 2, + 2, 4, 5, 3, 3, 4, 3, 3, + 3, 2, 2, 2, 4, 2, 2, 1, + 3, 3, 3, 3, 3, 3, 2, 2, + 3, 2, 3, 3, 2, 3, 2, 3, + 1, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 2, 3, 2, + 3, 5, 3, 3, 1, 2, 3, 2, + 2, 1, 2, 3, 4, 3, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 3, 2, 4, 6, 4, 1, 1, + 2, 1, 2, 1, 3, 2, 3, 2, + 11, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 5, 0, 0, + 1, 1, 1, 0, 1, 1, 5, 4, + 2, 0, 1, 0, 2, 2, 5, 2, + 3, 5, 3, 2, 3, 5, 1, 1, + 1, 3, 1, 1, 2, 2, 3, 1, + 2, 3, 1, 5, 5, 5, 5, 5, + 3, 5, 5, 5, 5, 3, 5, 5, + 5, 5, 5, 5, 5, 5, 0, 0, + 0, 0, 1, 1, 1, 5, 5, 5, + 5, 0, 0, 1, 1, 1, 5, 6, + 0, 0, 1, 1, 1, 8, 5, 1, + 1, 1, 0, 1, 1, 5, 4, 2, + 0, 1, 0, 2, 2, 5, 2, 3, + 5, 3, 2, 3, 5, 1, 1, 1, + 3, 1, 1, 2, 2, 3, 1, 2, + 3, 1, +} + +var _hcltok_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 6, 9, 14, 18, + 20, 58, 95, 137, 139, 144, 148, 149, + 151, 153, 159, 164, 169, 171, 174, 176, + 179, 183, 189, 192, 195, 201, 203, 205, + 207, 210, 243, 245, 247, 250, 253, 256, + 264, 272, 283, 291, 300, 308, 317, 326, + 338, 345, 352, 360, 368, 377, 383, 391, + 397, 405, 407, 410, 424, 430, 438, 442, + 446, 448, 495, 497, 500, 502, 507, 513, + 519, 524, 527, 531, 534, 537, 539, 542, + 545, 548, 552, 557, 562, 566, 568, 571, + 573, 577, 580, 583, 586, 589, 593, 598, + 602, 604, 606, 609, 611, 615, 618, 621, + 629, 633, 641, 657, 659, 664, 666, 670, + 681, 685, 687, 690, 692, 695, 700, 704, + 710, 716, 727, 732, 735, 738, 741, 744, + 746, 750, 751, 754, 756, 786, 788, 790, + 793, 797, 800, 804, 806, 808, 810, 816, + 819, 822, 826, 828, 833, 838, 845, 848, + 852, 856, 858, 861, 881, 883, 885, 892, + 896, 898, 900, 902, 905, 909, 913, 915, + 919, 922, 924, 929, 947, 986, 992, 995, + 997, 999, 1001, 1004, 1007, 1010, 1013, 1016, + 1020, 1023, 1026, 1029, 1031, 1033, 1036, 1043, + 1046, 1048, 1051, 1054, 1057, 1065, 1067, 1069, + 1072, 1074, 1077, 1079, 1081, 1111, 1114, 1117, + 1120, 1123, 1128, 1132, 1139, 1142, 1151, 1160, + 1163, 1167, 1170, 1173, 1177, 1179, 1183, 1185, + 1188, 1190, 1194, 1198, 1202, 1210, 
1212, 1214, + 1218, 1222, 1224, 1237, 1239, 1242, 1245, 1250, + 1252, 1255, 1257, 1259, 1262, 1267, 1269, 1271, + 1276, 1278, 1281, 1285, 1305, 1309, 1313, 1315, + 1317, 1325, 1327, 1334, 1339, 1341, 1345, 1348, + 1351, 1354, 1358, 1361, 1364, 1368, 1378, 1384, + 1387, 1390, 1400, 1420, 1426, 1429, 1431, 1435, + 1437, 1440, 1442, 1446, 1448, 1450, 1454, 1456, + 1460, 1465, 1471, 1473, 1475, 1478, 1480, 1484, + 1491, 1494, 1496, 1499, 1503, 1533, 1538, 1540, + 1543, 1547, 1556, 1561, 1569, 1573, 1581, 1585, + 1593, 1597, 1608, 1610, 1616, 1619, 1627, 1631, + 1636, 1641, 1646, 1648, 1651, 1666, 1670, 1672, + 1675, 1677, 1726, 1729, 1736, 1739, 1741, 1745, + 1749, 1752, 1756, 1758, 1761, 1763, 1765, 1767, + 1769, 1773, 1775, 1777, 1780, 1784, 1798, 1801, + 1805, 1808, 1813, 1824, 1829, 1832, 1862, 1866, + 1869, 1874, 1876, 1880, 1883, 1886, 1888, 1893, + 1895, 1901, 1906, 1912, 1914, 1934, 1942, 1945, + 1947, 1965, 2003, 2005, 2008, 2010, 2015, 2018, + 2047, 2049, 2051, 2053, 2055, 2058, 2060, 2064, + 2067, 2069, 2072, 2074, 2076, 2079, 2081, 2083, + 2085, 2087, 2089, 2092, 2095, 2098, 2111, 2113, + 2117, 2120, 2122, 2127, 2130, 2144, 2147, 2156, + 2158, 2163, 2167, 2168, 2170, 2172, 2178, 2183, + 2188, 2190, 2193, 2195, 2198, 2202, 2208, 2211, + 2214, 2220, 2222, 2224, 2226, 2229, 2262, 2264, + 2266, 2269, 2272, 2275, 2283, 2291, 2302, 2310, + 2319, 2327, 2336, 2345, 2357, 2364, 2371, 2379, + 2387, 2396, 2402, 2410, 2416, 2424, 2426, 2429, + 2443, 2449, 2457, 2461, 2465, 2467, 2514, 2516, + 2519, 2521, 2526, 2532, 2538, 2543, 2546, 2550, + 2553, 2556, 2558, 2561, 2564, 2567, 2571, 2576, + 2581, 2585, 2587, 2590, 2592, 2596, 2599, 2602, + 2605, 2608, 2612, 2617, 2621, 2623, 2625, 2628, + 2630, 2634, 2637, 2640, 2648, 2652, 2660, 2676, + 2678, 2683, 2685, 2689, 2700, 2704, 2706, 2709, + 2711, 2714, 2719, 2723, 2729, 2735, 2746, 2751, + 2754, 2757, 2760, 2763, 2765, 2769, 2770, 2773, + 2775, 2805, 2807, 2809, 2812, 2816, 2819, 2823, + 2825, 2827, 2829, 2835, 2838, 2841, 2845, 2847, + 2852, 2857, 2864, 2867, 2871, 2875, 2877, 2880, + 2900, 2902, 2904, 2911, 2915, 2917, 2919, 2921, + 2924, 2928, 2932, 2934, 2938, 2941, 2943, 2948, + 2966, 3005, 3011, 3014, 3016, 3018, 3020, 3023, + 3026, 3029, 3032, 3035, 3039, 3042, 3045, 3048, + 3050, 3052, 3055, 3062, 3065, 3067, 3070, 3073, + 3076, 3084, 3086, 3088, 3091, 3093, 3096, 3098, + 3100, 3130, 3133, 3136, 3139, 3142, 3147, 3151, + 3158, 3161, 3170, 3179, 3182, 3186, 3189, 3192, + 3196, 3198, 3202, 3204, 3207, 3209, 3213, 3217, + 3221, 3229, 3231, 3233, 3237, 3241, 3243, 3256, + 3258, 3261, 3264, 3269, 3271, 3274, 3276, 3278, + 3281, 3286, 3288, 3290, 3295, 3297, 3300, 3304, + 3324, 3328, 3332, 3334, 3336, 3344, 3346, 3353, + 3358, 3360, 3364, 3367, 3370, 3373, 3377, 3380, + 3383, 3387, 3397, 3403, 3406, 3409, 3419, 3439, + 3445, 3448, 3450, 3454, 3456, 3459, 3461, 3465, + 3467, 3469, 3473, 3475, 3477, 3483, 3486, 3491, + 3496, 3502, 3512, 3520, 3532, 3539, 3549, 3555, + 3567, 3573, 3591, 3594, 3602, 3608, 3618, 3625, + 3632, 3640, 3648, 3651, 3656, 3676, 3682, 3685, + 3689, 3693, 3697, 3709, 3712, 3717, 3718, 3724, + 3731, 3737, 3740, 3743, 3747, 3751, 3754, 3757, + 3762, 3766, 3772, 3778, 3781, 3785, 3788, 3791, + 3796, 3799, 3802, 3808, 3812, 3815, 3819, 3822, + 3825, 3829, 3833, 3840, 3843, 3846, 3852, 3855, + 3862, 3864, 3866, 3869, 3878, 3883, 3897, 3901, + 3905, 3920, 3926, 3929, 3932, 3934, 3939, 3945, + 3949, 3957, 3963, 3973, 3976, 3979, 3984, 3988, + 3991, 3994, 3997, 4001, 4006, 4010, 4014, 4017, + 4022, 4027, 4030, 4036, 4040, 4046, 
4051, 4055, + 4059, 4067, 4070, 4078, 4084, 4094, 4105, 4108, + 4111, 4113, 4117, 4119, 4122, 4133, 4137, 4140, + 4143, 4146, 4149, 4151, 4155, 4159, 4162, 4166, + 4171, 4174, 4184, 4186, 4227, 4233, 4237, 4240, + 4243, 4247, 4250, 4254, 4258, 4263, 4265, 4269, + 4273, 4276, 4279, 4284, 4293, 4297, 4302, 4307, + 4311, 4318, 4322, 4325, 4329, 4332, 4337, 4340, + 4343, 4373, 4377, 4381, 4385, 4389, 4394, 4398, + 4404, 4408, 4416, 4419, 4424, 4428, 4431, 4436, + 4439, 4443, 4446, 4449, 4452, 4455, 4458, 4462, + 4466, 4469, 4479, 4482, 4485, 4490, 4496, 4499, + 4514, 4517, 4521, 4527, 4531, 4535, 4538, 4542, + 4549, 4552, 4555, 4561, 4564, 4568, 4573, 4589, + 4591, 4599, 4601, 4609, 4615, 4617, 4621, 4624, + 4627, 4630, 4634, 4645, 4648, 4660, 4684, 4692, + 4694, 4698, 4701, 4706, 4709, 4711, 4716, 4719, + 4725, 4728, 4730, 4732, 4734, 4736, 4738, 4740, + 4742, 4744, 4746, 4748, 4750, 4752, 4754, 4756, + 4758, 4760, 4762, 4764, 4766, 4768, 4770, 4772, + 4777, 4781, 4782, 4784, 4786, 4792, 4797, 4802, + 4804, 4807, 4809, 4812, 4816, 4822, 4825, 4828, + 4834, 4836, 4838, 4840, 4843, 4876, 4878, 4880, + 4883, 4886, 4889, 4897, 4905, 4916, 4924, 4933, + 4941, 4950, 4959, 4971, 4978, 4985, 4993, 5001, + 5010, 5016, 5024, 5030, 5038, 5040, 5043, 5057, + 5063, 5071, 5075, 5079, 5081, 5128, 5130, 5133, + 5135, 5140, 5146, 5152, 5157, 5160, 5164, 5167, + 5170, 5172, 5175, 5178, 5181, 5185, 5190, 5195, + 5199, 5201, 5204, 5206, 5210, 5213, 5216, 5219, + 5222, 5226, 5231, 5235, 5237, 5239, 5242, 5244, + 5248, 5251, 5254, 5262, 5266, 5274, 5290, 5292, + 5297, 5299, 5303, 5314, 5318, 5320, 5323, 5325, + 5328, 5333, 5337, 5343, 5349, 5360, 5365, 5368, + 5371, 5374, 5377, 5379, 5383, 5384, 5387, 5389, + 5419, 5421, 5423, 5426, 5430, 5433, 5437, 5439, + 5441, 5443, 5449, 5452, 5455, 5459, 5461, 5466, + 5471, 5478, 5481, 5485, 5489, 5491, 5494, 5514, + 5516, 5518, 5525, 5529, 5531, 5533, 5535, 5538, + 5542, 5546, 5548, 5552, 5555, 5557, 5562, 5580, + 5619, 5625, 5628, 5630, 5632, 5634, 5637, 5640, + 5643, 5646, 5649, 5653, 5656, 5659, 5662, 5664, + 5666, 5669, 5676, 5679, 5681, 5684, 5687, 5690, + 5698, 5700, 5702, 5705, 5707, 5710, 5712, 5714, + 5744, 5747, 5750, 5753, 5756, 5761, 5765, 5772, + 5775, 5784, 5793, 5796, 5800, 5803, 5806, 5810, + 5812, 5816, 5818, 5821, 5823, 5827, 5831, 5835, + 5843, 5845, 5847, 5851, 5855, 5857, 5870, 5872, + 5875, 5878, 5883, 5885, 5888, 5890, 5892, 5895, + 5900, 5902, 5904, 5909, 5911, 5914, 5918, 5938, + 5942, 5946, 5948, 5950, 5958, 5960, 5967, 5972, + 5974, 5978, 5981, 5984, 5987, 5991, 5994, 5997, + 6001, 6011, 6017, 6020, 6023, 6033, 6053, 6059, + 6062, 6064, 6068, 6070, 6073, 6075, 6079, 6081, + 6083, 6087, 6089, 6091, 6097, 6100, 6105, 6110, + 6116, 6126, 6134, 6146, 6153, 6163, 6169, 6181, + 6187, 6205, 6208, 6216, 6222, 6232, 6239, 6246, + 6254, 6262, 6265, 6270, 6290, 6296, 6299, 6303, + 6307, 6311, 6323, 6326, 6331, 6332, 6338, 6345, + 6351, 6354, 6357, 6361, 6365, 6368, 6371, 6376, + 6380, 6386, 6392, 6395, 6399, 6402, 6405, 6410, + 6413, 6416, 6422, 6426, 6429, 6433, 6436, 6439, + 6443, 6447, 6454, 6457, 6460, 6466, 6469, 6476, + 6478, 6480, 6483, 6492, 6497, 6511, 6515, 6519, + 6534, 6540, 6543, 6546, 6548, 6553, 6559, 6563, + 6571, 6577, 6587, 6590, 6593, 6598, 6602, 6605, + 6608, 6611, 6615, 6620, 6624, 6628, 6631, 6636, + 6641, 6644, 6650, 6654, 6660, 6665, 6669, 6673, + 6681, 6684, 6692, 6698, 6708, 6719, 6722, 6725, + 6727, 6731, 6733, 6736, 6747, 6751, 6754, 6757, + 6760, 6763, 6765, 6769, 6773, 6776, 6780, 6785, + 6788, 6798, 6800, 6841, 6847, 6851, 
6854, 6857, + 6861, 6864, 6868, 6872, 6877, 6879, 6883, 6887, + 6890, 6893, 6898, 6907, 6911, 6916, 6921, 6925, + 6932, 6936, 6939, 6943, 6946, 6951, 6954, 6957, + 6987, 6991, 6995, 6999, 7003, 7008, 7012, 7018, + 7022, 7030, 7033, 7038, 7042, 7045, 7050, 7053, + 7057, 7060, 7063, 7066, 7069, 7072, 7076, 7080, + 7083, 7093, 7096, 7099, 7104, 7110, 7113, 7128, + 7131, 7135, 7141, 7145, 7149, 7152, 7156, 7163, + 7166, 7169, 7175, 7178, 7182, 7187, 7203, 7205, + 7213, 7215, 7223, 7229, 7231, 7235, 7238, 7241, + 7244, 7248, 7259, 7262, 7274, 7298, 7306, 7308, + 7312, 7315, 7320, 7323, 7325, 7330, 7333, 7339, + 7342, 7408, 7411, 7413, 7415, 7417, 7419, 7421, + 7424, 7426, 7431, 7434, 7437, 7439, 7479, 7481, + 7483, 7485, 7490, 7494, 7495, 7497, 7499, 7506, + 7513, 7520, 7522, 7524, 7526, 7529, 7532, 7538, + 7541, 7546, 7553, 7558, 7561, 7565, 7572, 7604, + 7653, 7668, 7681, 7686, 7688, 7692, 7723, 7729, + 7731, 7752, 7772, 7774, 7786, 7798, 7810, 7822, + 7833, 7841, 7854, 7867, 7880, 7892, 7900, 7913, + 7925, 7937, 7949, 7961, 7973, 7985, 7997, 7999, + 8001, 8003, 8005, 8007, 8009, 8011, 8021, 8031, + 8041, 8051, 8053, 8055, 8057, 8059, 8061, 8071, + 8080, 8082, 8084, 8086, 8088, 8090, 8132, 8172, + 8174, 8179, 8183, 8184, 8186, 8188, 8195, 8202, + 8209, 8211, 8213, 8215, 8218, 8221, 8227, 8230, + 8235, 8242, 8247, 8250, 8254, 8261, 8293, 8342, + 8357, 8370, 8375, 8377, 8381, 8412, 8418, 8420, + 8441, 8461, +} + +var _hcltok_indicies []int16 = []int16{ + 2, 1, 4, 3, 6, 5, 6, 7, + 5, 9, 11, 11, 10, 8, 12, 12, + 10, 8, 10, 8, 13, 14, 15, 16, + 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 42, + 43, 44, 45, 46, 14, 14, 17, 17, + 41, 3, 14, 15, 16, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 42, 43, 44, 45, + 46, 14, 14, 17, 17, 41, 3, 47, + 48, 14, 14, 49, 16, 18, 19, 20, + 19, 50, 51, 23, 52, 25, 26, 53, + 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 40, 42, 66, 44, + 67, 68, 69, 14, 14, 14, 17, 41, + 3, 47, 3, 14, 14, 14, 14, 3, + 14, 14, 14, 3, 14, 3, 14, 3, + 14, 3, 3, 3, 3, 3, 14, 3, + 3, 3, 3, 14, 14, 14, 14, 14, + 3, 3, 14, 3, 3, 14, 3, 14, + 3, 3, 14, 3, 3, 3, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 3, + 14, 14, 3, 3, 3, 3, 3, 3, + 14, 14, 3, 3, 14, 3, 14, 14, + 14, 3, 70, 71, 72, 73, 17, 74, + 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 3, 14, 3, 14, 3, 14, + 14, 3, 14, 14, 3, 3, 3, 14, + 3, 3, 3, 3, 3, 3, 3, 14, + 3, 3, 3, 3, 3, 3, 3, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 3, 3, 3, 3, 3, 3, + 3, 3, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 3, 3, 3, 3, 3, + 3, 3, 3, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 3, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 3, + 3, 3, 3, 3, 3, 3, 3, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 3, 3, + 3, 3, 3, 3, 3, 3, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 3, 14, 3, 14, + 14, 3, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 3, 14, 14, 14, 3, 14, 3, + 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, + 117, 19, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 17, 18, 136, 137, + 138, 139, 140, 17, 19, 17, 3, 14, + 3, 14, 14, 3, 3, 14, 3, 3, + 3, 3, 14, 3, 3, 3, 3, 3, 
+ 14, 3, 3, 3, 3, 3, 14, 14, + 14, 14, 14, 3, 3, 3, 14, 3, + 3, 3, 14, 14, 14, 3, 3, 3, + 14, 14, 3, 3, 3, 14, 14, 14, + 3, 3, 3, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 3, 3, 3, 3, + 3, 14, 14, 14, 14, 3, 3, 14, + 14, 14, 3, 3, 14, 14, 14, 14, + 3, 14, 14, 3, 14, 14, 3, 3, + 3, 14, 14, 14, 3, 3, 3, 3, + 14, 14, 14, 14, 14, 3, 3, 3, + 3, 14, 3, 14, 14, 3, 14, 14, + 3, 14, 3, 14, 14, 14, 3, 14, + 14, 3, 3, 3, 14, 3, 3, 3, + 3, 3, 3, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 17, 150, 151, 152, 153, 154, + 3, 14, 3, 3, 3, 3, 3, 14, + 14, 3, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 3, 14, 14, + 14, 3, 3, 14, 3, 3, 14, 14, + 14, 14, 14, 3, 3, 3, 3, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 3, 155, 112, 156, 157, + 158, 17, 159, 160, 19, 17, 3, 14, + 14, 14, 14, 3, 3, 3, 14, 3, + 3, 14, 14, 14, 3, 3, 3, 14, + 14, 3, 122, 3, 19, 17, 17, 161, + 3, 17, 3, 14, 19, 162, 163, 19, + 164, 165, 19, 60, 166, 167, 168, 169, + 170, 19, 171, 172, 173, 19, 174, 175, + 176, 18, 177, 178, 179, 18, 180, 19, + 17, 3, 3, 14, 14, 3, 3, 3, + 14, 14, 14, 14, 3, 14, 14, 3, + 3, 3, 3, 14, 14, 3, 3, 14, + 14, 3, 3, 3, 3, 3, 3, 14, + 14, 14, 3, 3, 3, 14, 3, 3, + 3, 14, 14, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 3, 3, 3, 14, + 14, 14, 14, 3, 181, 182, 3, 17, + 3, 14, 3, 3, 14, 19, 183, 184, + 185, 186, 60, 187, 188, 58, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 17, + 3, 3, 14, 3, 14, 14, 14, 14, + 14, 14, 14, 3, 14, 14, 14, 3, + 14, 3, 3, 14, 3, 14, 3, 3, + 14, 14, 14, 14, 3, 14, 14, 14, + 3, 3, 14, 14, 14, 14, 3, 14, + 14, 3, 3, 14, 14, 14, 14, 14, + 3, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 204, 209, 210, 211, + 212, 41, 3, 213, 214, 19, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 19, + 17, 224, 225, 226, 227, 19, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 19, 147, 17, + 243, 3, 14, 14, 14, 14, 14, 3, + 3, 3, 14, 3, 14, 14, 3, 14, + 3, 14, 14, 3, 3, 3, 14, 14, + 14, 3, 3, 3, 14, 14, 14, 3, + 3, 3, 3, 14, 3, 3, 14, 3, + 3, 14, 14, 14, 3, 3, 14, 3, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 14, 3, 3, 3, 14, 14, 3, + 14, 14, 3, 14, 14, 3, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 3, 14, 3, 14, 14, 3, + 14, 3, 14, 14, 3, 14, 3, 14, + 3, 244, 215, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 101, 254, 19, 255, + 256, 257, 19, 258, 132, 259, 260, 261, + 262, 263, 264, 265, 266, 19, 3, 3, + 3, 14, 14, 14, 3, 14, 14, 3, + 14, 14, 3, 3, 3, 3, 3, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 14, 3, 3, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 3, 3, 3, 3, 14, 14, + 14, 3, 3, 3, 14, 3, 3, 3, + 14, 14, 3, 14, 14, 14, 3, 14, + 3, 3, 3, 14, 14, 3, 14, 14, + 14, 3, 14, 14, 14, 3, 3, 3, + 3, 14, 19, 184, 267, 268, 17, 19, + 17, 3, 3, 14, 3, 14, 19, 267, + 17, 3, 19, 269, 17, 3, 3, 14, + 19, 270, 271, 272, 175, 273, 274, 19, + 275, 276, 277, 17, 3, 3, 14, 14, + 14, 3, 14, 14, 3, 14, 14, 14, + 14, 3, 3, 14, 3, 3, 14, 14, + 3, 14, 3, 19, 17, 3, 278, 19, + 279, 3, 17, 3, 14, 3, 14, 280, + 19, 281, 282, 3, 14, 3, 3, 3, + 14, 14, 14, 14, 3, 283, 284, 285, + 19, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 296, 297, 298, 299, 17, + 3, 14, 14, 14, 3, 3, 3, 3, + 14, 14, 3, 3, 14, 3, 3, 3, + 3, 3, 3, 3, 14, 3, 14, 3, + 3, 3, 3, 3, 3, 14, 14, 14, + 14, 14, 3, 3, 14, 3, 3, 3, + 14, 3, 3, 14, 3, 3, 14, 3, + 3, 14, 3, 3, 3, 14, 14, 14, + 3, 3, 3, 14, 14, 14, 14, 3, + 300, 19, 301, 19, 302, 303, 304, 
305, + 17, 3, 14, 14, 14, 14, 14, 3, + 3, 3, 14, 3, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 3, 306, 19, 17, 3, 14, 307, + 19, 103, 17, 3, 14, 308, 3, 17, + 3, 14, 19, 309, 17, 3, 3, 14, + 310, 3, 19, 311, 17, 3, 3, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 3, 3, + 14, 3, 14, 14, 14, 3, 14, 3, + 14, 14, 14, 3, 3, 3, 3, 3, + 3, 3, 14, 14, 14, 3, 14, 3, + 3, 3, 14, 14, 14, 14, 3, 312, + 313, 72, 314, 315, 316, 317, 318, 319, + 320, 321, 322, 323, 324, 325, 326, 327, + 328, 329, 330, 331, 332, 334, 335, 336, + 337, 338, 339, 333, 3, 14, 14, 14, + 14, 3, 14, 3, 14, 14, 3, 14, + 14, 14, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 3, 14, 14, 14, 14, 14, 3, + 14, 14, 3, 14, 14, 14, 14, 14, + 14, 14, 3, 14, 14, 14, 3, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 3, 14, 3, + 14, 14, 3, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 3, 14, 14, 14, 3, 14, 3, + 14, 14, 3, 14, 3, 340, 341, 342, + 104, 105, 106, 107, 108, 343, 110, 111, + 112, 113, 114, 115, 344, 345, 170, 346, + 261, 120, 347, 122, 232, 272, 125, 348, + 349, 350, 351, 352, 353, 354, 355, 356, + 357, 134, 358, 19, 17, 18, 19, 137, + 138, 139, 140, 17, 17, 3, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 3, + 3, 3, 14, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 14, 14, 3, + 14, 14, 14, 3, 3, 14, 14, 14, + 3, 3, 14, 14, 3, 14, 3, 14, + 3, 14, 14, 14, 3, 3, 14, 14, + 3, 14, 14, 3, 14, 14, 14, 3, + 359, 143, 145, 146, 147, 148, 149, 17, + 360, 151, 361, 153, 362, 3, 14, 14, + 3, 3, 3, 3, 14, 3, 3, 14, + 14, 14, 14, 14, 3, 363, 112, 364, + 157, 158, 17, 159, 160, 19, 17, 3, + 14, 14, 14, 14, 3, 3, 3, 14, + 19, 162, 163, 19, 365, 366, 222, 311, + 166, 167, 168, 367, 170, 368, 369, 370, + 371, 372, 373, 374, 375, 376, 377, 178, + 179, 18, 378, 19, 17, 3, 3, 3, + 3, 14, 14, 14, 3, 3, 3, 3, + 3, 14, 14, 3, 14, 14, 14, 3, + 14, 14, 3, 3, 3, 14, 14, 3, + 14, 14, 14, 14, 3, 14, 3, 14, + 14, 14, 14, 14, 3, 3, 3, 3, + 3, 14, 14, 14, 14, 14, 14, 3, + 14, 3, 19, 183, 184, 379, 186, 60, + 187, 188, 58, 189, 190, 380, 17, 193, + 381, 195, 196, 197, 17, 3, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 3, 14, 3, 382, 383, 200, 201, 202, + 384, 204, 205, 385, 386, 387, 204, 209, + 210, 211, 212, 41, 3, 213, 214, 19, + 215, 216, 218, 388, 220, 389, 222, 223, + 19, 17, 390, 225, 226, 227, 19, 228, + 229, 230, 231, 232, 233, 234, 235, 391, + 237, 238, 392, 240, 241, 242, 19, 147, + 17, 243, 3, 3, 14, 3, 3, 14, + 3, 14, 14, 14, 14, 14, 3, 14, + 14, 3, 393, 394, 395, 396, 397, 398, + 399, 400, 250, 401, 322, 402, 216, 403, + 404, 405, 406, 407, 404, 408, 409, 410, + 261, 411, 263, 412, 413, 274, 3, 14, + 3, 14, 3, 14, 3, 14, 3, 14, + 14, 3, 14, 3, 14, 14, 14, 3, + 14, 14, 3, 3, 14, 14, 14, 3, + 14, 3, 14, 3, 14, 14, 3, 14, + 3, 14, 3, 14, 3, 14, 3, 14, + 3, 3, 3, 14, 14, 14, 3, 14, + 14, 3, 19, 270, 232, 414, 404, 415, + 274, 19, 416, 417, 277, 17, 3, 14, + 3, 14, 14, 14, 3, 3, 3, 14, + 14, 3, 280, 19, 281, 418, 3, 14, + 14, 3, 19, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 419, 17, 3, + 3, 3, 14, 19, 420, 19, 268, 303, + 304, 305, 17, 3, 3, 14, 422, 422, + 422, 422, 421, 422, 422, 422, 421, 422, + 421, 422, 422, 421, 421, 421, 421, 421, + 421, 422, 421, 421, 421, 421, 422, 422, + 422, 422, 422, 421, 421, 
422, 421, 421, + 422, 421, 422, 421, 421, 422, 421, 421, + 421, 422, 422, 422, 422, 422, 422, 421, + 422, 422, 421, 422, 422, 421, 421, 421, + 421, 421, 421, 422, 422, 421, 421, 422, + 421, 422, 422, 422, 421, 423, 424, 425, + 426, 427, 428, 429, 430, 431, 432, 433, + 434, 435, 436, 437, 438, 439, 440, 441, + 442, 443, 444, 445, 446, 447, 448, 449, + 450, 451, 452, 453, 454, 421, 422, 421, + 422, 421, 422, 422, 421, 422, 422, 421, + 421, 421, 422, 421, 421, 421, 421, 421, + 421, 421, 422, 421, 421, 421, 421, 421, + 421, 421, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 421, 421, 421, + 421, 421, 421, 421, 421, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 421, 421, + 421, 421, 421, 421, 421, 421, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 421, 422, 422, 422, + 422, 422, 422, 421, 422, 422, 422, 422, + 422, 422, 421, 421, 421, 421, 421, 421, + 421, 421, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 422, + 422, 422, 422, 421, 422, 422, 422, 422, + 422, 421, 421, 421, 421, 421, 421, 421, + 421, 422, 422, 422, 422, 422, 422, 421, + 422, 422, 422, 422, 422, 422, 422, 421, + 422, 421, 422, 422, 421, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 421, 422, 422, 422, + 421, 422, 421, 455, 456, 457, 458, 459, + 460, 461, 462, 463, 464, 465, 466, 467, + 468, 469, 470, 471, 472, 473, 474, 475, + 476, 477, 478, 479, 480, 481, 482, 483, + 484, 485, 486, 487, 488, 489, 490, 427, + 491, 492, 493, 494, 495, 496, 427, 472, + 427, 421, 422, 421, 422, 422, 421, 421, + 422, 421, 421, 421, 421, 422, 421, 421, + 421, 421, 421, 422, 421, 421, 421, 421, + 421, 422, 422, 422, 422, 422, 421, 421, + 421, 422, 421, 421, 421, 422, 422, 422, + 421, 421, 421, 422, 422, 421, 421, 421, + 422, 422, 422, 421, 421, 421, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 421, + 421, 421, 421, 421, 422, 422, 422, 422, + 421, 421, 422, 422, 422, 421, 421, 422, + 422, 422, 422, 421, 422, 422, 421, 422, + 422, 421, 421, 421, 422, 422, 422, 421, + 421, 421, 421, 422, 422, 422, 422, 422, + 421, 421, 421, 421, 422, 421, 422, 422, + 421, 422, 422, 421, 422, 421, 422, 422, + 422, 421, 422, 422, 421, 421, 421, 422, + 421, 421, 421, 421, 421, 421, 421, 422, + 422, 422, 422, 421, 422, 422, 422, 422, + 422, 422, 422, 421, 497, 498, 499, 500, + 501, 502, 503, 504, 505, 427, 506, 507, + 508, 509, 510, 421, 422, 421, 421, 421, + 421, 421, 422, 422, 421, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 421, 422, 422, 422, 421, + 421, 422, 422, 422, 421, 421, 422, 421, + 421, 422, 422, 422, 422, 422, 421, 421, + 421, 421, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 421, 511, + 466, 512, 513, 514, 427, 515, 516, 472, + 427, 421, 422, 422, 422, 422, 421, 421, + 421, 422, 421, 421, 422, 422, 422, 421, + 421, 421, 422, 422, 421, 477, 421, 472, + 427, 427, 517, 421, 427, 421, 422, 472, + 518, 519, 472, 520, 521, 472, 522, 523, + 524, 525, 526, 527, 472, 528, 529, 530, + 472, 531, 532, 533, 491, 534, 535, 536, + 491, 537, 472, 427, 421, 421, 422, 422, + 421, 421, 421, 422, 422, 422, 422, 421, + 422, 422, 421, 421, 421, 421, 422, 422, + 421, 421, 422, 422, 421, 421, 421, 421, + 421, 421, 422, 422, 422, 421, 421, 421, + 422, 421, 421, 421, 422, 422, 421, 422, + 422, 422, 422, 421, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 421, + 421, 421, 
422, 422, 422, 422, 421, 538, + 539, 421, 427, 421, 422, 421, 421, 422, + 472, 540, 541, 542, 543, 522, 544, 545, + 546, 547, 548, 549, 550, 551, 552, 553, + 554, 555, 427, 421, 421, 422, 421, 422, + 422, 422, 422, 422, 422, 422, 421, 422, + 422, 422, 421, 422, 421, 421, 422, 421, + 422, 421, 421, 422, 422, 422, 422, 421, + 422, 422, 422, 421, 421, 422, 422, 422, + 422, 421, 422, 422, 421, 421, 422, 422, + 422, 422, 422, 421, 556, 557, 558, 559, + 560, 561, 562, 563, 564, 565, 566, 562, + 568, 569, 570, 571, 567, 421, 572, 573, + 472, 574, 575, 576, 577, 578, 579, 580, + 581, 582, 472, 427, 583, 584, 585, 586, + 472, 587, 588, 589, 590, 591, 592, 593, + 594, 595, 596, 597, 598, 599, 600, 601, + 472, 503, 427, 602, 421, 422, 422, 422, + 422, 422, 421, 421, 421, 422, 421, 422, + 422, 421, 422, 421, 422, 422, 421, 421, + 421, 422, 422, 422, 421, 421, 421, 422, + 422, 422, 421, 421, 421, 421, 422, 421, + 421, 422, 421, 421, 422, 422, 422, 421, + 421, 422, 421, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 422, 421, 421, 421, + 422, 422, 421, 422, 422, 421, 422, 422, + 421, 422, 422, 421, 422, 422, 422, 422, + 422, 422, 422, 421, 422, 421, 422, 421, + 422, 422, 421, 422, 421, 422, 422, 421, + 422, 421, 422, 421, 603, 574, 604, 605, + 606, 607, 608, 609, 610, 611, 612, 455, + 613, 472, 614, 615, 616, 472, 617, 487, + 618, 619, 620, 621, 622, 623, 624, 625, + 472, 421, 421, 421, 422, 422, 422, 421, + 422, 422, 421, 422, 422, 421, 421, 421, + 421, 421, 422, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 422, 421, 421, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 421, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 421, 421, 421, + 421, 422, 422, 422, 421, 421, 421, 422, + 421, 421, 421, 422, 422, 421, 422, 422, + 422, 421, 422, 421, 421, 421, 422, 422, + 421, 422, 422, 422, 421, 422, 422, 422, + 421, 421, 421, 421, 422, 472, 541, 626, + 627, 427, 472, 427, 421, 421, 422, 421, + 422, 472, 626, 427, 421, 472, 628, 427, + 421, 421, 422, 472, 629, 630, 631, 532, + 632, 633, 472, 634, 635, 636, 427, 421, + 421, 422, 422, 422, 421, 422, 422, 421, + 422, 422, 422, 422, 421, 421, 422, 421, + 421, 422, 422, 421, 422, 421, 472, 427, + 421, 637, 472, 638, 421, 427, 421, 422, + 421, 422, 639, 472, 640, 641, 421, 422, + 421, 421, 421, 422, 422, 422, 422, 421, + 642, 643, 644, 472, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 427, 421, 422, 422, 422, 421, + 421, 421, 421, 422, 422, 421, 421, 422, + 421, 421, 421, 421, 421, 421, 421, 422, + 421, 422, 421, 421, 421, 421, 421, 421, + 422, 422, 422, 422, 422, 421, 421, 422, + 421, 421, 421, 422, 421, 421, 422, 421, + 421, 422, 421, 421, 422, 421, 421, 421, + 422, 422, 422, 421, 421, 421, 422, 422, + 422, 422, 421, 659, 472, 660, 472, 661, + 662, 663, 664, 427, 421, 422, 422, 422, + 422, 422, 421, 421, 421, 422, 421, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 421, 665, 472, 427, + 421, 422, 666, 472, 457, 427, 421, 422, + 667, 421, 427, 421, 422, 472, 668, 427, + 421, 421, 422, 669, 421, 472, 670, 427, + 421, 421, 422, 672, 671, 422, 422, 422, + 422, 672, 671, 422, 672, 671, 672, 672, + 422, 672, 671, 422, 672, 422, 672, 671, + 422, 672, 422, 672, 422, 671, 672, 672, + 672, 672, 672, 672, 672, 672, 671, 422, + 422, 672, 672, 422, 672, 422, 672, 671, + 672, 672, 672, 672, 672, 422, 672, 422, + 672, 422, 672, 671, 672, 672, 422, 672, + 422, 672, 671, 672, 672, 672, 672, 
672, + 422, 672, 422, 672, 671, 422, 422, 672, + 422, 672, 671, 672, 672, 672, 422, 672, + 422, 672, 422, 672, 422, 672, 671, 672, + 422, 672, 422, 672, 671, 422, 672, 672, + 672, 672, 422, 672, 422, 672, 422, 672, + 422, 672, 422, 672, 422, 672, 671, 422, + 672, 671, 672, 672, 672, 422, 672, 422, + 672, 671, 672, 422, 672, 422, 672, 671, + 422, 672, 672, 672, 672, 422, 672, 422, + 672, 671, 422, 672, 422, 672, 422, 672, + 671, 672, 672, 422, 672, 422, 672, 671, + 422, 672, 422, 672, 422, 672, 422, 671, + 672, 672, 672, 422, 672, 422, 672, 671, + 422, 672, 671, 672, 672, 422, 672, 671, + 672, 672, 672, 422, 672, 672, 672, 672, + 672, 672, 422, 422, 672, 422, 672, 422, + 672, 422, 672, 671, 672, 422, 672, 422, + 672, 671, 422, 672, 671, 672, 422, 672, + 671, 672, 422, 672, 671, 422, 422, 672, + 671, 422, 672, 422, 672, 422, 672, 422, + 672, 422, 672, 422, 671, 672, 672, 422, + 672, 672, 672, 672, 422, 422, 672, 672, + 672, 672, 672, 422, 672, 672, 672, 672, + 672, 671, 422, 672, 672, 422, 672, 422, + 671, 672, 672, 422, 672, 671, 422, 422, + 672, 422, 671, 672, 672, 671, 422, 672, + 422, 671, 672, 671, 422, 672, 422, 672, + 422, 671, 672, 672, 671, 422, 672, 422, + 672, 422, 672, 671, 672, 422, 672, 422, + 672, 671, 422, 672, 671, 422, 422, 672, + 671, 672, 422, 671, 672, 671, 422, 672, + 422, 672, 422, 671, 672, 671, 422, 422, + 672, 671, 672, 422, 672, 422, 672, 671, + 422, 672, 422, 671, 672, 671, 422, 422, + 672, 422, 671, 672, 671, 422, 422, 672, + 671, 672, 422, 672, 671, 672, 422, 672, + 671, 672, 422, 672, 422, 672, 422, 671, + 672, 671, 422, 422, 672, 671, 672, 422, + 672, 422, 672, 671, 422, 672, 671, 672, + 672, 422, 672, 422, 672, 671, 671, 422, + 671, 422, 672, 672, 422, 672, 672, 672, + 672, 672, 672, 672, 671, 422, 672, 672, + 672, 422, 671, 672, 672, 672, 422, 672, + 422, 672, 422, 672, 422, 672, 422, 672, + 671, 422, 422, 672, 671, 672, 422, 672, + 671, 422, 422, 672, 422, 422, 422, 672, + 422, 672, 422, 672, 422, 672, 422, 671, + 422, 672, 422, 672, 422, 671, 672, 671, + 422, 672, 422, 671, 672, 422, 672, 672, + 672, 671, 422, 672, 422, 422, 672, 422, + 671, 672, 672, 671, 422, 672, 672, 672, + 672, 422, 672, 422, 671, 672, 672, 672, + 422, 672, 671, 672, 422, 672, 422, 672, + 422, 672, 422, 672, 671, 672, 672, 422, + 672, 671, 422, 672, 422, 672, 422, 671, + 672, 672, 671, 422, 672, 422, 671, 672, + 671, 422, 672, 671, 422, 672, 422, 672, + 671, 672, 672, 672, 671, 422, 422, 422, + 672, 671, 422, 672, 422, 671, 672, 671, + 422, 672, 422, 672, 422, 671, 672, 672, + 672, 671, 422, 672, 422, 671, 672, 672, + 672, 672, 671, 422, 672, 422, 672, 671, + 422, 422, 672, 422, 672, 671, 672, 422, + 672, 422, 671, 672, 672, 671, 422, 672, + 422, 672, 671, 422, 672, 672, 672, 422, + 672, 422, 671, 422, 672, 671, 672, 422, + 422, 672, 422, 672, 422, 671, 672, 672, + 672, 672, 671, 422, 672, 422, 672, 422, + 672, 422, 672, 422, 672, 671, 672, 672, + 672, 422, 672, 422, 672, 422, 672, 422, + 671, 672, 672, 422, 422, 672, 671, 672, + 422, 672, 672, 671, 422, 672, 422, 672, + 671, 422, 422, 672, 672, 672, 672, 422, + 672, 422, 672, 422, 671, 672, 672, 422, + 671, 672, 671, 422, 672, 422, 671, 672, + 671, 422, 672, 422, 671, 672, 422, 672, + 672, 671, 422, 672, 672, 422, 671, 672, + 671, 422, 672, 422, 672, 671, 672, 422, + 672, 422, 671, 672, 671, 422, 672, 422, + 672, 422, 672, 422, 672, 422, 672, 671, + 673, 671, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 683, 684, 676, 685, 686, + 687, 688, 689, 676, 690, 691, 692, 693, + 694, 695, 696, 697, 698, 699, 700, 701, + 702, 703, 704, 676, 
705, 673, 685, 673, + 706, 673, 671, 672, 672, 672, 672, 422, + 671, 672, 672, 671, 422, 672, 671, 422, + 422, 672, 671, 422, 672, 422, 671, 672, + 671, 422, 422, 672, 422, 671, 672, 672, + 671, 422, 672, 672, 672, 671, 422, 672, + 422, 672, 672, 671, 422, 422, 672, 422, + 671, 672, 671, 422, 672, 671, 422, 422, + 672, 422, 672, 671, 422, 672, 422, 422, + 672, 422, 672, 422, 671, 672, 672, 671, + 422, 672, 672, 422, 672, 671, 422, 672, + 422, 672, 671, 422, 672, 422, 671, 422, + 672, 672, 672, 422, 672, 671, 672, 422, + 672, 671, 422, 672, 671, 672, 422, 672, + 671, 422, 672, 671, 422, 672, 422, 672, + 671, 422, 672, 671, 422, 672, 671, 707, + 708, 709, 710, 711, 712, 713, 714, 715, + 716, 717, 718, 678, 719, 720, 721, 722, + 723, 720, 724, 725, 726, 727, 728, 729, + 730, 731, 732, 673, 671, 672, 422, 672, + 671, 672, 422, 672, 671, 672, 422, 672, + 671, 672, 422, 672, 671, 422, 672, 422, + 672, 671, 672, 422, 672, 671, 672, 422, + 422, 422, 672, 671, 672, 422, 672, 671, + 672, 672, 672, 672, 422, 672, 422, 671, + 672, 671, 422, 422, 672, 422, 672, 671, + 672, 422, 672, 671, 422, 672, 671, 672, + 672, 422, 672, 671, 422, 672, 671, 672, + 422, 672, 671, 422, 672, 671, 422, 672, + 671, 422, 672, 671, 672, 671, 422, 422, + 672, 671, 672, 422, 672, 671, 422, 672, + 422, 671, 672, 671, 422, 676, 733, 673, + 676, 734, 676, 735, 685, 673, 671, 672, + 671, 422, 672, 671, 422, 676, 734, 685, + 673, 671, 676, 736, 673, 685, 673, 671, + 672, 671, 422, 676, 737, 694, 738, 720, + 739, 732, 676, 740, 741, 742, 673, 685, + 673, 671, 672, 671, 422, 672, 422, 672, + 671, 422, 672, 422, 672, 422, 671, 672, + 672, 671, 422, 672, 422, 672, 671, 422, + 672, 671, 676, 685, 427, 671, 743, 676, + 744, 685, 673, 671, 427, 672, 671, 422, + 672, 671, 422, 745, 676, 746, 747, 673, + 671, 422, 672, 671, 672, 672, 671, 422, + 422, 672, 422, 672, 671, 676, 748, 749, + 750, 751, 752, 753, 754, 755, 756, 757, + 758, 673, 685, 673, 671, 672, 422, 672, + 672, 672, 672, 672, 672, 672, 422, 672, + 422, 672, 672, 672, 672, 672, 672, 671, + 422, 672, 672, 422, 672, 422, 671, 672, + 422, 672, 672, 672, 422, 672, 672, 422, + 672, 672, 422, 672, 672, 422, 672, 672, + 671, 422, 676, 759, 676, 735, 760, 761, + 762, 673, 685, 673, 671, 672, 671, 422, + 672, 672, 672, 422, 672, 672, 672, 422, + 672, 422, 672, 671, 422, 422, 422, 422, + 672, 672, 422, 422, 422, 422, 422, 672, + 672, 672, 672, 672, 672, 672, 422, 672, + 422, 672, 422, 671, 672, 672, 672, 422, + 672, 422, 672, 671, 685, 427, 763, 676, + 685, 427, 672, 671, 422, 764, 676, 765, + 685, 427, 672, 671, 422, 672, 422, 766, + 685, 673, 671, 427, 672, 671, 422, 676, + 767, 673, 685, 673, 671, 672, 671, 422, + 768, 769, 768, 770, 771, 768, 772, 768, + 773, 768, 771, 774, 775, 774, 777, 776, + 778, 779, 778, 780, 781, 776, 782, 776, + 783, 778, 784, 779, 785, 780, 787, 786, + 788, 789, 789, 786, 790, 786, 791, 788, + 792, 789, 793, 789, 795, 795, 795, 795, + 794, 795, 795, 795, 794, 795, 794, 795, + 795, 794, 794, 794, 794, 794, 794, 795, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 794, 794, 795, 794, 794, 795, 794, + 795, 794, 794, 795, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 795, + 795, 795, 794, 797, 798, 799, 800, 801, + 802, 803, 804, 805, 806, 807, 808, 809, + 810, 811, 812, 813, 814, 815, 816, 817, + 818, 819, 820, 821, 822, 823, 824, 825, + 826, 827, 828, 794, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 794, 794, 794, + 795, 794, 794, 794, 794, 794, 794, 794, + 795, 
794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 794, 794, + 794, 794, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 794, 794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 794, + 794, 794, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 795, + 794, 829, 830, 831, 832, 833, 834, 835, + 836, 837, 838, 839, 840, 841, 842, 843, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 855, 856, 857, 858, 859, + 860, 861, 862, 863, 864, 801, 865, 866, + 867, 868, 869, 870, 801, 846, 801, 794, + 795, 794, 795, 795, 794, 794, 795, 794, + 794, 794, 794, 795, 794, 794, 794, 794, + 794, 795, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 794, 795, 795, 795, 794, 794, + 794, 795, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 794, 794, 794, + 794, 794, 795, 795, 795, 795, 794, 794, + 795, 795, 795, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 794, 795, 795, 794, + 794, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 795, 795, 795, 794, 794, + 794, 794, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 794, 795, 795, 795, 794, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 871, 872, 873, 874, 875, 876, + 877, 878, 879, 801, 880, 881, 882, 883, + 884, 794, 795, 794, 794, 794, 794, 794, + 795, 795, 794, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 794, 794, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 794, 885, 840, 886, + 887, 888, 801, 889, 890, 846, 801, 794, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 794, + 795, 795, 794, 851, 794, 846, 801, 801, + 891, 794, 801, 794, 795, 846, 892, 893, + 846, 894, 895, 846, 896, 897, 898, 899, + 900, 901, 846, 902, 903, 904, 846, 905, + 906, 907, 865, 908, 909, 910, 865, 911, + 846, 801, 794, 794, 795, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 794, 794, 794, 794, 795, 795, 794, 794, + 795, 795, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 794, 794, 794, 795, 794, + 794, 794, 795, 795, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 794, 794, 794, + 795, 795, 795, 795, 794, 912, 913, 794, + 801, 794, 795, 794, 794, 795, 846, 914, + 915, 916, 917, 896, 918, 919, 920, 921, + 922, 923, 924, 925, 926, 927, 928, 929, + 801, 794, 794, 795, 794, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 794, 795, 794, 794, 795, 794, 795, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 795, 794, 794, 795, 795, 795, 795, 794, + 795, 795, 794, 794, 795, 795, 795, 795, + 795, 794, 930, 931, 932, 933, 934, 935, + 936, 937, 938, 939, 940, 936, 
942, 943, + 944, 945, 941, 794, 946, 947, 846, 948, + 949, 950, 951, 952, 953, 954, 955, 956, + 846, 801, 957, 958, 959, 960, 846, 961, + 962, 963, 964, 965, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 846, 877, + 801, 976, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 795, 795, 794, + 795, 794, 795, 795, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 795, 795, + 794, 794, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 795, + 794, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 794, 795, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 794, 795, 794, 795, 795, + 794, 795, 794, 795, 795, 794, 795, 794, + 795, 794, 977, 948, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 829, 987, 846, + 988, 989, 990, 846, 991, 861, 992, 993, + 994, 995, 996, 997, 998, 999, 846, 794, + 794, 794, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 795, 795, 794, 795, 795, 795, 794, + 795, 794, 794, 794, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 794, + 794, 794, 795, 846, 915, 1000, 1001, 801, + 846, 801, 794, 794, 795, 794, 795, 846, + 1000, 801, 794, 846, 1002, 801, 794, 794, + 795, 846, 1003, 1004, 1005, 906, 1006, 1007, + 846, 1008, 1009, 1010, 801, 794, 794, 795, + 795, 795, 794, 795, 795, 794, 795, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 794, 795, 794, 846, 801, 794, 1011, + 846, 1012, 794, 801, 794, 795, 794, 795, + 1013, 846, 1014, 1015, 794, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 1016, 1017, + 1018, 846, 1019, 1020, 1021, 1022, 1023, 1024, + 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, + 801, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 794, 795, + 794, 794, 794, 794, 794, 794, 795, 795, + 795, 795, 795, 794, 794, 795, 794, 794, + 794, 795, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 1033, 846, 1034, 846, 1035, 1036, 1037, + 1038, 801, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 794, 1039, 846, 801, 794, 795, + 1040, 846, 831, 801, 794, 795, 1041, 794, + 801, 794, 795, 846, 1042, 801, 794, 794, + 795, 1043, 794, 846, 1044, 801, 794, 794, + 795, 1046, 1045, 795, 795, 795, 795, 1046, + 1045, 795, 1046, 1045, 1046, 1046, 795, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1045, 795, 795, 1046, + 1046, 795, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 1046, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 1046, 1046, 795, 1046, + 795, 1046, 1045, 795, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 1046, 795, 1046, + 795, 1046, 1045, 795, 1046, 1046, 1046, 1046, + 795, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 
1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1046, 1045, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 1045, 1046, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1046, + 795, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1046, 1046, + 1046, 1046, 795, 795, 1046, 1046, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1046, 795, 1046, 795, 1045, + 1046, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1045, 1046, 1045, 795, 1046, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 1045, 1046, 795, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 795, 1046, 1045, 1045, 795, 1045, 795, + 1046, 1046, 795, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1045, 795, 1046, 1046, 1046, 795, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 795, 1046, 795, 795, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1045, 1046, 795, 1046, 1046, 1046, 1045, + 795, 1046, 795, 795, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1046, 1046, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1046, 795, 1046, + 1045, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 1045, 1046, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 1046, + 1046, 1046, 1045, 795, 795, 795, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 1046, 1046, 795, 1046, 795, + 1045, 795, 1046, 1045, 1046, 795, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 1045, 1046, 1046, 1046, 795, + 1046, 795, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 795, 1046, 1045, 1046, 795, 1046, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1046, 1046, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1045, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1045, 1046, 795, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 795, 1046, 1045, 1047, 1045, + 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, + 1056, 1057, 1058, 1050, 1059, 1060, 1061, 1062, + 1063, 1050, 1064, 1065, 1066, 1067, 1068, 1069, + 
1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1050, 1079, 1047, 1059, 1047, 1080, 1047, + 1045, 1046, 1046, 1046, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1045, 795, 795, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 795, 1046, 795, 1045, 1046, 1046, 1045, 795, + 1046, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1046, 1045, 795, 795, 1046, 795, 1045, 1046, + 1045, 795, 1046, 1045, 795, 795, 1046, 795, + 1046, 1045, 795, 1046, 795, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 795, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1045, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 1045, 1081, 1082, 1083, + 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, + 1092, 1052, 1093, 1094, 1095, 1096, 1097, 1094, + 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, + 1106, 1047, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 1045, 1046, 795, 795, 795, + 1046, 1045, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 795, 1046, 1045, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 795, 1046, + 1045, 795, 1046, 1045, 795, 1046, 1045, 795, + 1046, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1050, 1107, 1047, 1050, 1108, + 1050, 1109, 1059, 1047, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1050, 1108, 1059, 1047, 1045, + 1050, 1110, 1047, 1059, 1047, 1045, 1046, 1045, + 795, 1050, 1111, 1068, 1112, 1094, 1113, 1106, + 1050, 1114, 1115, 1116, 1047, 1059, 1047, 1045, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1045, 1046, 1046, 1045, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1050, 1059, 801, 1045, 1117, 1050, 1118, 1059, + 1047, 1045, 801, 1046, 1045, 795, 1046, 1045, + 795, 1119, 1050, 1120, 1121, 1047, 1045, 795, + 1046, 1045, 1046, 1046, 1045, 795, 795, 1046, + 795, 1046, 1045, 1050, 1122, 1123, 1124, 1125, + 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1047, + 1059, 1047, 1045, 1046, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 1046, 1046, 1046, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 795, 1045, 1046, 795, 1046, + 1046, 1046, 795, 1046, 1046, 795, 1046, 1046, + 795, 1046, 1046, 795, 1046, 1046, 1045, 795, + 1050, 1133, 1050, 1109, 1134, 1135, 1136, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 795, 795, 795, 795, 1046, 1046, + 795, 795, 795, 795, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1045, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 1059, 801, 1137, 1050, 1059, 801, + 1046, 1045, 795, 1138, 1050, 1139, 1059, 801, + 1046, 1045, 795, 1046, 795, 1140, 1059, 1047, + 1045, 801, 1046, 1045, 795, 1050, 1141, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143, + 1144, 1142, 1145, 1146, 1147, 1148, 1149, 1150, + 1151, 1152, 1153, 1154, 672, 672, 422, 1155, + 1156, 1157, 1158, 672, 1161, 1162, 1164, 1165, + 1166, 1160, 1167, 1168, 1169, 1170, 1171, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1186, 1188, 1189, + 1190, 1191, 1192, 1193, 672, 1148, 10, 1148, + 422, 1148, 422, 1160, 1163, 1187, 1194, 1159, + 1142, 1142, 1195, 1143, 1196, 1198, 1197, 
2, + 1, 1199, 1197, 1200, 1197, 5, 1, 1197, + 6, 5, 9, 11, 11, 10, 1202, 1203, + 1204, 1197, 1205, 1206, 1197, 1207, 1197, 422, + 422, 1209, 1210, 491, 472, 1211, 472, 1212, + 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, + 1221, 1222, 546, 1223, 522, 1224, 1225, 1226, + 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, + 1235, 422, 422, 422, 427, 567, 1208, 1236, + 1197, 1237, 1197, 672, 1238, 422, 422, 422, + 672, 1238, 672, 672, 422, 1238, 422, 1238, + 422, 1238, 422, 672, 672, 672, 672, 672, + 1238, 422, 672, 672, 672, 422, 672, 422, + 1238, 422, 672, 672, 672, 672, 422, 1238, + 672, 422, 672, 422, 672, 422, 672, 672, + 422, 672, 1238, 422, 672, 422, 672, 422, + 672, 1238, 672, 422, 1238, 672, 422, 672, + 422, 1238, 672, 672, 672, 672, 672, 1238, + 422, 422, 672, 422, 672, 1238, 672, 422, + 1238, 672, 672, 1238, 422, 422, 672, 422, + 672, 422, 672, 1238, 1239, 1240, 1241, 1242, + 1243, 1244, 1245, 1246, 1247, 1248, 1249, 717, + 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 1260, 1261, 1260, 1262, 1263, 1264, + 1265, 1266, 673, 1238, 1267, 1268, 1269, 1270, + 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 727, + 1286, 1287, 1288, 694, 1289, 1290, 1291, 1292, + 1293, 1294, 673, 1295, 1296, 1297, 1298, 1299, + 1300, 1301, 1302, 676, 1303, 673, 676, 1304, + 1305, 1306, 1307, 685, 1238, 1308, 1309, 1310, + 1311, 705, 1312, 1313, 685, 1314, 1315, 1316, + 1317, 1318, 673, 1238, 1319, 1278, 1320, 1321, + 1322, 685, 1323, 1324, 676, 673, 685, 427, + 1238, 1288, 673, 676, 685, 427, 685, 427, + 1325, 685, 1238, 427, 676, 1326, 1327, 676, + 1328, 1329, 683, 1330, 1331, 1332, 1333, 1334, + 1284, 1335, 1336, 1337, 1338, 1339, 1340, 1341, + 1342, 1343, 1344, 1345, 1346, 1303, 1347, 676, + 685, 427, 1238, 1348, 1349, 685, 673, 1238, + 427, 673, 1238, 676, 1350, 733, 1351, 1352, + 1353, 1354, 1355, 1356, 1357, 1358, 673, 1359, + 1360, 1361, 1362, 1363, 1364, 673, 685, 1238, + 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, + 1374, 1375, 1376, 1372, 1378, 1379, 1380, 1381, + 1365, 1377, 1365, 1238, 1365, 1238, 1382, 1382, + 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, + 1387, 771, 1391, 1391, 1391, 1392, 1393, 1386, + 1391, 772, 773, 1394, 1391, 771, 1395, 1395, + 1395, 1397, 1398, 1399, 1395, 1400, 1401, 1402, + 1395, 1396, 1403, 1403, 1403, 1405, 1406, 1407, + 1403, 1408, 1409, 1410, 1403, 1404, 1391, 1391, + 1411, 1412, 1386, 1391, 772, 773, 1394, 1391, + 771, 1413, 1414, 1415, 771, 1416, 1417, 1418, + 769, 769, 769, 769, 1420, 1421, 1422, 1396, + 769, 1423, 1424, 1425, 769, 1419, 770, 770, + 770, 1427, 1428, 1429, 1396, 770, 1430, 1431, + 1432, 770, 1426, 769, 769, 769, 1434, 1435, + 1436, 1404, 769, 1437, 1438, 1439, 769, 1433, + 1395, 1395, 771, 1440, 1441, 1399, 1395, 1400, + 1401, 1402, 1395, 1396, 1442, 1443, 1444, 771, + 1445, 1446, 1447, 770, 770, 770, 770, 1449, + 1450, 1451, 1404, 770, 1452, 1453, 1454, 770, + 1448, 1403, 1403, 771, 1455, 1456, 1407, 1403, + 1408, 1409, 1410, 1403, 1404, 1403, 1403, 1403, + 1405, 1406, 1407, 771, 1408, 1409, 1410, 1403, + 1404, 1403, 1403, 1403, 1405, 1406, 1407, 772, + 1408, 1409, 1410, 1403, 1404, 1403, 1403, 1403, + 1405, 1406, 1407, 773, 1408, 1409, 1410, 1403, + 1404, 1395, 1395, 1395, 1397, 1398, 1399, 771, + 1400, 1401, 1402, 1395, 1396, 1395, 1395, 1395, + 1397, 1398, 1399, 772, 1400, 1401, 1402, 1395, + 1396, 1395, 1395, 1395, 1397, 1398, 1399, 773, + 1400, 1401, 1402, 1395, 1396, 1458, 769, 1460, + 1459, 1461, 770, 1463, 1462, 771, 1464, 775, + 1464, 1465, 1464, 777, 1466, 1467, 1468, 
1469, + 1470, 1471, 1472, 1469, 781, 777, 1466, 1474, + 1475, 1473, 782, 783, 1476, 1473, 781, 1479, + 1480, 1481, 1482, 1477, 1483, 1484, 1485, 1477, + 1478, 1488, 1489, 1490, 1491, 1486, 1492, 1493, + 1494, 1486, 1487, 1496, 1495, 1498, 1497, 781, + 1499, 782, 1499, 783, 1499, 787, 1500, 1501, + 1502, 1503, 1504, 1505, 1506, 1503, 789, 787, + 1500, 1508, 1507, 790, 791, 1509, 1507, 789, + 1511, 1510, 1513, 1512, 789, 1514, 790, 1514, + 791, 1514, 795, 1517, 1518, 1520, 1521, 1522, + 1516, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, + 1538, 1539, 1540, 1541, 1542, 1544, 1545, 1546, + 1547, 1548, 1549, 795, 795, 1515, 1516, 1519, + 1543, 1550, 1515, 1046, 795, 795, 1552, 1553, + 865, 846, 1554, 846, 1555, 1556, 1557, 1558, + 1559, 1560, 1561, 1562, 1563, 1564, 1565, 920, + 1566, 896, 1567, 1568, 1569, 1570, 1571, 1572, + 1573, 1574, 1575, 1576, 1577, 1578, 795, 795, + 795, 801, 941, 1551, 1046, 1579, 795, 795, + 795, 1046, 1579, 1046, 1046, 795, 1579, 795, + 1579, 795, 1579, 795, 1046, 1046, 1046, 1046, + 1046, 1579, 795, 1046, 1046, 1046, 795, 1046, + 795, 1579, 795, 1046, 1046, 1046, 1046, 795, + 1579, 1046, 795, 1046, 795, 1046, 795, 1046, + 1046, 795, 1046, 1579, 795, 1046, 795, 1046, + 795, 1046, 1579, 1046, 795, 1579, 1046, 795, + 1046, 795, 1579, 1046, 1046, 1046, 1046, 1046, + 1579, 795, 795, 1046, 795, 1046, 1579, 1046, + 795, 1579, 1046, 1046, 1579, 795, 795, 1046, + 795, 1046, 795, 1046, 1579, 1580, 1581, 1582, + 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, + 1091, 1591, 1592, 1593, 1594, 1595, 1596, 1597, + 1598, 1599, 1600, 1601, 1602, 1601, 1603, 1604, + 1605, 1606, 1607, 1047, 1579, 1608, 1609, 1610, + 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, + 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, + 1101, 1627, 1628, 1629, 1068, 1630, 1631, 1632, + 1633, 1634, 1635, 1047, 1636, 1637, 1638, 1639, + 1640, 1641, 1642, 1643, 1050, 1644, 1047, 1050, + 1645, 1646, 1647, 1648, 1059, 1579, 1649, 1650, + 1651, 1652, 1079, 1653, 1654, 1059, 1655, 1656, + 1657, 1658, 1659, 1047, 1579, 1660, 1619, 1661, + 1662, 1663, 1059, 1664, 1665, 1050, 1047, 1059, + 801, 1579, 1629, 1047, 1050, 1059, 801, 1059, + 801, 1666, 1059, 1579, 801, 1050, 1667, 1668, + 1050, 1669, 1670, 1057, 1671, 1672, 1673, 1674, + 1675, 1625, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1644, 1688, + 1050, 1059, 801, 1579, 1689, 1690, 1059, 1047, + 1579, 801, 1047, 1579, 1050, 1691, 1107, 1692, + 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1047, + 1700, 1701, 1702, 1703, 1704, 1705, 1047, 1059, + 1579, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1713, 1719, 1720, 1721, + 1722, 1706, 1718, 1706, 1579, 1706, 1579, +} + +var _hcltok_trans_targs []int16 = []int16{ + 1464, 1, 1464, 1464, 1464, 3, 4, 1472, + 1464, 5, 1473, 6, 7, 9, 10, 287, + 13, 14, 15, 16, 17, 288, 289, 20, + 290, 22, 23, 291, 292, 293, 294, 295, + 296, 297, 298, 299, 300, 329, 349, 354, + 128, 129, 130, 357, 152, 372, 376, 1464, + 11, 12, 18, 19, 21, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 65, + 106, 121, 132, 155, 171, 284, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 122, 123, 124, 125, 126, + 127, 131, 133, 134, 135, 
136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 153, 154, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 172, 204, + 228, 231, 232, 234, 243, 244, 247, 251, + 269, 276, 278, 280, 282, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 229, 230, 233, 235, 236, + 237, 238, 239, 240, 241, 242, 245, 246, + 248, 249, 250, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 270, 271, 272, 273, + 274, 275, 277, 279, 281, 283, 285, 286, + 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, + 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 343, 344, 345, 346, 347, 348, 350, + 351, 352, 353, 355, 356, 358, 359, 360, + 361, 362, 363, 364, 365, 366, 367, 368, + 369, 370, 371, 373, 374, 375, 377, 383, + 405, 410, 412, 414, 378, 379, 380, 381, + 382, 384, 385, 386, 387, 388, 389, 390, + 391, 392, 393, 394, 395, 396, 397, 398, + 399, 400, 401, 402, 403, 404, 406, 407, + 408, 409, 411, 413, 415, 1464, 1477, 438, + 439, 440, 441, 418, 442, 443, 444, 445, + 446, 447, 448, 449, 450, 451, 452, 453, + 454, 455, 456, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 470, + 471, 472, 473, 474, 475, 476, 477, 478, + 479, 480, 481, 482, 483, 484, 485, 486, + 420, 487, 488, 489, 490, 491, 492, 493, + 494, 495, 496, 497, 498, 499, 500, 501, + 502, 503, 504, 419, 505, 506, 507, 508, + 509, 511, 512, 513, 514, 515, 516, 517, + 518, 519, 520, 521, 522, 523, 524, 526, + 527, 528, 529, 530, 531, 535, 537, 538, + 539, 540, 435, 541, 542, 543, 544, 545, + 546, 547, 548, 549, 550, 551, 552, 553, + 554, 555, 557, 558, 560, 561, 562, 563, + 564, 565, 433, 566, 567, 568, 569, 570, + 571, 572, 573, 574, 576, 608, 632, 635, + 636, 638, 647, 648, 651, 655, 673, 533, + 680, 682, 684, 686, 577, 578, 579, 580, + 581, 582, 583, 584, 585, 586, 587, 588, + 589, 590, 591, 592, 593, 594, 595, 596, + 597, 598, 599, 600, 601, 602, 603, 604, + 605, 606, 607, 609, 610, 611, 612, 613, + 614, 615, 616, 617, 618, 619, 620, 621, + 622, 623, 624, 625, 626, 627, 628, 629, + 630, 631, 633, 634, 637, 639, 640, 641, + 642, 643, 644, 645, 646, 649, 650, 652, + 653, 654, 656, 657, 658, 659, 660, 661, + 662, 663, 664, 665, 666, 667, 668, 669, + 670, 671, 672, 674, 675, 676, 677, 678, + 679, 681, 683, 685, 687, 689, 690, 1464, + 1464, 691, 828, 829, 760, 830, 831, 832, + 833, 834, 835, 789, 836, 725, 837, 838, + 839, 840, 841, 842, 843, 844, 745, 845, + 846, 847, 848, 849, 850, 851, 852, 853, + 854, 770, 855, 857, 858, 859, 860, 861, + 862, 863, 864, 865, 866, 703, 867, 868, + 869, 870, 871, 872, 873, 874, 875, 741, + 876, 877, 878, 879, 880, 811, 882, 883, + 886, 888, 889, 890, 891, 892, 893, 896, + 897, 899, 900, 901, 903, 904, 905, 906, + 907, 908, 909, 910, 911, 912, 913, 915, + 916, 917, 918, 921, 923, 924, 926, 928, + 1515, 1517, 1518, 1516, 931, 932, 1515, 934, + 1541, 1541, 1541, 1543, 1544, 1542, 939, 940, + 1545, 1546, 1550, 1550, 1550, 1551, 946, 947, + 1552, 1553, 1557, 1558, 1557, 973, 974, 975, + 976, 953, 977, 978, 979, 980, 981, 982, + 983, 984, 985, 986, 987, 988, 989, 990, + 991, 992, 993, 994, 995, 996, 997, 998, + 999, 1000, 1001, 1002, 1003, 1005, 1006, 1007, + 1008, 1009, 1010, 
1011, 1012, 1013, 1014, 1015, + 1016, 1017, 1018, 1019, 1020, 1021, 955, 1022, + 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, + 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 954, 1040, 1041, 1042, 1043, 1044, 1046, + 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, + 1055, 1056, 1057, 1058, 1059, 1061, 1062, 1063, + 1064, 1065, 1066, 1070, 1072, 1073, 1074, 1075, + 970, 1076, 1077, 1078, 1079, 1080, 1081, 1082, + 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, + 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, + 968, 1101, 1102, 1103, 1104, 1105, 1106, 1107, + 1108, 1109, 1111, 1143, 1167, 1170, 1171, 1173, + 1182, 1183, 1186, 1190, 1208, 1068, 1215, 1217, + 1219, 1221, 1112, 1113, 1114, 1115, 1116, 1117, + 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, + 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, + 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, + 1142, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, + 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, + 1168, 1169, 1172, 1174, 1175, 1176, 1177, 1178, + 1179, 1180, 1181, 1184, 1185, 1187, 1188, 1189, + 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, + 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, + 1207, 1209, 1210, 1211, 1212, 1213, 1214, 1216, + 1218, 1220, 1222, 1224, 1225, 1557, 1557, 1226, + 1363, 1364, 1295, 1365, 1366, 1367, 1368, 1369, + 1370, 1324, 1371, 1260, 1372, 1373, 1374, 1375, + 1376, 1377, 1378, 1379, 1280, 1380, 1381, 1382, + 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1305, + 1390, 1392, 1393, 1394, 1395, 1396, 1397, 1398, + 1399, 1400, 1401, 1238, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1409, 1410, 1276, 1411, 1412, + 1413, 1414, 1415, 1346, 1417, 1418, 1421, 1423, + 1424, 1425, 1426, 1427, 1428, 1431, 1432, 1434, + 1435, 1436, 1438, 1439, 1440, 1441, 1442, 1443, + 1444, 1445, 1446, 1447, 1448, 1450, 1451, 1452, + 1453, 1456, 1458, 1459, 1461, 1463, 1465, 1464, + 1466, 1467, 1464, 1468, 1464, 1469, 1470, 1471, + 1474, 1475, 1476, 1464, 1478, 1464, 1479, 1464, + 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, + 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, + 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, + 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, + 1512, 1513, 1514, 1464, 1464, 1464, 1464, 1464, + 2, 1464, 1464, 8, 1464, 1464, 1464, 1464, + 1464, 416, 417, 421, 422, 423, 424, 425, + 426, 427, 428, 429, 430, 431, 432, 434, + 436, 437, 469, 510, 525, 532, 534, 536, + 556, 559, 575, 688, 1464, 1464, 1464, 692, + 693, 694, 695, 696, 697, 698, 699, 700, + 701, 702, 704, 705, 706, 707, 708, 709, + 710, 711, 712, 713, 714, 715, 716, 717, + 718, 719, 720, 721, 722, 723, 724, 726, + 727, 728, 729, 730, 731, 732, 733, 734, + 735, 736, 737, 738, 739, 740, 742, 743, + 744, 746, 747, 748, 749, 750, 751, 752, + 753, 754, 755, 756, 757, 758, 759, 761, + 762, 763, 764, 765, 766, 767, 768, 769, + 771, 772, 773, 774, 775, 776, 777, 778, + 779, 780, 781, 782, 783, 784, 785, 786, + 787, 788, 790, 791, 792, 793, 794, 795, + 796, 797, 798, 799, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 812, + 813, 814, 815, 816, 817, 818, 819, 820, + 821, 822, 823, 824, 825, 826, 827, 856, + 881, 884, 885, 887, 894, 895, 898, 902, + 914, 919, 920, 922, 925, 927, 1515, 1515, + 1534, 1536, 1519, 1515, 1538, 1539, 1540, 1515, + 929, 930, 933, 1515, 1516, 929, 930, 1519, + 931, 932, 933, 1515, 1516, 929, 930, 1519, + 931, 932, 933, 1520, 1525, 1521, 1522, 1524, + 1531, 1532, 1533, 1517, 1521, 1522, 1524, 1531, + 1532, 1533, 1518, 1523, 1526, 1527, 1528, 1529, + 1530, 1517, 
1521, 1522, 1524, 1531, 1532, 1533, + 1520, 1525, 1523, 1526, 1527, 1528, 1529, 1530, + 1518, 1523, 1526, 1527, 1528, 1529, 1530, 1520, + 1525, 1515, 1535, 1515, 1515, 1537, 1515, 1515, + 1515, 935, 936, 942, 943, 1541, 1547, 1548, + 1549, 1541, 937, 938, 941, 1541, 1542, 1541, + 936, 937, 938, 939, 940, 941, 1541, 1542, + 1541, 936, 937, 938, 939, 940, 941, 1541, + 1541, 1541, 1541, 1541, 944, 949, 950, 1550, + 1554, 1555, 1556, 1550, 945, 948, 1550, 1550, + 1550, 1550, 1550, 1557, 1559, 1560, 1561, 1562, + 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, + 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, + 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, + 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1557, + 951, 952, 956, 957, 958, 959, 960, 961, + 962, 963, 964, 965, 966, 967, 969, 971, + 972, 1004, 1045, 1060, 1067, 1069, 1071, 1091, + 1094, 1110, 1223, 1557, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1239, + 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, + 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, + 1256, 1257, 1258, 1259, 1261, 1262, 1263, 1264, + 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, + 1273, 1274, 1275, 1277, 1278, 1279, 1281, 1282, + 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, + 1291, 1292, 1293, 1294, 1296, 1297, 1298, 1299, + 1300, 1301, 1302, 1303, 1304, 1306, 1307, 1308, + 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, + 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1325, + 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, + 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, + 1342, 1343, 1344, 1345, 1347, 1348, 1349, 1350, + 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, + 1359, 1360, 1361, 1362, 1391, 1416, 1419, 1420, + 1422, 1429, 1430, 1433, 1437, 1449, 1454, 1455, + 1457, 1460, 1462, +} + +var _hcltok_trans_actions []byte = []byte{ + 151, 0, 93, 147, 109, 0, 0, 201, + 143, 0, 13, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 123, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 145, 198, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 149, + 127, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 35, 13, 13, 13, 0, 0, 37, 0, + 57, 43, 55, 180, 180, 180, 0, 0, + 0, 0, 77, 63, 75, 186, 0, 0, + 0, 0, 87, 192, 91, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 89, 81, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 95, + 0, 0, 121, 210, 113, 0, 13, 204, + 13, 0, 0, 115, 0, 117, 0, 125, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 13, 13, + 13, 207, 207, 207, 207, 207, 207, 13, + 13, 207, 13, 129, 141, 137, 99, 105, + 0, 135, 131, 0, 103, 97, 111, 101, + 133, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 107, 119, 139, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 21, 19, + 0, 0, 13, 23, 0, 13, 13, 29, + 0, 0, 0, 153, 174, 1, 1, 174, + 1, 1, 1, 156, 177, 3, 3, 177, + 3, 3, 3, 0, 0, 0, 0, 13, + 13, 13, 13, 174, 1, 1, 174, 174, + 174, 174, 174, 1, 1, 174, 174, 174, + 174, 177, 3, 3, 177, 177, 177, 177, + 1, 1, 0, 0, 13, 13, 13, 13, + 177, 3, 3, 177, 177, 177, 177, 3, + 3, 31, 0, 25, 15, 0, 27, 17, + 33, 0, 0, 0, 0, 45, 0, 183, + 183, 51, 0, 0, 0, 162, 213, 159, + 5, 5, 5, 5, 5, 5, 168, 217, + 165, 7, 7, 7, 7, 7, 7, 47, + 39, 49, 41, 53, 0, 0, 0, 65, + 0, 189, 189, 71, 0, 0, 67, 59, + 69, 61, 73, 79, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 13, 13, 13, 195, 195, 195, + 195, 195, 195, 13, 13, 195, 13, 83, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 85, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, +} + +var _hcltok_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 9, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 171, 0, 0, + 0, 0, 0, 0, 0, 0, 171, 0, + 0, 0, 0, 0, 0, 9, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, +} + +var _hcltok_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 11, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 11, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, +} + +var _hcltok_eof_trans []int16 = []int16{ + 0, 1, 4, 1, 1, 9, 9, 9, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 422, 422, 1, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 
672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 769, 769, 769, 769, 769, 775, 775, + 777, 779, 779, 777, 777, 779, 0, 0, + 787, 789, 787, 787, 789, 0, 0, 795, + 795, 797, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 
1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 0, 1196, 1197, 1198, 1197, 1198, 1198, 1198, + 1202, 1203, 1198, 1198, 1198, 1209, 1198, 1198, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 0, 1392, 1396, 1404, 1392, + 1392, 1396, 1396, 1404, 1396, 1392, 1404, 1404, + 1404, 1404, 1404, 1396, 1396, 1396, 1458, 1460, + 1458, 1463, 1465, 1465, 1465, 0, 1474, 1478, + 1487, 1496, 1498, 1500, 1500, 1500, 0, 1508, + 1511, 1513, 1515, 1515, 1515, 0, 1552, 1580, + 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, + 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, + 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, + 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, + 1580, 1580, +} + +const hcltok_start int = 1464 +const hcltok_first_final int = 1464 +const hcltok_error int = 0 + +const hcltok_en_stringTemplate int = 1515 +const hcltok_en_heredocTemplate int = 1541 +const hcltok_en_bareTemplate int = 1550 +const hcltok_en_identOnly int = 1557 +const hcltok_en_main int = 1464 + +// line 16 "scan_tokens.rl" + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + } + + // line 294 "scan_tokens.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + + // line 329 "scan_tokens.rl" + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func(ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func() { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + + // line 4372 "scan_tokens.go" + { + top = 0 + ts = 0 + te = 0 + act = 0 + } + + // line 4380 "scan_tokens.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _acts = int(_hcltok_from_state_actions[cs]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 6: + // line 1 "NONE" + + ts = p + + // line 4404 "scan_tokens.go" + } + } + + _keys = int(_hcltok_key_offsets[cs]) + _trans = int(_hcltok_index_offsets[cs]) + + _klen = int(_hcltok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper 
= _mid - 1 + case data[p] > _hcltok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hcltok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hcltok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hcltok_indicies[_trans]) + _eof_trans: + cs = int(_hcltok_trans_targs[_trans]) + + if _hcltok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hcltok_trans_actions[_trans]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 0: + // line 218 "scan_tokens.rl" + + p-- + + case 1: + // line 219 "scan_tokens.rl" + + p-- + + case 2: + // line 224 "scan_tokens.rl" + + p-- + + case 3: + // line 225 "scan_tokens.rl" + + p-- + + case 7: + // line 1 "NONE" + + te = p + 1 + + case 8: + // line 155 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 9: + // line 165 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 10: + // line 79 "scan_tokens.rl" + + te = p + 1 + { + token(TokenCQuote) + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + case 11: + // line 239 "scan_tokens.rl" + + te = p + 1 + { + token(TokenInvalid) + } + case 12: + // line 240 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 13: + // line 155 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 14: + // line 165 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 15: + // line 238 "scan_tokens.rl" + + te = p + p-- + { + token(TokenQuotedLit) + } + case 16: + // line 239 "scan_tokens.rl" + + te = p + p-- + { + token(TokenInvalid) + } + case 17: + // line 240 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 18: + // line 238 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenQuotedLit) + } + case 19: + // line 240 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 20: + // line 143 "scan_tokens.rl" + + act = 10 + case 21: + // line 248 "scan_tokens.rl" + + act = 11 + case 22: + // line 155 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + 
heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 23: + // line 165 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 24: + // line 106 "scan_tokens.rl" + + te = p + 1 + { + // This action is called specificially when a heredoc literal + // ends with a newline character. + + // This might actually be our end marker. + topdoc := &heredocs[len(heredocs)-1] + if topdoc.StartOfLine { + maybeMarker := bytes.TrimSpace(data[ts:te]) + if bytes.Equal(maybeMarker, topdoc.Marker) { + // We actually emit two tokens here: the end-of-heredoc + // marker first, and then separately the newline that + // follows it. This then avoids issues with the closing + // marker consuming a newline that would normally be used + // to mark the end of an attribute definition. + // We might have either a \n sequence or an \r\n sequence + // here, so we must handle both. + nls := te - 1 + nle := te + te-- + if data[te-1] == '\r' { + // back up one more byte + nls-- + te-- + } + token(TokenCHeredoc) + ts = nls + te = nle + token(TokenNewline) + heredocs = heredocs[:len(heredocs)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + } + + topdoc.StartOfLine = true + token(TokenStringLit) + } + case 25: + // line 248 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 26: + // line 155 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 27: + // line 165 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 28: + // line 143 "scan_tokens.rl" + + te = p + p-- + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 29: + // line 248 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 30: + // line 143 "scan_tokens.rl" + + p = (te) - 1 + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 31: + // line 1 "NONE" + + switch act { + case 0: + { + cs = 0 + goto _again + } + case 10: + { + p = (te) - 1 + + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. 
+ heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 11: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 32: + // line 151 "scan_tokens.rl" + + act = 14 + case 33: + // line 255 "scan_tokens.rl" + + act = 15 + case 34: + // line 155 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 35: + // line 165 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 36: + // line 151 "scan_tokens.rl" + + te = p + 1 + { + token(TokenStringLit) + } + case 37: + // line 255 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 38: + // line 155 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 39: + // line 165 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 40: + // line 151 "scan_tokens.rl" + + te = p + p-- + { + token(TokenStringLit) + } + case 41: + // line 255 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 42: + // line 151 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenStringLit) + } + case 43: + // line 1 "NONE" + + switch act { + case 0: + { + cs = 0 + goto _again + } + case 14: + { + p = (te) - 1 + + token(TokenStringLit) + } + case 15: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 44: + // line 259 "scan_tokens.rl" + + act = 16 + case 45: + // line 260 "scan_tokens.rl" + + act = 17 + case 46: + // line 260 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 47: + // line 261 "scan_tokens.rl" + + te = p + 1 + { + token(TokenInvalid) + } + case 48: + // line 259 "scan_tokens.rl" + + te = p + p-- + { + token(TokenIdent) + } + case 49: + // line 260 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 50: + // line 259 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenIdent) + } + case 51: + // line 260 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 52: + // line 1 "NONE" + + switch act { + case 16: + { + p = (te) - 1 + token(TokenIdent) + } + case 17: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 53: + // line 267 "scan_tokens.rl" + + act = 21 + case 54: + // line 269 "scan_tokens.rl" + + act = 22 + case 55: + // line 280 "scan_tokens.rl" + + act = 32 + case 56: + // line 290 "scan_tokens.rl" + + act = 38 + case 57: + // line 291 "scan_tokens.rl" + + act = 39 + case 58: + // line 269 "scan_tokens.rl" + + te = p + 1 + { + token(TokenComment) + } + case 59: + // line 270 "scan_tokens.rl" + + te = p + 1 + { + token(TokenNewline) + } + case 60: + // line 272 "scan_tokens.rl" + + te = p + 1 + { + token(TokenEqualOp) + } + case 61: + // line 273 "scan_tokens.rl" + + te = p + 1 + { + 
token(TokenNotEqual) + } + case 62: + // line 274 "scan_tokens.rl" + + te = p + 1 + { + token(TokenGreaterThanEq) + } + case 63: + // line 275 "scan_tokens.rl" + + te = p + 1 + { + token(TokenLessThanEq) + } + case 64: + // line 276 "scan_tokens.rl" + + te = p + 1 + { + token(TokenAnd) + } + case 65: + // line 277 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOr) + } + case 66: + // line 278 "scan_tokens.rl" + + te = p + 1 + { + token(TokenEllipsis) + } + case 67: + // line 279 "scan_tokens.rl" + + te = p + 1 + { + token(TokenFatArrow) + } + case 68: + // line 280 "scan_tokens.rl" + + te = p + 1 + { + selfToken() + } + case 69: + // line 175 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOBrace) + braces++ + } + case 70: + // line 180 "scan_tokens.rl" + + te = p + 1 + { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + token(TokenCBrace) + braces-- + } + } + case 71: + // line 192 "scan_tokens.rl" + + te = p + 1 + { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd) + braces-- + } + } + case 72: + // line 74 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOQuote) + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1515 + goto _again + } + } + case 73: + // line 84 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOHeredoc) + // the token is currently the whole heredoc introducer, like + // < 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 4: + // line 1 "NONE" + + ts = 0 + + case 5: + // line 1 "NONE" + + act = 0 + + // line 5252 "scan_tokens.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + if _hcltok_eof_trans[cs] > 0 { + _trans = int(_hcltok_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + + // line 352 "scan_tokens.rl" + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < hcltok_first_final { + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. 
+ f.emitToken(TokenEOF, len(data), len(data)) + + return f.Tokens +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl new file mode 100644 index 0000000..83ef65b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl @@ -0,0 +1,376 @@ + +package hclsyntax + +import ( + "bytes" + + "github.com/hashicorp/hcl2/hcl" +) + +// This file is generated from scan_tokens.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_tokens.rl here, so edit away!) + + machine hcltok; + write data; +}%% + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + } + + %%{ + include UnicodeDerived "unicode_derived.rl"; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + BrokenUTF8 = any - AnyUTF8; + + NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit); + NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.'))); + Ident = (ID_Start | '_') (ID_Continue | '-')*; + + # Symbols that just represent themselves are handled as a single rule. + SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`"; + + EqualOp = "=="; + NotEqual = "!="; + GreaterThanEqual = ">="; + LessThanEqual = "<="; + LogicalAnd = "&&"; + LogicalOr = "||"; + + Ellipsis = "..."; + FatArrow = "=>"; + + Newline = '\r' ? '\n'; + EndOfLine = Newline; + + BeginStringTmpl = '"'; + BeginHeredocTmpl = '<<' ('-')? Ident Newline; + + Comment = ( + ("#" (any - EndOfLine)* EndOfLine) | + ("//" (any - EndOfLine)* EndOfLine) | + ("/*" any* "*/") + ); + + # Note: hclwrite assumes that only ASCII spaces appear between tokens, + # and uses this assumption to recreate the spaces between tokens by + # looking at byte offset differences. This means it will produce + # incorrect results in the presence of tabs, but that's acceptable + # because the canonical style (which hclwrite itself can impose + # automatically is to never use tabs). + Spaces = (' ' | 0x09)+; + + action beginStringTemplate { + token(TokenOQuote); + fcall stringTemplate; + } + + action endStringTemplate { + token(TokenCQuote); + fret; + } + + action beginHeredocTemplate { + token(TokenOHeredoc); + // the token is currently the whole heredoc introducer, like + // < 0 { + heredocs[len(heredocs)-1].StartOfLine = false; + } + fcall main; + } + + action beginTemplateControl { + token(TokenTemplateControl); + braces++; + retBraces = append(retBraces, braces); + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false; + } + fcall main; + } + + action openBrace { + token(TokenOBrace); + braces++; + } + + action closeBrace { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd); + braces--; + retBraces = retBraces[0:len(retBraces)-1] + fret; + } else { + token(TokenCBrace); + braces--; + } + } + + action closeTemplateSeqEatWhitespace { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) 
+ if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd); + braces--; + retBraces = retBraces[0:len(retBraces)-1] + fret; + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd); + braces--; + } + } + + TemplateInterp = "${" ("~")?; + TemplateControl = "%{" ("~")?; + EndStringTmpl = '"'; + StringLiteralChars = (AnyUTF8 - ("\r"|"\n")); + TemplateStringLiteral = ( + ('$' ^'{' %{ fhold; }) | + ('%' ^'{' %{ fhold; }) | + ('\\' StringLiteralChars) | + (StringLiteralChars - ("$" | '%' | '"')) + )+; + HeredocStringLiteral = ( + ('$' ^'{' %{ fhold; }) | + ('%' ^'{' %{ fhold; }) | + (StringLiteralChars - ("$" | '%')) + )*; + BareStringLiteral = ( + ('$' ^'{') | + ('%' ^'{') | + (StringLiteralChars - ("$" | '%')) + )* Newline?; + + stringTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + EndStringTmpl => endStringTemplate; + TemplateStringLiteral => { token(TokenQuotedLit); }; + AnyUTF8 => { token(TokenInvalid); }; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + heredocTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + HeredocStringLiteral EndOfLine => heredocLiteralEOL; + HeredocStringLiteral => heredocLiteralMidline; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + bareTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + BareStringLiteral => bareTemplateLiteral; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + identOnly := |* + Ident => { token(TokenIdent) }; + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + main := |* + Spaces => {}; + NumberLit => { token(TokenNumberLit) }; + Ident => { token(TokenIdent) }; + + Comment => { token(TokenComment) }; + Newline => { token(TokenNewline) }; + + EqualOp => { token(TokenEqualOp); }; + NotEqual => { token(TokenNotEqual); }; + GreaterThanEqual => { token(TokenGreaterThanEq); }; + LessThanEqual => { token(TokenLessThanEq); }; + LogicalAnd => { token(TokenAnd); }; + LogicalOr => { token(TokenOr); }; + Ellipsis => { token(TokenEllipsis); }; + FatArrow => { token(TokenFatArrow); }; + SelfToken => { selfToken() }; + + "{" => openBrace; + "}" => closeBrace; + + "~}" => closeTemplateSeqEatWhitespace; + + BeginStringTmpl => beginStringTemplate; + BeginHeredocTmpl => beginHeredocTemplate; + + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + }%% + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + + %%{ + prepush { + stack = append(stack, 0); + } + postpop { + stack = stack[:len(stack)-1]; + } + }%% + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func (ty TokenType) { + f.emitToken(ty, ts, te) + } + 
selfToken := func () { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + + %%{ + write init nocs; + write exec; + }%% + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < hcltok_first_final { + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. + f.emitToken(TokenEOF, len(data), len(data)) + + return f.Tokens +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md new file mode 100644 index 0000000..49b9a3e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md @@ -0,0 +1,923 @@ +# HCL Native Syntax Specification + +This is the specification of the syntax and semantics of the native syntax +for HCL. HCL is a system for defining configuration languages for applications. +The HCL information model is designed to support multiple concrete syntaxes +for configuration, but this native syntax is considered the primary format +and is optimized for human authoring and maintenance, as opposed to machine +generation of configuration. + +The language consists of three integrated sub-languages: + +* The _structural_ language defines the overall hierarchical configuration + structure, and is a serialization of HCL bodies, blocks and attributes. + +* The _expression_ language is used to express attribute values, either as + literals or as derivations of other values. + +* The _template_ language is used to compose values together into strings, + as one of several types of expression in the expression language. + +In normal use these three sub-languages are used together within configuration +files to describe an overall configuration, with the structural language +being used at the top level. The expression and template languages can also +be used in isolation, to implement features such as REPLs, debuggers, and +integration into more limited HCL syntaxes such as the JSON profile. + +## Syntax Notation + +Within this specification a semi-formal notation is used to illustrate the +details of syntax. This notation is intended for human consumption rather +than machine consumption, with the following conventions: + +* A naked name starting with an uppercase letter is a global production, + common to all of the syntax specifications in this document. +* A naked name starting with a lowercase letter is a local production, + meaningful only within the specification where it is defined. +* Double and single quotes (`"` and `'`) are used to mark literal character + sequences, which may be either punctuation markers or keywords. +* The default operator for combining items, which has no punctuation, + is concatenation. +* The symbol `|` indicates that any one of its left and right operands may + be present.
+* The `*` symbol indicates zero or more repetitions of the item to its left. +* The `?` symbol indicates zero or one of the item to its left. +* Parentheses (`(` and `)`) are used to group items together to apply + the `|`, `*` and `?` operators to them collectively. + +The grammar notation does not fully describe the language. The prose may +augment or conflict with the illustrated grammar. In case of conflict, prose +has priority. + +## Source Code Representation + +Source code is unicode text expressed in the UTF-8 encoding. The language +itself does not perform unicode normalization, so syntax features such as +identifiers are sequences of unicode code points and so e.g. a precombined +accented character is distinct from a letter associated with a combining +accent. (String literals have some special handling with regard to Unicode +normalization which will be covered later in the relevant section.) + +UTF-8 encoded Unicode byte order marks are not permitted. Invalid or +non-normalized UTF-8 encoding is always a parse error. + +## Lexical Elements + +### Comments and Whitespace + +Comments and Whitespace are recognized as lexical elements but are ignored +except as described below. + +Whitespace is defined as a sequence of zero or more space characters +(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A) +are _not_ considered whitespace but are ignored as such in certain contexts. + +Horizontal tab characters (U+0009) are not considered to be whitespace and +are not valid within HCL native syntax. + +Comments serve as program documentation and come in two forms: + +* _Line comments_ start with either the `//` or `#` sequences and end with + the next newline sequence. A line comment is considered equivalent to a + newline sequence. + +* _Inline comments_ start with the `/*` sequence and end with the `*/` + sequence, and may have any characters within except the ending sequence. + An inline comment is considered equivalent to a whitespace sequence. + +Comments and whitespace cannot begin within other comments, or within +template literals except inside an interpolation sequence or template directive. + +### Identifiers + +Identifiers name entities such as blocks, attributes and expression variables. +Identifiers are interpreted as per [UAX #31][UAX31] Section 2. Specifically, +their syntax is defined in terms of the `ID_Start` and `ID_Continue` +character properties as follows: + +```ebnf +Identifier = ID_Start (ID_Continue | '-')*; +``` + +The Unicode specification provides the normative requirements for identifier +parsing. Non-normatively, the spirit of this specification is that `ID_Start` +consists of Unicode letters and certain unambiguous punctuation tokens, while +`ID_Continue` augments that set with Unicode digits, combining marks, etc. + +The dash character `-` is additionally allowed in identifiers, even though +that is not part of the unicode `ID_Continue` definition. This is to allow +attribute names and block type names to contain dashes, although underscores +as word separators are considered the idiomatic usage. + +[UAX31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax" + +### Keywords + +There are no globally-reserved words, but in some contexts certain identifiers +are reserved to function as keywords. These are discussed further in the +relevant documentation sections that follow. In such situations, the +identifier's role as a keyword supersedes any other valid interpretation that +may be possible.
Outside of these specific situations, the keywords have no +special meaning and are interpreted as regular identifiers. + +### Operators and Delimiters + +The following character sequences represent operators, delimiters, and other +special tokens: + +``` ++ && == < : { [ ( ${ +- || != > ? } ] ) %{ +* ! <= = . +/ >= => , +% ... +``` + +### Numeric Literals + +A numeric literal is a decimal representation of a +real number. It has an integer part, a fractional part, +and an exponent part. + +```ebnf +NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?; +decimal = '0' .. '9'; +expmark = ('e' | 'E') ("+" | "-")?; +``` + +## Structural Elements + +The structural language consists of syntax representing the following +constructs: + +* _Attributes_, which assign a value to a specified name. +* _Blocks_, which create a child body annotated by a type and optional labels. +* _Body Content_, which consists of a collection of attributes and blocks. + +These constructs correspond to the similarly-named concepts in the +language-agnostic HCL information model. + +```ebnf +ConfigFile = Body; +Body = (Attribute | Block)*; +Attribute = Identifier "=" Expression Newline; +Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline; +``` + +### Configuration Files + +A _configuration file_ is a sequence of characters whose top-level is +interpreted as a Body. + +### Bodies + +A _body_ is a collection of associated attributes and blocks. The meaning of +this association is defined by the calling application. + +### Attribute Definitions + +An _attribute definition_ assigns a value to a particular attribute name within +a body. Each distinct attribute name may be defined no more than once within a +single body. + +The attribute value is given as an expression, which is retained literally +for later evaluation by the calling application. + +### Blocks + +A _block_ creates a child body that is annotated with a block _type_ and +zero or more block _labels_. Blocks create a structural hierarchy which can be +interpreted by the calling application. + +Block labels can either be quoted literal strings or naked identifiers. + +## Expressions + +The expression sub-language is used within attribute definitions to specify +values. + +```ebnf +Expression = ( + ExprTerm | + Operation | + Conditional +); +``` + +### Types + +The value types used within the expression language are those defined by the +syntax-agnostic HCL information model. An expression may return any valid +type, but only a subset of the available types have first-class syntax. +A calling application may make other types available via _variables_ and +_functions_. + +### Expression Terms + +Expression _terms_ are the operands for unary and binary expressions, as well +as acting as expressions in their own right. + +```ebnf +ExprTerm = ( + LiteralValue | + CollectionValue | + TemplateExpr | + VariableExpr | + FunctionCall | + ForExpr | + ExprTerm Index | + ExprTerm GetAttr | + ExprTerm Splat | + "(" Expression ")" +); +``` + +The productions for these different term types are given in their corresponding +sections. + +Between the `(` and `)` characters denoting a sub-expression, newline +characters are ignored as whitespace. + +### Literal Values + +A _literal value_ immediately represents a particular value of a primitive +type. + +```ebnf +LiteralValue = ( + NumericLit | + "true" | + "false" | + "null" +); +``` + +* Numeric literals represent values of type _number_. +* The `true` and `false` keywords represent values of type _bool_. +* The `null` keyword represents a null value of the dynamic pseudo-type.
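+ +As a brief, non-normative illustration, each of these literal value kinds +can appear directly as an attribute value; the attribute names here are +arbitrary placeholders: + +```hcl +count = 3 +ratio = 1.5e-2 +enabled = true +nothing = null +```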
+ +String literals are not directly available in the expression sub-language, but +are available via the template sub-language, which can in turn be incorporated +via _template expressions_. + +### Collection Values + +A _collection value_ combines zero or more other expressions to produce a +collection value. + +```ebnf +CollectionValue = tuple | object; +tuple = "[" ( + (Expression ("," Expression)* ","?)? +) "]"; +object = "{" ( + (objectelem ("," objectelem)* ","?)? +) "}"; +objectelem = (Identifier | Expression) "=" Expression; +``` + +Only tuple and object values can be directly constructed via native syntax. +Tuple and object values can in turn be converted to list, set and map values +with other operations, which behave as defined by the syntax-agnostic HCL +information model. + +When specifying an object element, an identifier is interpreted as a literal +attribute name as opposed to a variable reference. To populate an item key +from a variable, use parentheses to disambiguate: + +* `{foo = "baz"}` is interpreted as an attribute literally named `foo`. +* `{(foo) = "baz"}` is interpreted as an attribute whose name is taken + from the variable named `foo`. + +Between the open and closing delimiters of these sequences, newline sequences +are ignored as whitespace. + +There is a syntax ambiguity between _for expressions_ and collection values +whose first element is a reference to a variable named `for`. The +_for expression_ interpretation has priority, so to produce a tuple whose +first element is the value of a variable named `for`, or an object with a +key named `for`, use parentheses to disambiguate: + +* `[for, foo, baz]` is a syntax error. +* `[(for), foo, baz]` is a tuple whose first element is the value of variable + `for`. +* `{for: 1, baz: 2}` is a syntax error. +* `{(for): 1, baz: 2}` is an object with an attribute literally named `for`. +* `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the + ambiguity by reordering. + +### Template Expressions + +A _template expression_ embeds a program written in the template sub-language +as an expression. Template expressions come in two forms: + +* A _quoted_ template expression is delimited by quote characters (`"`) and + defines a template as a single-line expression with escape characters. +* A _heredoc_ template expression is introduced by a `<<` sequence and + defines a template via a multi-line sequence terminated by a user-chosen + delimiter. + +In both cases the template interpolation and directive syntax is available for +use within the delimiters, and any text outside of these special sequences is +interpreted as a literal string.
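+ +For illustration, this non-normative example shows similar content written +first as a quoted template and then as a heredoc template; the attribute +names and the variable `name` are placeholders, not part of this +specification: + +```hcl +greeting_short = "Hello, ${name}!" + +greeting_long = <<EOT +Hello, ${name}. +Everything until the closing marker below is template content. +EOT +```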
+ +In _quoted_ template expressions any literal string sequences within the +template behave in a special way: literal newline sequences are not permitted +and instead _escape sequences_ can be included, starting with the +backslash `\`: + +``` + \n Unicode newline control character + \r Unicode carriage return control character + \t Unicode tab control character + \" Literal quote mark, used to prevent interpretation as end of string + \\ Literal backslash, used to prevent interpretation as escape sequence + \uNNNN Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits) + \UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits) +``` + +The _heredoc_ template expression type is introduced by either `<<` or `<<-`, +followed by an identifier. The template expression ends when the given +identifier subsequently appears again on a line of its own. + +If a heredoc template is introduced with the `<<-` symbol, any literal string +at the start of each line is analyzed to find the minimum number of leading +spaces, and then that number of prefix spaces is removed from all line-leading +literal strings. The final closing marker may also have an arbitrary number +of spaces preceding it on its line. + +```ebnf +TemplateExpr = quotedTemplate | heredocTemplate; +quotedTemplate = (as defined in prose above); +heredocTemplate = ( + ("<<" | "<<-") Identifier Newline + (content as defined in prose above) + Identifier Newline +); +``` + +A quoted template expression containing only a single literal string serves +as a syntax for defining literal string _expressions_. In certain contexts +the template syntax is restricted in this manner: + +```ebnf +StringLit = '"' (quoted literals as defined in prose above) '"'; +``` + +The `StringLit` production permits the escape sequences discussed for quoted +template expressions as above, but does _not_ permit template interpolation +or directive sequences. + +### Variables and Variable Expressions + +A _variable_ is a value that has been assigned a symbolic name. Variables are +made available for use in expressions by the calling application, by populating +the _global scope_ used for expression evaluation. + +Variables can also be created by expressions themselves, which always creates +a _child scope_ that incorporates the variables from its parent scope but +(re-)defines zero or more names with new values. + +The value of a variable is accessed using a _variable expression_, which is +a standalone `Identifier` whose name corresponds to a defined variable: + +```ebnf +VariableExpr = Identifier; +``` + +Variables in a particular scope are immutable, but child scopes may _hide_ +a variable from an ancestor scope by defining a new variable of the same name. +When looking up variables, the most locally-defined variable of the given name +is used, and ancestor-scoped variables of the same name cannot be accessed. + +No direct syntax is provided for declaring or assigning variables, but other +expression constructs implicitly create child scopes and define variables as +part of their evaluation. + +### Functions and Function Calls + +A _function_ is an operation that has been assigned a symbolic name. Functions +are made available for use in expressions by the calling application, by +populating the _function table_ used for expression evaluation. + +The namespace of functions is distinct from the namespace of variables. 
A function and a variable may share the same name with no implication that they
+are in any way related.
+
+A function can be executed via a _function call_ expression:
+
+```ebnf
+FunctionCall = Identifier "(" Arguments ")";
+Arguments = (
+    () |
+    (Expression ("," Expression)* ("," | "...")?)
+);
+```
+
+The definition of functions and the semantics of calling them are defined by
+the syntax-agnostic HCL information model. The given arguments are mapped
+onto the function's _parameters_ and the result of a function call expression
+is the return value of the named function when given those arguments.
+
+If the final argument expression is followed by the ellipsis symbol (`...`),
+the final argument expression must evaluate to either a list or tuple value.
+The elements of the value are each mapped to a single parameter of the
+named function, beginning at the first parameter remaining after all other
+argument expressions have been mapped.
+
+Within the parentheses that delimit the function arguments, newline sequences
+are ignored as whitespace.
+
+### For Expressions
+
+A _for expression_ is a construct for building a collection by projecting
+the items from another collection.
+
+```ebnf
+ForExpr = forTupleExpr | forObjectExpr;
+forTupleExpr = "[" forIntro Expression forCond? "]";
+forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}";
+forIntro = "for" Identifier ("," Identifier)? "in" Expression ":";
+forCond = "if" Expression;
+```
+
+The punctuation used to delimit a for expression decides whether it will produce
+a tuple value (`[` and `]`) or an object value (`{` and `}`).
+
+The "introduction" is equivalent in both cases: the keyword `for` followed by
+either one or two identifiers separated by a comma which define the temporary
+variable names used for iteration, followed by the keyword `in` and then
+an expression that must evaluate to a value that can be iterated. The
+introduction is then terminated by the colon (`:`) symbol.
+
+If only one identifier is provided, it is the name of a variable that will
+be temporarily assigned the value of each element during iteration. If both
+are provided, the first is the key and the second is the value.
+
+Tuple, object, list, map, and set types are iterable. The type of collection
+used defines how the key and value variables are populated:
+
+* For tuple and list types, the _key_ is the zero-based index into the
+  sequence for each element, and the _value_ is the element value. The
+  elements are visited in index order.
+* For object and map types, the _key_ is the string attribute name or element
+  key, and the _value_ is the attribute or element value. The elements are
+  visited in the order defined by a lexicographic sort of the attribute names
+  or keys.
+* For set types, the _key_ and _value_ are both the element value. The elements
+  are visited in an undefined but consistent order.
+
+The expression after the colon and (in the case of object `for`) the expression
+after the `=>` are both evaluated once for each element of the source
+collection, in a local scope that defines the key and value variable names
+specified.
+
+The results of evaluating these expressions for each input element are used
+to populate an element in the new collection. In the case of tuple `for`, the
+single expression becomes an element, appending values to the tuple in visit
+order.
+In the case of object `for`, the pair of expressions is used as an
+attribute name and value respectively, creating an element in the resulting
+object.
+
+In the case of object `for`, it is an error if two input elements produce
+the same result from the attribute name expression, since duplicate
+attributes are not possible. If the ellipsis symbol (`...`) appears
+immediately after the value expression, this activates the grouping mode in
+which each value in the resulting object is a _tuple_ of all of the values
+that were produced against each distinct key.
+
+* `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
+* `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
+* `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
+* `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute
+  `a` is defined twice.
+* `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
+
+If the `if` keyword is used after the element expression(s), it applies an
+additional predicate that can be used to conditionally filter elements from
+the source collection from consideration. The expression following `if` is
+evaluated once for each source element, in the same scope used for the
+element expression(s). It must evaluate to a boolean value; if `true`, the
+element will be evaluated as normal, while if `false` the element will be
+skipped.
+
+* `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
+
+If the collection value, element expression(s) or condition expression return
+unknown values that are otherwise type-valid, the result is a value of the
+dynamic pseudo-type.
+
+### Index Operator
+
+The _index_ operator returns the value of a single element of a collection
+value. It is a postfix operator and can be applied to any value that has
+a tuple, object, map, or list type.
+
+```ebnf
+Index = "[" Expression "]";
+```
+
+The expression delimited by the brackets is the _key_ by which an element
+will be looked up.
+
+If the index operator is applied to a value of tuple or list type, the
+key expression must be a non-negative integer number representing the
+zero-based element index to access. If applied to a value of object or map
+type, the key expression must be a string representing the attribute name
+or element key. If the given key value is not of the appropriate type, a
+conversion is attempted using the conversion rules from the HCL
+syntax-agnostic information model.
+
+An error is produced if the given key expression does not correspond to
+an element in the collection, because it is of an unconvertible type,
+because it is outside the range of elements for a tuple or list, or because
+the given attribute or key does not exist.
+
+If either the collection or the key are an unknown value of an
+otherwise-suitable type, the return value is an unknown value whose type
+matches what type would be returned given known values, or a value of the
+dynamic pseudo-type if type information alone cannot determine a suitable
+return type.
+
+Within the brackets that delimit the index key, newline sequences are ignored
+as whitespace.
+
+### Attribute Access Operator
+
+The _attribute access_ operator returns the value of a single attribute in
+an object value. It is a postfix operator and can be applied to any value
+that has an object type.
+
+```ebnf
+GetAttr = "." Identifier;
+```
+
+The given identifier is interpreted as the name of the attribute to access.
+An error is produced if the object to which the operator is applied does not
+have an attribute with the given name.
+
+If the object is an unknown value of a type that has the attribute named, the
+result is an unknown value of the attribute's type.
+
+### Splat Operators
+
+The _splat operators_ allow convenient access to attributes or elements of
+elements in a tuple, list, or set value.
+
+There are two kinds of "splat" operator:
+
+* The _attribute-only_ splat operator supports only attribute lookups into
+  the elements from a list, but supports an arbitrary number of them.
+
+* The _full_ splat operator additionally supports indexing into the elements
+  from a list, and allows any combination of attribute access and index
+  operations.
+
+```ebnf
+Splat = attrSplat | fullSplat;
+attrSplat = "." "*" GetAttr*;
+fullSplat = "[" "*" "]" (GetAttr | Index)*;
+```
+
+The splat operators can be thought of as shorthands for common operations that
+could otherwise be performed using _for expressions_:
+
+* `tuple.*.foo.bar[0]` is approximately equivalent to
+  `[for v in tuple: v.foo.bar][0]`.
+* `tuple[*].foo.bar[0]` is approximately equivalent to
+  `[for v in tuple: v.foo.bar[0]]`.
+
+Note the difference in how the trailing index operator is interpreted in
+each case. This different interpretation is the key difference between the
+_attribute-only_ and _full_ splat operators.
+
+Splat operators have one additional behavior compared to the equivalent
+_for expressions_ shown above: if a splat operator is applied to a value that
+is _not_ of tuple, list, or set type, the value is coerced automatically into
+a single-value list of the value type:
+
+* `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
+  is a single object.
+* `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
+  is a single number.
+
+If the left operand of a splat operator is an unknown value of any type, the
+result is a value of the dynamic pseudo-type.
+
+### Operations
+
+Operations apply a particular operator to either one or two expression terms.
+
+```ebnf
+Operation = unaryOp | binaryOp;
+unaryOp = ("-" | "!") ExprTerm;
+binaryOp = ExprTerm binaryOperator ExprTerm;
+binaryOperator = compareOperator | arithmeticOperator | logicOperator;
+compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">=";
+arithmeticOperator = "+" | "-" | "*" | "/" | "%";
+logicOperator = "&&" | "||" | "!";
+```
+
+The unary operators have the highest precedence.
+
+The binary operators are grouped into the following precedence levels:
+
+```
+Level    Operators
+  6      * / %
+  5      + -
+  4      > >= < <=
+  3      == !=
+  2      &&
+  1      ||
+```
+
+Higher values of "level" bind tighter. Operators within the same precedence
+level have left-to-right associativity. For example, `x / y * z` is equivalent
+to `(x / y) * z`.
+
+### Comparison Operators
+
+Comparison operators always produce boolean values, as a result of testing
+the relationship between two values.
+
+The two equality operators apply to values of any type:
+
+```
+a == b   equal
+a != b   not equal
+```
+
+Two values are equal if they are of identical types and their values are
+equal as defined in the HCL syntax-agnostic information model. The equality
+operators are commutative and are negations of one another, such that
+`(a == b) == !(a != b)` and `(a == b) == (b == a)` for all values `a` and `b`.
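+
+As a non-normative illustration, the following Go sketch evaluates a few
+operation expressions to demonstrate precedence and associativity, relying on
+the same hypothetical `hclsyntax.ParseExpression` helper as the earlier
+sketch:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"x": cty.NumberIntVal(8),
+			"y": cty.NumberIntVal(4),
+			"z": cty.NumberIntVal(2),
+		},
+	}
+
+	for _, src := range []string{
+		`x / y * z`,          // (x / y) * z = 4: left-to-right within one level
+		`x - y == y`,         // true: arithmetic binds tighter than ==
+		`!(x < y) && x >= z`, // true: comparisons bind tighter than &&
+	} {
+		expr, diags := hclsyntax.ParseExpression([]byte(src), "ops.hcl", hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			panic(diags.Error())
+		}
+		val, moreDiags := expr.Value(ctx)
+		if moreDiags.HasErrors() {
+			panic(moreDiags.Error())
+		}
+		// %v prints the cty.Value in its Go-oriented debug form.
+		fmt.Printf("%-20s => %v\n", src, val)
+	}
+}
+```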
+ +The four numeric comparison operators apply only to numbers: + +``` +a < b less than +a <= b less than or equal to +a > b greater than +a >= b greater than or equal to +``` + +If either operand of a comparison operator is a correctly-typed unknown value +or a value of the dynamic pseudo-type, the result is an unknown boolean. + +### Arithmetic Operators + +Arithmetic operators apply only to number values and always produce number +values as results. + +``` +a + b sum (addition) +a - b difference (subtraction) +a * b product (multiplication) +a / b quotient (division) +a % b remainder (modulo) +-a negation +``` + +Arithmetic operations are considered to be performed in an arbitrary-precision +number space. + +If either operand of an arithmetic operator is an unknown number or a value +of the dynamic pseudo-type, the result is an unknown number. + +### Logic Operators + +Logic operators apply only to boolean values and always produce boolean values +as results. + +``` +a && b logical AND +a || b logical OR +!a logical NOT +``` + +If either operand of a logic operator is an unknown bool value or a value +of the dynamic pseudo-type, the result is an unknown bool value. + +### Conditional Operator + +The conditional operator allows selecting from one of two expressions based on +the outcome of a boolean expression. + +```ebnf +Conditional = Expression "?" Expression ":" Expression; +``` + +The first expression is the _predicate_, which is evaluated and must produce +a boolean result. If the predicate value is `true`, the result of the second +expression is the result of the conditional. If the predicate value is +`false`, the result of the third expression is the result of the conditional. + +The second and third expressions must be of the same type or must be able to +unify into a common type using the type unification rules defined in the +HCL syntax-agnostic information model. This unified type is the result type +of the conditional, with both expressions converted as necessary to the +unified type. + +If the predicate is an unknown boolean value or a value of the dynamic +pseudo-type then the result is an unknown value of the unified type of the +other two expressions. + +If either the second or third expressions produce errors when evaluated, +these errors are passed through only if the erroneous expression is selected. +This allows for expressions such as +`length(some_list) > 0 ? some_list[0] : default` (given some suitable `length` +function) without producing an error when the predicate is `false`. + +## Templates + +The template sub-language is used within template expressions to concisely +combine strings and other values to produce other strings. It can also be +used in isolation as a standalone template language. + +```ebnf +Template = ( + TemplateLiteral | + TemplateInterpolation | + TemplateDirective +)* +TemplateDirective = TemplateIf | TemplateFor; +``` + +A template behaves like an expression that always returns a string value. +The different elements of the template are evaluated and combined into a +single string to return. If any of the elements produce an unknown string +or a value of the dynamic pseudo-type, the result is an unknown string. + +An important use-case for standalone templates is to enable the use of +expressions in alternative HCL syntaxes where a native expression grammar is +not available. For example, the HCL JSON profile treats the values of JSON +strings as standalone templates when attributes are evaluated in expression +mode. 
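+
+A non-normative Go sketch of standalone template evaluation follows, assuming
+a `hclsyntax.ParseTemplate` helper with the same shape as the expression
+parser used in the earlier sketches:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	// A standalone template mixing a literal, an interpolation and a
+	// directive.
+	src := `Hello, ${name}!%{ if excited } Woo!%{ endif }`
+
+	expr, diags := hclsyntax.ParseTemplate([]byte(src), "greeting.tmpl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	val, moreDiags := expr.Value(&hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"name":    cty.StringVal("HCL"),
+			"excited": cty.True,
+		},
+	})
+	if moreDiags.HasErrors() {
+		panic(moreDiags.Error())
+	}
+	fmt.Println(val.AsString()) // Hello, HCL! Woo!
+}
+```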
+
+### Template Literals
+
+A template literal is a literal sequence of characters to include in the
+resulting string. When the template sub-language is used standalone, a
+template literal can contain any Unicode character, with the exception
+of the sequences that introduce interpolations and directives, and of the
+sequences that escape those introductions.
+
+The interpolation and directive introductions are escaped by doubling their
+leading characters. The `${` sequence is escaped as `$${` and the `%{`
+sequence is escaped as `%%{`.
+
+When the template sub-language is embedded in the expression language via
+_template expressions_, additional constraints and transforms are applied to
+template literals, as described in the definition of template expressions.
+
+The value of a template literal can be modified by _strip markers_ in any
+interpolations or directives that are adjacent to it. A strip marker is
+a tilde (`~`) placed immediately after the opening `{` or before the closing
+`}` of a template sequence:
+
+* `hello ${~ "world" }` produces `"helloworld"`.
+* `%{ if true ~} hello %{~ endif }` produces `"hello"`.
+
+When a strip marker is present, any spaces adjacent to it in the corresponding
+string literal (if any) are removed before producing the final value. Space
+characters are interpreted as per Unicode's definition.
+
+Stripping is done at syntax level rather than value level. Values returned
+by interpolations or directives are not subject to stripping:
+
+* `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
+  because the space is not in a template literal directly adjacent to the
+  strip marker.
+
+### Template Interpolations
+
+An _interpolation sequence_ evaluates an expression (written in the
+expression sub-language), converts the result to a string value, and
+replaces itself with the resulting string.
+
+```ebnf
+TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}");
+```
+
+If the expression result cannot be converted to a string, an error is
+produced.
+
+### Template If Directive
+
+The template `if` directive is the template equivalent of the
+_conditional expression_, allowing selection of one of two sub-templates based
+on the value of a predicate expression.
+
+```ebnf
+TemplateIf = (
+    ("%{" | "%{~") "if" Expression ("}" | "~}")
+    Template
+    (
+        ("%{" | "%{~") "else" ("}" | "~}")
+        Template
+    )?
+    ("%{" | "%{~") "endif" ("}" | "~}")
+);
+```
+
+The evaluation of the `if` directive is equivalent to the conditional
+expression, with the following exceptions:
+
+* The two sub-templates always produce strings, and thus the result value is
+  also always a string.
+* The `else` clause may be omitted, in which case the conditional's third
+  expression result is implied to be the empty string.
+
+### Template For Directive
+
+The template `for` directive is the template equivalent of the _for expression_,
+producing zero or more copies of its sub-template based on the elements of
+a collection.
+
+```ebnf
+TemplateFor = (
+    ("%{" | "%{~") "for" Identifier ("," Identifier)? "in" Expression ("}" | "~}")
+    Template
+    ("%{" | "%{~") "endfor" ("}" | "~}")
+);
+```
+
+The evaluation of the `for` directive is equivalent to the _for expression_
+when producing a tuple, with the following exceptions:
+
+* The sub-template always produces a string.
+* There is no equivalent of the "if" clause on the for expression.
+* The elements of the resulting tuple are all converted to strings and
+  concatenated to produce a flat string result.
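+
+For example (non-normative, using the same hypothetical template parsing
+helper as above), the directive below visits a list and flattens the result
+into one string:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	src := `%{ for s in names }${s},%{ endfor }`
+
+	expr, diags := hclsyntax.ParseTemplate([]byte(src), "list.tmpl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	val, moreDiags := expr.Value(&hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"names": cty.ListVal([]cty.Value{
+				cty.StringVal("a"),
+				cty.StringVal("b"),
+			}),
+		},
+	})
+	if moreDiags.HasErrors() {
+		panic(moreDiags.Error())
+	}
+	fmt.Printf("%q\n", val.AsString()) // "a,b," (one flat string, not a tuple)
+}
+```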
+
+### Template Interpolation Unwrapping
+
+As a special case, a template that consists only of a single interpolation,
+with no surrounding literals, directives or other interpolations, is
+"unwrapped". In this case, the result of the interpolation expression is
+returned verbatim, without conversion to string.
+
+This special case exists primarily to enable the native template language
+to be used inside strings in alternative HCL syntaxes that lack a first-class
+template or expression syntax. Unwrapping allows arbitrary expressions to be
+used to populate attributes when strings in such languages are interpreted
+as templates.
+
+* `${true}` produces the boolean value `true`.
+* `${"${true}"}` produces the boolean value `true`, because both the inner
+  and outer interpolations are subject to unwrapping.
+* `hello ${true}` produces the string `"hello true"`.
+* `${""}${true}` produces the string `"true"` because there are two
+  interpolation sequences, even though one produces an empty result.
+* `%{ for v in [true] }${v}%{ endfor }` produces the string `"true"` because
+  the presence of the `for` directive circumvents the unwrapping even though
+  the final result is a single value.
+
+In some contexts this unwrapping behavior may be circumvented by the calling
+application, by converting the final template result to string. This is
+necessary, for example, if a standalone template is being used to produce
+the direct contents of a file, since the result in that case must always be a
+string.
+
+## Static Analysis
+
+The HCL static analysis operations are implemented for some expression types
+in the native syntax, as described in the following sections.
+
+A goal for static analysis of the native syntax is for the interpretation to
+be as consistent as possible with the dynamic evaluation interpretation of
+the given expression, though some deviations are intentionally made in order
+to maximize the potential for analysis.
+
+### Static List
+
+The tuple construction syntax can be interpreted as a static list. All of
+the expression elements given are returned as the static list elements,
+with no further interpretation.
+
+### Static Map
+
+The object construction syntax can be interpreted as a static map. All of the
+key/value pairs given are returned as the static pairs, with no further
+interpretation.
+
+The usual requirement that an attribute name be interpretable as a string
+does not apply to this static analysis, allowing callers to provide map-like
+constructs with different key types by building on the map syntax.
+
+### Static Call
+
+The function call syntax can be interpreted as a static call. The called
+function name is returned verbatim and the given argument expressions are
+returned as the static arguments, with no further interpretation.
+
+### Static Traversal
+
+A variable expression and any attached attribute access operations and
+constant index operations can be interpreted as a static traversal.
+
+The keywords `true`, `false` and `null` can also be interpreted as
+static traversals, behaving as if they were references to variables of those
+names, to allow callers to redefine the meaning of those keywords in certain
+contexts.
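+
+A non-normative Go sketch of the static list and static traversal analyses,
+assuming the analysis helpers (`hcl.ExprList`, `hcl.AbsTraversalForExpr`)
+exposed by the syntax-agnostic `hcl` package:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+func main() {
+	expr, diags := hclsyntax.ParseExpression([]byte(`[a.b, c[0], true]`), "static.hcl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// Static list: recover the element expressions without evaluating them.
+	elems, diags := hcl.ExprList(expr)
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// Static traversal: each element here can be seen as a traversal,
+	// including the keyword `true`, per the rule described above.
+	for _, e := range elems {
+		trav, moreDiags := hcl.AbsTraversalForExpr(e)
+		if moreDiags.HasErrors() {
+			continue
+		}
+		fmt.Println(trav.RootName()) // a, c, true
+	}
+}
+```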
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
new file mode 100644
index 0000000..d69f65b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
@@ -0,0 +1,379 @@
+package hclsyntax
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// AsHCLBlock returns the block data expressed as a *hcl.Block.
+func (b *Block) AsHCLBlock() *hcl.Block {
+	lastHeaderRange := b.TypeRange
+	if len(b.LabelRanges) > 0 {
+		lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
+	}
+
+	return &hcl.Block{
+		Type:   b.Type,
+		Labels: b.Labels,
+		Body:   b.Body,
+
+		DefRange:    hcl.RangeBetween(b.TypeRange, lastHeaderRange),
+		TypeRange:   b.TypeRange,
+		LabelRanges: b.LabelRanges,
+	}
+}
+
+// Body is the implementation of hcl.Body for the HCL native syntax.
+type Body struct {
+	Attributes Attributes
+	Blocks     Blocks
+
+	// These are used with PartialContent to produce a "remaining items"
+	// body to return. They are nil on all bodies fresh out of the parser.
+	hiddenAttrs  map[string]struct{}
+	hiddenBlocks map[string]struct{}
+
+	SrcRange hcl.Range
+	EndRange hcl.Range // Final token of the body, for reporting missing items
+}
+
+// Assert that *Body implements hcl.Body
+var assertBodyImplBody hcl.Body = &Body{}
+
+func (b *Body) walkChildNodes(w internalWalkFunc) {
+	b.Attributes = w(b.Attributes).(Attributes)
+	b.Blocks = w(b.Blocks).(Blocks)
+}
+
+func (b *Body) Range() hcl.Range {
+	return b.SrcRange
+}
+
+func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	content, remainHCL, diags := b.PartialContent(schema)
+
+	// Now we'll see if anything actually remains, to produce errors about
+	// extraneous items.
+	remain := remainHCL.(*Body)
+
+	for name, attr := range b.Attributes {
+		if _, hidden := remain.hiddenAttrs[name]; !hidden {
+			var suggestions []string
+			for _, attrS := range schema.Attributes {
+				if _, defined := content.Attributes[attrS.Name]; defined {
+					continue
+				}
+				suggestions = append(suggestions, attrS.Name)
+			}
+			suggestion := nameSuggestion(name, suggestions)
+			if suggestion != "" {
+				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+			} else {
+				// Is there a block of the same name?
+				for _, blockS := range schema.Blocks {
+					if blockS.Type == name {
+						suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name)
+						break
+					}
+				}
+			}
+
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Unsupported attribute",
+				Detail:   fmt.Sprintf("An attribute named %q is not expected here.%s", name, suggestion),
+				Subject:  &attr.NameRange,
+			})
+		}
+	}
+
+	for _, block := range b.Blocks {
+		blockTy := block.Type
+		if _, hidden := remain.hiddenBlocks[blockTy]; !hidden {
+			var suggestions []string
+			for _, blockS := range schema.Blocks {
+				suggestions = append(suggestions, blockS.Type)
+			}
+			suggestion := nameSuggestion(blockTy, suggestions)
+			if suggestion != "" {
+				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+			} else {
+				// Is there an attribute of the same name?
+ for _, attrS := range schema.Attributes { + if attrS.Name == blockTy { + suggestion = fmt.Sprintf(" Did you mean to define attribute %q?", blockTy) + break + } + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion), + Subject: &block.TypeRange, + }) + } + } + + return content, diags +} + +func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var blocks hcl.Blocks + var diags hcl.Diagnostics + hiddenAttrs := make(map[string]struct{}) + hiddenBlocks := make(map[string]struct{}) + + if b.hiddenAttrs != nil { + for k, v := range b.hiddenAttrs { + hiddenAttrs[k] = v + } + } + if b.hiddenBlocks != nil { + for k, v := range b.hiddenBlocks { + hiddenBlocks[k] = v + } + } + + for _, attrS := range schema.Attributes { + name := attrS.Name + attr, exists := b.Attributes[name] + _, hidden := hiddenAttrs[name] + if hidden || !exists { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + continue + } + + hiddenAttrs[name] = struct{}{} + attrs[name] = attr.AsHCLAttribute() + } + + blocksWanted := make(map[string]hcl.BlockHeaderSchema) + for _, blockS := range schema.Blocks { + blocksWanted[blockS.Type] = blockS + } + + for _, block := range b.Blocks { + if _, hidden := hiddenBlocks[block.Type]; hidden { + continue + } + blockS, wanted := blocksWanted[block.Type] + if !wanted { + continue + } + + if len(block.Labels) > len(blockS.LabelNames) { + name := block.Type + if len(blockS.LabelNames) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "No labels are expected for %s blocks.", name, + ), + Subject: block.LabelRanges[0].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "Only %d labels (%s) are expected for %s blocks.", + len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name, + ), + Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } + continue + } + + if len(block.Labels) < len(blockS.LabelNames) { + name := block.Type + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name), + Detail: fmt.Sprintf( + "All %s blocks must have %d labels (%s).", + name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), + ), + Subject: &block.OpenBraceRange, + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + continue + } + + blocks = append(blocks, block.AsHCLBlock()) + } + + // We hide blocks only after we've processed all of them, since otherwise + // we can't process more than one of the same type. 
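+	// (Blocks of a hidden type will be skipped entirely by any subsequent
+	// PartialContent or Content call on the returned "remaining" body,
+	// mirroring how consumed attributes are hidden above.)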
+ for _, blockS := range schema.Blocks { + hiddenBlocks[blockS.Type] = struct{}{} + } + + remain := &Body{ + Attributes: b.Attributes, + Blocks: b.Blocks, + + hiddenAttrs: hiddenAttrs, + hiddenBlocks: hiddenBlocks, + + SrcRange: b.SrcRange, + EndRange: b.EndRange, + } + + return &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + + MissingItemRange: b.MissingItemRange(), + }, remain, diags +} + +func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var diags hcl.Diagnostics + + if len(b.Blocks) > 0 { + example := b.Blocks[0] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s block", example.Type), + Detail: "Blocks are not allowed here.", + Context: &example.TypeRange, + }) + // we will continue processing anyway, and return the attributes + // we are able to find so that certain analyses can still be done + // in the face of errors. + } + + if b.Attributes == nil { + return attrs, diags + } + + for name, attr := range b.Attributes { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + attrs[name] = attr.AsHCLAttribute() + } + + return attrs, diags +} + +func (b *Body) MissingItemRange() hcl.Range { + return b.EndRange +} + +// Attributes is the collection of attribute definitions within a body. +type Attributes map[string]*Attribute + +func (a Attributes) walkChildNodes(w internalWalkFunc) { + for k, attr := range a { + a[k] = w(attr).(*Attribute) + } +} + +// Range returns the range of some arbitrary point within the set of +// attributes, or an invalid range if there are no attributes. +// +// This is provided only to complete the Node interface, but has no practical +// use. +func (a Attributes) Range() hcl.Range { + // An attributes doesn't really have a useful range to report, since + // it's just a grouping construct. So we'll arbitrarily take the + // range of one of the attributes, or produce an invalid range if we have + // none. In practice, there's little reason to ask for the range of + // an Attributes. + for _, attr := range a { + return attr.Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Attribute represents a single attribute definition within a body. +type Attribute struct { + Name string + Expr Expression + + SrcRange hcl.Range + NameRange hcl.Range + EqualsRange hcl.Range +} + +func (a *Attribute) walkChildNodes(w internalWalkFunc) { + a.Expr = w(a.Expr).(Expression) +} + +func (a *Attribute) Range() hcl.Range { + return a.SrcRange +} + +// AsHCLAttribute returns the block data expressed as a *hcl.Attribute. +func (a *Attribute) AsHCLAttribute() *hcl.Attribute { + return &hcl.Attribute{ + Name: a.Name, + Expr: a.Expr, + + Range: a.SrcRange, + NameRange: a.NameRange, + } +} + +// Blocks is the list of nested blocks within a body. +type Blocks []*Block + +func (bs Blocks) walkChildNodes(w internalWalkFunc) { + for i, block := range bs { + bs[i] = w(block).(*Block) + } +} + +// Range returns the range of some arbitrary point within the list of +// blocks, or an invalid range if there are no blocks. +// +// This is provided only to complete the Node interface, but has no practical +// use. 
+func (bs Blocks) Range() hcl.Range { + if len(bs) > 0 { + return bs[0].Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Block represents a nested block structure +type Block struct { + Type string + Labels []string + Body *Body + + TypeRange hcl.Range + LabelRanges []hcl.Range + OpenBraceRange hcl.Range + CloseBraceRange hcl.Range +} + +func (b *Block) walkChildNodes(w internalWalkFunc) { + b.Body = w(b.Body).(*Body) +} + +func (b *Block) Range() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go new file mode 100644 index 0000000..bcaa15f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go @@ -0,0 +1,272 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" +) + +// Token represents a sequence of bytes from some HCL code that has been +// tagged with a type and its range within the source file. +type Token struct { + Type TokenType + Bytes []byte + Range hcl.Range +} + +// Tokens is a slice of Token. +type Tokens []Token + +// TokenType is an enumeration used for the Type field on Token. +type TokenType rune + +const ( + // Single-character tokens are represented by their own character, for + // convenience in producing these within the scanner. However, the values + // are otherwise arbitrary and just intended to be mnemonic for humans + // who might see them in debug output. + + TokenOBrace TokenType = '{' + TokenCBrace TokenType = '}' + TokenOBrack TokenType = '[' + TokenCBrack TokenType = ']' + TokenOParen TokenType = '(' + TokenCParen TokenType = ')' + TokenOQuote TokenType = '«' + TokenCQuote TokenType = '»' + TokenOHeredoc TokenType = 'H' + TokenCHeredoc TokenType = 'h' + + TokenStar TokenType = '*' + TokenSlash TokenType = '/' + TokenPlus TokenType = '+' + TokenMinus TokenType = '-' + TokenPercent TokenType = '%' + + TokenEqual TokenType = '=' + TokenEqualOp TokenType = '≔' + TokenNotEqual TokenType = '≠' + TokenLessThan TokenType = '<' + TokenLessThanEq TokenType = '≤' + TokenGreaterThan TokenType = '>' + TokenGreaterThanEq TokenType = '≥' + + TokenAnd TokenType = '∧' + TokenOr TokenType = '∨' + TokenBang TokenType = '!' + + TokenDot TokenType = '.' + TokenComma TokenType = ',' + + TokenEllipsis TokenType = '…' + TokenFatArrow TokenType = '⇒' + + TokenQuestion TokenType = '?' + TokenColon TokenType = ':' + + TokenTemplateInterp TokenType = '∫' + TokenTemplateControl TokenType = 'λ' + TokenTemplateSeqEnd TokenType = '∎' + + TokenQuotedLit TokenType = 'Q' // might contain backslash escapes + TokenStringLit TokenType = 'S' // cannot contain backslash escapes + TokenNumberLit TokenType = 'N' + TokenIdent TokenType = 'I' + + TokenComment TokenType = 'C' + + TokenNewline TokenType = '\n' + TokenEOF TokenType = '␄' + + // The rest are not used in the language but recognized by the scanner so + // we can generate good diagnostics in the parser when users try to write + // things that might work in other languages they are familiar with, or + // simply make incorrect assumptions about the HCL language. 
+
+	TokenBitwiseAnd TokenType = '&'
+	TokenBitwiseOr  TokenType = '|'
+	TokenBitwiseNot TokenType = '~'
+	TokenBitwiseXor TokenType = '^'
+	TokenStarStar   TokenType = '➚'
+	TokenBacktick   TokenType = '`'
+	TokenSemicolon  TokenType = ';'
+	TokenTabs       TokenType = '␉'
+	TokenInvalid    TokenType = '�'
+	TokenBadUTF8    TokenType = '💩'
+
+	// TokenNil is a placeholder for when a token is required but none is
+	// available, e.g. when reporting errors. The scanner will never produce
+	// this as part of a token stream.
+	TokenNil TokenType = '\x00'
+)
+
+func (t TokenType) GoString() string {
+	return fmt.Sprintf("hclsyntax.%s", t.String())
+}
+
+type scanMode int
+
+const (
+	scanNormal scanMode = iota
+	scanTemplate
+	scanIdentOnly
+)
+
+type tokenAccum struct {
+	Filename string
+	Bytes    []byte
+	Pos      hcl.Pos
+	Tokens   []Token
+}
+
+func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
+	// Walk through our buffer to figure out how much we need to adjust
+	// the start pos to get our end pos.
+
+	start := f.Pos
+	start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
+	start.Byte = startOfs
+
+	end := start
+	end.Byte = endOfs
+	b := f.Bytes[startOfs:endOfs]
+	for len(b) > 0 {
+		advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
+		if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') {
+			end.Line++
+			end.Column = 1
+		} else {
+			end.Column++
+		}
+		b = b[advance:]
+	}
+
+	f.Pos = end
+
+	f.Tokens = append(f.Tokens, Token{
+		Type:  ty,
+		Bytes: f.Bytes[startOfs:endOfs],
+		Range: hcl.Range{
+			Filename: f.Filename,
+			Start:    start,
+			End:      end,
+		},
+	})
+}
+
+type heredocInProgress struct {
+	Marker      []byte
+	StartOfLine bool
+}
+
+// checkInvalidTokens does a simple pass across the given tokens and generates
+// diagnostics for tokens that should _never_ appear in HCL source. This
+// is intended to avoid the need for the parser to have special support
+// for them all over.
+//
+// Returns diagnostics with no errors if everything seems acceptable.
+// Otherwise, returns one or more error diagnostics, though it tries to limit
+// repetition of the same information.
+func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	toldBitwise := 0
+	toldExponent := 0
+	toldBacktick := 0
+	toldSemicolon := 0
+	toldTabs := 0
+	toldBadUTF8 := 0
+
+	for _, tok := range tokens {
+		switch tok.Type {
+		case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
+			if toldBitwise < 4 {
+				var suggestion string
+				switch tok.Type {
+				case TokenBitwiseAnd:
+					suggestion = " Did you mean boolean AND (\"&&\")?"
+				case TokenBitwiseOr:
+					suggestion = " Did you mean boolean OR (\"||\")?"
+				case TokenBitwiseNot:
+					suggestion = " Did you mean boolean NOT (\"!\")?"
+				}
+
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
+					Subject:  &tok.Range,
+				})
+				toldBitwise++
+			}
+		case TokenStarStar:
+			if toldExponent < 1 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
+					Subject:  &tok.Range,
+				})
+
+				toldExponent++
+			}
+		case TokenBacktick:
+			// Only report for alternating (even) backticks, so we won't report both start and ends of the same
+			// backtick-quoted string.
+			if toldBacktick < 4 && (toldBacktick%2) == 0 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid character",
+					Detail:   "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<
+#
+# This script uses the unicode spec to generate a Ragel state machine
+# that recognizes unicode alphanumeric characters. It generates 5
+# character classes: uupper, ulower, ualpha, udigit, and ualnum.
+# Currently supported encodings are UTF-8 [default] and UCS-4.
+#
+# Usage: unicode2ragel.rb [options]
+#     -e, --encoding [ucs4 | utf8]     Data encoding
+#     -h, --help                       Show this message
+#
+# This script was originally written as part of the Ferret search
+# engine library.
+#
+# Author: Rakan El-Khalil
+
+require 'optparse'
+require 'open-uri'
+
+ENCODINGS = [ :utf8, :ucs4 ]
+ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
+DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
+DEFAULT_MACHINE_NAME= "WChar"
+
+###
+# Display vars & default option
+
+TOTAL_WIDTH = 80
+RANGE_WIDTH = 23
+@encoding = :utf8
+@chart_url = DEFAULT_CHART_URL
+machine_name = DEFAULT_MACHINE_NAME
+properties = []
+@output = $stdout
+
+###
+# Option parsing
+
+cli_opts = OptionParser.new do |opts|
+  opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
+    @encoding = o.downcase.to_sym
+  end
+  opts.on("-h", "--help", "Show this message") do
+    puts opts
+    exit
+  end
+  opts.on("-u", "--url URL", "URL to process") do |o|
+    @chart_url = o
+  end
+  opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
+    machine_name = o
+  end
+  opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
+    properties = o
+  end
+  opts.on("-o", "--output FILE", "output file") do |o|
+    @output = File.new(o, "w+")
+  end
+end
+
+cli_opts.parse(ARGV)
+unless ENCODINGS.member? @encoding
+  puts "Invalid encoding: #{@encoding}"
+  puts cli_opts
+  exit
+end
+
+##
+# Downloads the document at url and yields every alpha line's hex
+# range and description.
+
+def each_alpha( url, property )
+  open( url ) do |file|
+    file.each_line do |line|
+      next if line =~ /^#/;
+      next if line !~ /; #{property} #/;
+
+      range, description = line.split(/;/)
+      range.strip!
+      description.gsub!(/.*#/, '').strip!
+
+      if range =~ /\.\./
+           start, stop = range.split '..'
+      else start = stop = range
+      end
+
+      yield start.hex .. stop.hex, description
+    end
+  end
+end
+
+###
+# Formats to hex at minimum width
+
+def to_hex( n )
+  r = "%0X" % n
+  r = "0#{r}" unless (r.length % 2).zero?
+  r
+end
+
+###
+# UCS4 is just a straight hex conversion of the unicode codepoint.
+ +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. + +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! +end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. 
+ +def count_codepoints( code ) + code.split(' ').inject(1) do |acc, elt| + if elt =~ /0x(.+)\.\.0x(.+)/ + if @encoding == :utf8 + acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) + else + acc * ($2.hex - $1.hex + 1) + end + else + acc + end + end +end + +def is_valid?( range, desc, codes ) + spec_count = 1 + spec_count = $1.to_i if desc =~ /\[(\d+)\]/ + range_count = range.end - range.begin + 1 + + sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } + sum == spec_count and sum == range_count +end + +## +# Generate the state maching to stdout + +def generate_machine( name, property ) + pipe = " " + @output.puts " #{name} = " + each_alpha( @chart_url, property ) do |range, desc| + + codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) + + #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless + # is_valid? range, desc, codes + + range_width = codes.map { |a| a.size }.max + range_width = RANGE_WIDTH if range_width < RANGE_WIDTH + + desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 + desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH + + if desc.size > desc_width + desc = desc[0..desc_width - 4] + "..." + end + + codes.each_with_index do |r, idx| + desc = "" unless idx.zero? + code = "%-#{range_width}s" % r + @output.puts " #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts " ;" + @output.puts "" +end + +@output.puts < 0 && ret[0] == '.' { + ret = ret[1:] + } + return ret +} + +func navigationStepsRev(v node, offset int) []string { + switch tv := v.(type) { + case *objectVal: + // Do any of our properties have an object that contains the target + // offset? + for _, attr := range tv.Attrs { + k := attr.Name + av := attr.Value + + switch av.(type) { + case *objectVal, *arrayVal: + // okay + default: + continue + } + + if av.Range().ContainsOffset(offset) { + return append(navigationStepsRev(av, offset), "."+k) + } + } + case *arrayVal: + // Do any of our elements contain the target offset? 
+ for i, elem := range tv.Values { + + switch elem.(type) { + case *objectVal, *arrayVal: + // okay + default: + continue + } + + if elem.Range().ContainsOffset(offset) { + return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go new file mode 100644 index 0000000..246fd1c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go @@ -0,0 +1,491 @@ +package json + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/hashicorp/hcl2/hcl" +) + +func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { + tokens := scan(buf, pos{ + Filename: filename, + Pos: hcl.Pos{ + Byte: 0, + Line: 1, + Column: 1, + }, + }) + p := newPeeker(tokens) + node, diags := parseValue(p) + if len(diags) == 0 && p.Peek().Type != tokenEOF { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous data after value", + Detail: "Extra characters appear after the JSON value.", + Subject: p.Peek().Range.Ptr(), + }) + } + return node, diags +} + +func parseValue(p *peeker) (node, hcl.Diagnostics) { + tok := p.Peek() + + wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { + if n != nil { + return n, diags + } + return invalidVal{tok.Range}, diags + } + + switch tok.Type { + case tokenBraceO: + return wrapInvalid(parseObject(p)) + case tokenBrackO: + return wrapInvalid(parseArray(p)) + case tokenNumber: + return wrapInvalid(parseNumber(p)) + case tokenString: + return wrapInvalid(parseString(p)) + case tokenKeyword: + return wrapInvalid(parseKeyword(p)) + case tokenBraceC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing attribute value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenBrackC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing array element value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenEOF: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing value", + Detail: "The JSON data ends prematurely.", + Subject: &tok.Range, + }, + }) + default: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid start of value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + } +} + +func tokenCanStartValue(tok token) bool { + switch tok.Type { + case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: + return true + default: + return false + } +} + +func parseObject(p *peeker) (node, hcl.Diagnostics) { + var diags hcl.Diagnostics + + open := p.Read() + attrs := []*objectAttr{} + + // recover is used to shift the peeker to what seems to be the end of + // our object, so that when we encounter an error we leave the peeker + // at a reasonable point in the token stream to continue parsing. + recover := func(tok token) { + open := 1 + for { + switch tok.Type { + case tokenBraceO: + open++ + case tokenBraceC: + open-- + if open <= 1 { + return + } + case tokenEOF: + // Ran out of source before we were able to recover, + // so we'll bail here and let the caller deal with it. 
+ return + } + tok = p.Read() + } + } + +Token: + for { + if p.Peek().Type == tokenBraceC { + break Token + } + + keyNode, keyDiags := parseValue(p) + diags = diags.Extend(keyDiags) + if keyNode == nil { + return nil, diags + } + + keyStrNode, ok := keyNode.(*stringVal) + if !ok { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object attribute name", + Detail: "A JSON object attribute name must be a string", + Subject: keyNode.StartRange().Ptr(), + }) + } + + key := keyStrNode.Value + + colon := p.Read() + if colon.Type != tokenColon { + recover(colon) + + if colon.Type == tokenBraceC || colon.Type == tokenComma { + // Catch common mistake of using braces instead of brackets + // for an object. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing object value", + Detail: "A JSON object attribute must have a value, introduced by a colon.", + Subject: &colon.Range, + }) + } + + if colon.Type == tokenEquals { + // Possible confusion with native HCL syntax. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute value colon", + Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", + Subject: &colon.Range, + }) + } + + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute value colon", + Detail: "A colon must appear between an object attribute's name and its value.", + Subject: &colon.Range, + }) + } + + valNode, valDiags := parseValue(p) + diags = diags.Extend(valDiags) + if valNode == nil { + return nil, diags + } + + attrs = append(attrs, &objectAttr{ + Name: key, + Value: valNode, + NameRange: keyStrNode.SrcRange, + }) + + switch p.Peek().Type { + case tokenComma: + comma := p.Read() + if p.Peek().Type == tokenBraceC { + // Special error message for this common mistake + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Trailing comma in object", + Detail: "JSON does not permit a trailing comma after the final attribute in an object.", + Subject: &comma.Range, + }) + } + continue Token + case tokenEOF: + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed object", + Detail: "No closing brace was found for this JSON object.", + Subject: &open.Range, + }) + case tokenBrackC: + // Consume the bracket anyway, so that we don't return with the peeker + // at a strange place. + p.Read() + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Mismatched braces", + Detail: "A JSON object must be closed with a brace, not a bracket.", + Subject: p.Peek().Range.Ptr(), + }) + case tokenBraceC: + break Token + default: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute seperator comma", + Detail: "A comma must appear between each attribute declaration in an object.", + Subject: p.Peek().Range.Ptr(), + }) + } + + } + + close := p.Read() + return &objectVal{ + Attrs: attrs, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +func parseArray(p *peeker) (node, hcl.Diagnostics) { + var diags hcl.Diagnostics + + open := p.Read() + vals := []node{} + + // recover is used to shift the peeker to what seems to be the end of + // our array, so that when we encounter an error we leave the peeker + // at a reasonable point in the token stream to continue parsing. 
+ recover := func(tok token) { + open := 1 + for { + switch tok.Type { + case tokenBrackO: + open++ + case tokenBrackC: + open-- + if open <= 1 { + return + } + case tokenEOF: + // Ran out of source before we were able to recover, + // so we'll bail here and let the caller deal with it. + return + } + tok = p.Read() + } + } + +Token: + for { + if p.Peek().Type == tokenBrackC { + break Token + } + + valNode, valDiags := parseValue(p) + diags = diags.Extend(valDiags) + if valNode == nil { + return nil, diags + } + + vals = append(vals, valNode) + + switch p.Peek().Type { + case tokenComma: + comma := p.Read() + if p.Peek().Type == tokenBrackC { + // Special error message for this common mistake + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Trailing comma in array", + Detail: "JSON does not permit a trailing comma after the final attribute in an array.", + Subject: &comma.Range, + }) + } + continue Token + case tokenColon: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid array value", + Detail: "A colon is not used to introduce values in a JSON array.", + Subject: p.Peek().Range.Ptr(), + }) + case tokenEOF: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed object", + Detail: "No closing bracket was found for this JSON array.", + Subject: &open.Range, + }) + case tokenBraceC: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Mismatched brackets", + Detail: "A JSON array must be closed with a bracket, not a brace.", + Subject: p.Peek().Range.Ptr(), + }) + case tokenBrackC: + break Token + default: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute seperator comma", + Detail: "A comma must appear between each value in an array.", + Subject: p.Peek().Range.Ptr(), + }) + } + + } + + close := p.Read() + return &arrayVal{ + Values: vals, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func parseNumber(p *peeker) (node, hcl.Diagnostics) { + tok := p.Read() + + // Use encoding/json to validate the number syntax. + // TODO: Do this more directly to produce better diagnostics. + var num json.Number + err := json.Unmarshal(tok.Bytes, &num) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON number", + Detail: fmt.Sprintf("There is a syntax error in the given JSON number."), + Subject: &tok.Range, + }, + } + } + + f, _, err := big.ParseFloat(string(num), 10, 512, big.ToNearestEven) + if err != nil { + // Should never happen if above passed, since JSON numbers are a subset + // of what big.Float can parse... 
+ return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON number", + Detail: fmt.Sprintf("There is a syntax error in the given JSON number."), + Subject: &tok.Range, + }, + } + } + + return &numberVal{ + Value: f, + SrcRange: tok.Range, + }, nil +} + +func parseString(p *peeker) (node, hcl.Diagnostics) { + tok := p.Read() + var str string + err := json.Unmarshal(tok.Bytes, &str) + + if err != nil { + var errRange hcl.Range + if serr, ok := err.(*json.SyntaxError); ok { + errOfs := serr.Offset + errPos := tok.Range.Start + errPos.Byte += int(errOfs) + + // TODO: Use the byte offset to properly count unicode + // characters for the column, and mark the whole of the + // character that was wrong as part of our range. + errPos.Column += int(errOfs) + + errEndPos := errPos + errEndPos.Byte++ + errEndPos.Column++ + + errRange = hcl.Range{ + Filename: tok.Range.Filename, + Start: errPos, + End: errEndPos, + } + } else { + errRange = tok.Range + } + + var contextRange *hcl.Range + if errRange != tok.Range { + contextRange = &tok.Range + } + + // FIXME: Eventually we should parse strings directly here so + // we can produce a more useful error message in the face fo things + // such as invalid escapes, etc. + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON string", + Detail: fmt.Sprintf("There is a syntax error in the given JSON string."), + Subject: &errRange, + Context: contextRange, + }, + } + } + + return &stringVal{ + Value: str, + SrcRange: tok.Range, + }, nil +} + +func parseKeyword(p *peeker) (node, hcl.Diagnostics) { + tok := p.Read() + s := string(tok.Bytes) + + switch s { + case "true": + return &booleanVal{ + Value: true, + SrcRange: tok.Range, + }, nil + case "false": + return &booleanVal{ + Value: false, + SrcRange: tok.Range, + }, nil + case "null": + return &nullVal{ + SrcRange: tok.Range, + }, nil + case "undefined", "NaN", "Infinity": + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON keyword", + Detail: fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s), + Subject: &tok.Range, + }, + } + default: + var dym string + if suggest := keywordSuggestion(s); suggest != "" { + dym = fmt.Sprintf(" Did you mean %q?", suggest) + } + + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON keyword", + Detail: fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym), + Subject: &tok.Range, + }, + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go new file mode 100644 index 0000000..fc7bbf5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go @@ -0,0 +1,25 @@ +package json + +type peeker struct { + tokens []token + pos int +} + +func newPeeker(tokens []token) *peeker { + return &peeker{ + tokens: tokens, + pos: 0, + } +} + +func (p *peeker) Peek() token { + return p.tokens[p.pos] +} + +func (p *peeker) Read() token { + ret := p.tokens[p.pos] + if ret.Type != tokenEOF { + p.pos++ + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/public.go b/vendor/github.com/hashicorp/hcl2/hcl/json/public.go new file mode 100644 index 0000000..2728aa1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/public.go @@ -0,0 +1,94 @@ +package json + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/hcl2/hcl" +) + +// Parse attempts to parse the given buffer as JSON and, if successful, returns +// a hcl.File for the HCL 
+//
+// This is not a generic JSON parser. Instead, it deals only with the profile
+// of JSON used to express HCL configuration.
+//
+// The returned file is valid only if the returned diagnostics returns false
+// from its HasErrors method. If HasErrors returns true, the file represents
+// the subset of data that was able to be parsed, which may be none.
+func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
+	rootNode, diags := parseFileContent(src, filename)
+
+	switch rootNode.(type) {
+	case *objectVal, *arrayVal:
+		// okay
+	default:
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Root value must be object",
+			Detail:   "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.",
+			Subject:  rootNode.StartRange().Ptr(),
+		})
+
+		// Since we've already produced an error message for this being
+		// invalid, we'll return an empty placeholder here so that trying to
+		// extract content from our root body won't produce a redundant
+		// error saying the same thing again in more general terms.
+		fakePos := hcl.Pos{
+			Byte:   0,
+			Line:   1,
+			Column: 1,
+		}
+		fakeRange := hcl.Range{
+			Filename: filename,
+			Start:    fakePos,
+			End:      fakePos,
+		}
+		rootNode = &objectVal{
+			Attrs:     []*objectAttr{},
+			SrcRange:  fakeRange,
+			OpenRange: fakeRange,
+		}
+	}
+
+	file := &hcl.File{
+		Body: &body{
+			val: rootNode,
+		},
+		Bytes: src,
+		Nav:   navigation{rootNode},
+	}
+	return file, diags
+}
+
+// ParseFile is a convenience wrapper around Parse that first attempts to load
+// data from the given filename, passing the result to Parse if successful.
+//
+// If the file cannot be read, an error diagnostic with nil context is returned.
+func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to open file",
+				Detail:   fmt.Sprintf("The file %q could not be opened.", filename),
+			},
+		}
+	}
+	defer f.Close()
+
+	src, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to read file",
+				Detail:   fmt.Sprintf("The file %q was opened, but an error occurred while reading it.", filename),
+			},
+		}
+	}
+
+	return Parse(src, filename)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
new file mode 100644
index 0000000..0a8378b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
@@ -0,0 +1,293 @@
+package json
+
+import (
+	"fmt"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+//go:generate stringer -type tokenType scanner.go
+type tokenType rune
+
+const (
+	tokenBraceO  tokenType = '{'
+	tokenBraceC  tokenType = '}'
+	tokenBrackO  tokenType = '['
+	tokenBrackC  tokenType = ']'
+	tokenComma   tokenType = ','
+	tokenColon   tokenType = ':'
+	tokenKeyword tokenType = 'K'
+	tokenString  tokenType = 'S'
+	tokenNumber  tokenType = 'N'
+	tokenEOF     tokenType = '␄'
+	tokenInvalid tokenType = 0
+	tokenEquals  tokenType = '=' // used only for reminding the user of JSON syntax
+)
+
+type token struct {
+	Type  tokenType
+	Bytes []byte
+	Range hcl.Range
+}
+
+// scan returns the primary tokens for the given JSON buffer in sequence.
+//
+// The responsibility of this pass is to just mark the slices of the buffer
+// as being of various types. It is lax in how it interprets the multi-byte
+// token types keyword, string and number, preferring to capture erroneous
+// extra bytes that we presume the user intended to be part of the token
+// so that we can generate more helpful diagnostics in the parser.
+func scan(buf []byte, start pos) []token {
+	var tokens []token
+	p := start
+	for {
+		if len(buf) == 0 {
+			tokens = append(tokens, token{
+				Type:  tokenEOF,
+				Bytes: nil,
+				Range: posRange(p, p),
+			})
+			return tokens
+		}
+
+		buf, p = skipWhitespace(buf, p)
+
+		if len(buf) == 0 {
+			tokens = append(tokens, token{
+				Type:  tokenEOF,
+				Bytes: nil,
+				Range: posRange(p, p),
+			})
+			return tokens
+		}
+
+		start = p
+
+		first := buf[0]
+		switch {
+		case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=':
+			p.Pos.Column++
+			p.Pos.Byte++
+			tokens = append(tokens, token{
+				Type:  tokenType(first),
+				Bytes: buf[0:1],
+				Range: posRange(start, p),
+			})
+			buf = buf[1:]
+		case first == '"':
+			var tokBuf []byte
+			tokBuf, buf, p = scanString(buf, p)
+			tokens = append(tokens, token{
+				Type:  tokenString,
+				Bytes: tokBuf,
+				Range: posRange(start, p),
+			})
+		case byteCanStartNumber(first):
+			var tokBuf []byte
+			tokBuf, buf, p = scanNumber(buf, p)
+			tokens = append(tokens, token{
+				Type:  tokenNumber,
+				Bytes: tokBuf,
+				Range: posRange(start, p),
+			})
+		case byteCanStartKeyword(first):
+			var tokBuf []byte
+			tokBuf, buf, p = scanKeyword(buf, p)
+			tokens = append(tokens, token{
+				Type:  tokenKeyword,
+				Bytes: tokBuf,
+				Range: posRange(start, p),
+			})
+		default:
+			tokens = append(tokens, token{
+				Type:  tokenInvalid,
+				Bytes: buf[:1],
+				Range: start.Range(1, 1),
+			})
+			// If we've encountered an invalid token then we might as well
+			// stop scanning, since the parser won't proceed beyond this
+			// point.
+			return tokens
+		}
+	}
+}
+
+func byteCanStartNumber(b byte) bool {
+	switch b {
+	// We are slightly more tolerant than JSON requires here since we
+	// expect the parser will make a stricter interpretation of the
+	// number bytes, but we specifically don't allow 'e' or 'E' here
+	// since we want the scanner to treat that as the start of an
+	// invalid keyword instead, to produce more intelligible error messages.
+	case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		return true
+	default:
+		return false
+	}
+}
+
+func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) {
+	// The scanner doesn't check that the sequence of digit-ish bytes is
+	// in a valid order. The parser must do this when decoding a number
+	// token.
+	var i int
+	p := start
+Byte:
+	for i = 0; i < len(buf); i++ {
+		switch buf[i] {
+		case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			p.Pos.Byte++
+			p.Pos.Column++
+		default:
+			break Byte
+		}
+	}
+	return buf[:i], buf[i:], p
+}
+
+func byteCanStartKeyword(b byte) bool {
+	switch {
+	// We allow any sequence of alphabetical characters here, even though
+	// JSON is more constrained, so that we can collect what we presume
+	// the user intended to be a single keyword and then check its validity
+	// in the parser, where we can generate better diagnostics.
+	// So e.g. we want to be able to say:
+	//   unrecognized keyword "True". Did you mean "true"?
+	case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z'):
+		return true
+	default:
+		return false
+	}
+}
+
+func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) {
+	var i int
+	p := start
+Byte:
+	for i = 0; i < len(buf); i++ {
+		b := buf[i]
+		switch {
+		case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_':
+			p.Pos.Byte++
+			p.Pos.Column++
+		default:
+			break Byte
+		}
+	}
+	return buf[:i], buf[i:], p
+}
+
+func scanString(buf []byte, start pos) ([]byte, []byte, pos) {
+	// The scanner doesn't validate correct use of escapes, etc. It pays
+	// attention to escapes only for the purpose of identifying the closing
+	// quote character. It's the parser's responsibility to do proper
+	// validation.
+	//
+	// The scanner also doesn't specifically detect unterminated string
+	// literals, though they can be identified in the parser by checking if
+	// the final byte in a string token is the double-quote character.
+
+	// Skip the opening quote symbol
+	i := 1
+	p := start
+	p.Pos.Byte++
+	p.Pos.Column++
+	escaping := false
+Byte:
+	for i < len(buf) {
+		b := buf[i]
+
+		switch {
+		case b == '\\':
+			escaping = !escaping
+			p.Pos.Byte++
+			p.Pos.Column++
+			i++
+		case b == '"':
+			p.Pos.Byte++
+			p.Pos.Column++
+			i++
+			if !escaping {
+				break Byte
+			}
+			escaping = false
+		case b < 32:
+			break Byte
+		default:
+			// Advance by one grapheme cluster, so that we consider each
+			// grapheme to be a "column".
+			// Ignoring error because this scanner cannot produce errors.
+			advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true)
+
+			p.Pos.Byte += advance
+			p.Pos.Column++
+			i += advance
+
+			escaping = false
+		}
+	}
+	return buf[:i], buf[i:], p
+}
+
+func skipWhitespace(buf []byte, start pos) ([]byte, pos) {
+	var i int
+	p := start
+Byte:
+	for i = 0; i < len(buf); i++ {
+		switch buf[i] {
+		case ' ':
+			p.Pos.Byte++
+			p.Pos.Column++
+		case '\n':
+			p.Pos.Byte++
+			p.Pos.Column = 1
+			p.Pos.Line++
+		case '\r':
+			// For the purpose of line/column counting we consider a
+			// carriage return to take up no space, assuming that it will
+			// be paired up with a newline (on Windows, for example) that
+			// will account for both of them.
+			p.Pos.Byte++
+		case '\t':
+			// We arbitrarily count a tab as if it were two spaces, because
+			// we need to choose _some_ number here. This means any system
+			// that renders code on-screen with markers must itself treat
+			// tabs as a pair of spaces for rendering purposes, or instead
+			// use the byte offset and back into its own column position.
+			p.Pos.Byte++
+			p.Pos.Column += 2
+		default:
+			break Byte
+		}
+	}
+	return buf[i:], p
+}
+
+type pos struct {
+	Filename string
+	Pos      hcl.Pos
+}
+
+func (p *pos) Range(byteLen, charLen int) hcl.Range {
+	start := p.Pos
+	end := p.Pos
+	end.Byte += byteLen
+	end.Column += charLen
+	return hcl.Range{
+		Filename: p.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+func posRange(start, end pos) hcl.Range {
+	return hcl.Range{
+		Filename: start.Filename,
+		Start:    start.Pos,
+		End:      end.Pos,
+	}
+}
+
+func (t token) GoString() string {
+	return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
new file mode 100644
index 0000000..9b33c7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
@@ -0,0 +1,405 @@
+# HCL JSON Syntax Specification
+
+This is the specification for the JSON serialization for hcl. HCL is a system
+for defining configuration languages for applications. The HCL information
+model is designed to support multiple concrete syntaxes for configuration,
+and this JSON-based format complements [the native syntax](../hclsyntax/spec.md)
+by being easy to machine-generate, whereas the native syntax is oriented
+towards human authoring and maintenance.
+
+This syntax is defined in terms of JSON as defined in
+[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON
+grammar as-is, and merely defines a specific methodology for interpreting
+JSON constructs into HCL structural elements and expressions.
+
+This mapping is defined such that valid JSON-serialized HCL input can be
+_produced_ using standard JSON implementations in various programming languages.
+_Parsing_ such JSON has some additional constraints beyond what is normally
+required of JSON parsers, so a specialized parser may be required that
+is able to:
+
+* Preserve the relative ordering of properties defined in an object.
+* Preserve multiple definitions of the same property name.
+* Preserve numeric values to the precision required by the number type
+  in [the HCL syntax-agnostic information model](../spec.md).
+* Retain source location information for parsed tokens/constructs in order
+  to produce good error messages.
+
+## Structural Elements
+
+[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an
+abstract container for attribute definitions and child blocks. A body is
+represented in JSON as either a single JSON object or a JSON array of objects.
+
+Body processing is in terms of JSON object properties, visited in the order
+they appear in the input. Where a body is represented by a single JSON object,
+the properties of that object are visited in order. Where a body is
+represented by a JSON array, each of its elements is visited in order and
+each element has its properties visited in order. If any element of the array
+is not a JSON object then the input is erroneous.
+
+When a body is being processed in the _dynamic attributes_ mode, the allowance
+of a JSON array in the previous paragraph does not apply and instead a single
+JSON object is always required.
+
+As defined in the language-agnostic model, body processing is in terms
+of a schema which provides context for interpreting the body's content. For
+JSON bodies, the schema is crucial to allow differentiation of attribute
+definitions and block definitions, both of which are represented via object
+properties.
+
+The special property name `"//"`, when used in an object representing a HCL
+body, is parsed and ignored. A property with this name can be used to
+include human-readable comments. (This special property name is _not_
+processed in this way for any _other_ HCL constructs that are represented as
+JSON objects.)
+
+### Attributes
+
+Where the given schema describes an attribute with a given name, the object
+property with the matching name — if present — serves as the attribute's
+definition.
+
+When a body is being processed in the _dynamic attributes_ mode, each object
+property serves as an attribute definition for the attribute whose name
+matches the property name.
+
+The value of an attribute definition property is interpreted as an _expression_,
+as described in a later section.
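+
+As a non-normative illustration, an application using the Go implementation
+in this package might declare and read such a schema as follows. This is a
+minimal sketch of calling code, not part of the specification; the filename
+and attribute name here are arbitrary:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/json"
+)
+
+func main() {
+	f, diags := json.Parse([]byte(`{"foo": "bar baz"}`), "example.json")
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	schema := &hcl.BodySchema{
+		Attributes: []hcl.AttributeSchema{
+			{Name: "foo", Required: true},
+		},
+	}
+	content, diags := f.Body.Content(schema)
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// A nil EvalContext selects literal-only mode, so any template
+	// sequences in the string are returned verbatim.
+	v, _ := content.Attributes["foo"].Expr.Value(nil)
+	fmt.Println(v.AsString()) // bar baz
+}
+```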
+
+Given a schema that calls for an attribute named "foo", a JSON object like
+the following provides a definition for that attribute:
+
+```json
+{
+  "foo": "bar baz"
+}
+```
+
+### Blocks
+
+Where the given schema describes a block with a given type name, each object
+property with the matching name serves as a definition of zero or more blocks
+of that type.
+
+Processing of child blocks is in terms of nested JSON objects and arrays.
+If the schema defines one or more _labels_ for the block type, a nested JSON
+object or JSON array of objects is required for each labelling level. These
+are flattened to a single ordered sequence of object properties using the
+same algorithm as for body content as defined above. Each object property
+serves as a label value at the corresponding level.
+
+After any labelling levels, the next nested value is either a JSON object
+representing a single block body, or a JSON array of JSON objects that each
+represent a single block body. Use of an array accommodates the definition
+of multiple blocks that have identical type and labels.
+
+Given a schema that calls for a block type named "foo" with no labels, the
+following JSON objects are all valid definitions of zero or more blocks of this
+type:
+
+```json
+{
+  "foo": {
+    "child_attr": "baz"
+  }
+}
+```
+
+```json
+{
+  "foo": [
+    {
+      "child_attr": "baz"
+    },
+    {
+      "child_attr": "boz"
+    }
+  ]
+}
+```
+
+```json
+{
+  "foo": []
+}
+```
+
+The first of these defines a single child block of type "foo". The second
+defines _two_ such blocks. The final example shows a degenerate definition
+of zero blocks, though generators should prefer to omit the property entirely
+in this scenario.
+
+Given a schema that calls for a block type named "foo" with _two_ labels, the
+extra label levels must be represented as objects or arrays of objects as in
+the following examples:
+
+```json
+{
+  "foo": {
+    "bar": {
+      "baz": {
+        "child_attr": "baz"
+      },
+      "boz": {
+        "child_attr": "baz"
+      }
+    },
+    "boz": {
+      "baz": {
+        "child_attr": "baz"
+      }
+    }
+  }
+}
+```
+
+```json
+{
+  "foo": {
+    "bar": {
+      "baz": {
+        "child_attr": "baz"
+      },
+      "boz": {
+        "child_attr": "baz"
+      }
+    },
+    "boz": {
+      "baz": [
+        {
+          "child_attr": "baz"
+        },
+        {
+          "child_attr": "boz"
+        }
+      ]
+    }
+  }
+}
```
+
+```json
+{
+  "foo": [
+    {
+      "bar": {
+        "baz": {
+          "child_attr": "baz"
+        },
+        "boz": {
+          "child_attr": "baz"
+        }
+      }
+    },
+    {
+      "bar": {
+        "baz": [
+          {
+            "child_attr": "baz"
+          },
+          {
+            "child_attr": "boz"
+          }
+        ]
+      }
+    }
+  ]
+}
+```
+
+```json
+{
+  "foo": {
+    "bar": {
+      "baz": {
+        "child_attr": "baz"
+      },
+      "boz": {
+        "child_attr": "baz"
+      }
+    },
+    "bar": {
+      "baz": [
+        {
+          "child_attr": "baz"
+        },
+        {
+          "child_attr": "boz"
+        }
+      ]
+    }
+  }
+}
+```
+
+Arrays can be introduced at either the label definition or block body
+definition levels to define multiple definitions of the same block type
+or labels while preserving order.
+
+A JSON HCL parser _must_ support duplicate definitions of the same property
+name within a single object, preserving all of them and the relative ordering
+between them. The array-based forms are also required so that JSON HCL
+configurations can be produced with JSON producing libraries that are not
+able to preserve property definition order and multiple definitions of
+the same property.
+
+## Expressions
+
+JSON lacks a native expression syntax, so the HCL JSON syntax instead defines
+a mapping for each of the JSON value types, including a special mapping for
+strings that allows optional use of arbitrary expressions.
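+
+As a non-normative sketch of how this mapping surfaces through the Go API in
+this package, the following program evaluates two JSON values as expressions.
+The calling code and variable names are assumptions of the example, and the
+template behavior it relies on is defined precisely in the subsections below:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/json"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	src := []byte(`{"sum": "${a + b}", "items": [1, true, null]}`)
+	f, diags := json.Parse(src, "example.json")
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	attrs, diags := f.Body.JustAttributes()
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// A non-nil EvalContext selects full expression mode, so the
+	// single-interpolation template below unwraps to a number.
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"a": cty.NumberIntVal(1),
+			"b": cty.NumberIntVal(2),
+		},
+	}
+
+	sum, _ := attrs["sum"].Expr.Value(ctx)
+	fmt.Println(sum.AsBigFloat()) // 3, a number rather than a string
+
+	items, _ := attrs["items"].Expr.Value(ctx)
+	fmt.Println(items.Type().IsTupleType()) // true: arrays map to tuple types
+}
+```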
+
+### Objects
+
+When interpreted as an expression, a JSON object represents a value of a HCL
+object type.
+
+Each property of the JSON object represents an attribute of the HCL object type.
+The property name string given in the JSON input is interpreted as a string
+expression as described below, and its result is converted to string as defined
+by the syntax-agnostic information model. If such a conversion is not possible,
+an error is produced and evaluation fails.
+
+An instance of the constructed object type is then created, whose values
+are interpreted by again recursively applying the mapping rules defined in
+this section to each of the property values.
+
+If any evaluated property name strings produce null values, an error is
+produced and evaluation fails. If any produce _unknown_ values, the _entire
+object's_ result is an unknown value of the dynamic pseudo-type, signalling
+that the type of the object cannot be determined.
+
+It is an error to define the same property name multiple times within a single
+JSON object interpreted as an expression. In full expression mode, this
+constraint applies to the name expression results after conversion to string,
+rather than the raw string that may contain interpolation expressions.
+
+### Arrays
+
+When interpreted as an expression, a JSON array represents a value of a HCL
+tuple type.
+
+Each element of the JSON array represents an element of the HCL tuple type.
+The tuple type is constructed by enumerating the JSON array elements, creating
+for each an element whose type is the result of recursively applying the
+expression mapping rules. Correspondence is preserved between the array element
+indices and the tuple element indices.
+
+An instance of the constructed tuple type is then created, whose values are
+interpreted by again recursively applying the mapping rules defined in this
+section.
+
+### Numbers
+
+When interpreted as an expression, a JSON number represents a HCL number value.
+
+HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must
+be able to translate exactly the value given to a number of corresponding
+precision, within the constraints set by the HCL syntax-agnostic information
+model.
+
+In practice, off-the-shelf JSON serializers often do not support customizing the
+processing of numbers, and instead force processing as 32-bit or 64-bit
+floating point values.
+
+A _producer_ of JSON HCL that uses such a serializer can provide numeric values
+as JSON strings where they have precision too great for representation in the
+serializer's chosen numeric type in situations where the result will be
+converted to number (using the standard conversion rules) by a calling
+application.
+
+Alternatively, for expressions that are evaluated in full expression mode an
+embedded template interpolation can be used to faithfully represent a number,
+such as `"${1e150}"`, which will then be evaluated by the underlying HCL native
+syntax expression evaluator.
+
+### Boolean Values
+
+The JSON boolean values `true` and `false`, when interpreted as expressions,
+represent the corresponding HCL boolean values.
+
+### The Null Value
+
+The JSON value `null`, when interpreted as an expression, represents a
+HCL null value of the dynamic pseudo-type.
+
+### Strings
+
+When interpreted as an expression, a JSON string may be interpreted in one of
+two ways depending on the evaluation mode.
+
+If evaluating in literal-only mode (as defined by the syntax-agnostic
+information model) the literal string is interpreted directly as a HCL string
+value, by directly using the exact sequence of unicode characters represented.
+Template interpolations and directives MUST NOT be processed in this mode,
+allowing any characters that appear as introduction sequences to pass through
+literally:
+
+```json
+"Hello world! Template sequences like ${ are not interpreted here."
+```
+
+When evaluating in full expression mode (again, as defined by the syntax-
+agnostic information model) the literal string is instead interpreted as a
+_standalone template_ in the HCL Native Syntax. The expression evaluation
+result is then the direct result of evaluating that template with the current
+variable scope and function table.
+
+```json
+"Hello, ${name}! Template sequences are interpreted in full expression mode."
+```
+
+In particular the _Template Interpolation Unwrapping_ requirement from the
+HCL native syntax specification must be implemented, allowing the use of
+single-interpolation templates to represent expressions that would not
+otherwise be representable in JSON, such as the following example where
+the result must be a number, rather than a string representation of a number:
+
+```json
+"${ a + b }"
+```
+
+## Static Analysis
+
+The HCL static analysis operations are implemented for JSON values that
+represent expressions, as described in the following sections.
+
+Due to the limited expressive power of the JSON syntax alone, an application's
+use of these static analysis functions rather than normal expression evaluation
+serves as additional context for how a JSON value is to be interpreted, which
+means that static analysis can result in a different interpretation of a given
+expression than normal evaluation would produce.
+
+### Static List
+
+An expression interpreted as a static list must be a JSON array. Each of the
+values in the array is interpreted as an expression and returned.
+
+### Static Map
+
+An expression interpreted as a static map must be a JSON object. Each of the
+key/value pairs in the object is presented as a pair of expressions. Since
+object property names are always strings, evaluating the key expression with
+a non-`nil` evaluation context will evaluate any template sequences given
+in the property name.
+
+### Static Call
+
+An expression interpreted as a static call must be a string. The content of
+the string is interpreted as a native syntax expression (not a _template_,
+unlike normal evaluation) and then the static call analysis is delegated to
+that expression.
+
+If the original expression is not a string or its contents cannot be parsed
+as a native syntax expression then static call analysis is not supported.
+
+### Static Traversal
+
+An expression interpreted as a static traversal must be a string. The content
+of the string is interpreted as a native syntax expression (not a _template_,
+unlike normal evaluation) and then static traversal analysis is delegated
+to that expression.
+
+If the original expression is not a string or its contents cannot be parsed
+as a native syntax expression then static traversal analysis is not supported.
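+
+As a non-normative sketch, static traversal analysis is reachable through the
+`hcl.AbsTraversalForExpr` helper in the Go implementation (this package's
+expression type implements the required interface); the calling code and
+property name here are assumptions of the example:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/json"
+)
+
+func main() {
+	f, diags := json.Parse([]byte(`{"target": "module.network.vpc_id"}`), "example.json")
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	attrs, diags := f.Body.JustAttributes()
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// The string content is parsed as a native syntax expression rather
+	// than evaluated as a template, per the rules above.
+	traversal, diags := hcl.AbsTraversalForExpr(attrs["target"].Expr)
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+	fmt.Println(traversal.RootName()) // module
+}
+```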
+ diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go new file mode 100644 index 0000000..28dcf52 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go @@ -0,0 +1,616 @@ +package json + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// body is the implementation of "Body" used for files processed with the JSON +// parser. +type body struct { + val node + + // If non-nil, the keys of this map cause the corresponding attributes to + // be treated as non-existing. This is used when Body.PartialContent is + // called, to produce the "remaining content" Body. + hiddenAttrs map[string]struct{} +} + +// expression is the implementation of "Expression" used for files processed +// with the JSON parser. +type expression struct { + src node +} + +func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, newBody, diags := b.PartialContent(schema) + + hiddenAttrs := newBody.(*body).hiddenAttrs + + var nameSuggestions []string + for _, attrS := range schema.Attributes { + if _, ok := hiddenAttrs[attrS.Name]; !ok { + // Only suggest an attribute name if we didn't use it already. + nameSuggestions = append(nameSuggestions, attrS.Name) + } + } + for _, blockS := range schema.Blocks { + // Blocks can appear multiple times, so we'll suggest their type + // names regardless of whether they've already been used. + nameSuggestions = append(nameSuggestions, blockS.Type) + } + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + for _, attr := range jsonAttrs { + k := attr.Name + if k == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, ok := hiddenAttrs[k]; !ok { + suggestion := nameSuggestion(k, nameSuggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous JSON object property", + Detail: fmt.Sprintf("No attribute or block type is named %q.%s", k, suggestion), + Subject: &attr.NameRange, + Context: attr.Range().Ptr(), + }) + } + } + + return content, diags +} + +func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + usedNames := map[string]struct{}{} + if b.hiddenAttrs != nil { + for k := range b.hiddenAttrs { + usedNames[k] = struct{}{} + } + } + + content := &hcl.BodyContent{ + Attributes: map[string]*hcl.Attribute{}, + Blocks: nil, + + MissingItemRange: b.MissingItemRange(), + } + + // Create some more convenient data structures for our work below. 
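+	// attrSchemas and blockSchemas index the schema entries by name so that
+	// each JSON property visited below can be classified as either an
+	// attribute definition or a block definition with a single map lookup.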
+ attrSchemas := map[string]hcl.AttributeSchema{} + blockSchemas := map[string]hcl.BlockHeaderSchema{} + for _, attrS := range schema.Attributes { + attrSchemas[attrS.Name] = attrS + } + for _, blockS := range schema.Blocks { + blockSchemas[blockS.Type] = blockS + } + + for _, jsonAttr := range jsonAttrs { + attrName := jsonAttr.Name + if _, used := b.hiddenAttrs[attrName]; used { + continue + } + + if attrS, defined := attrSchemas[attrName]; defined { + if existing, exists := content.Attributes[attrName]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate attribute definition", + Detail: fmt.Sprintf("The attribute %q was already defined at %s.", attrName, existing.Range), + Subject: &jsonAttr.NameRange, + Context: jsonAttr.Range().Ptr(), + }) + continue + } + + content.Attributes[attrS.Name] = &hcl.Attribute{ + Name: attrS.Name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + usedNames[attrName] = struct{}{} + + } else if blockS, defined := blockSchemas[attrName]; defined { + bv := jsonAttr.Value + blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) + diags = append(diags, blockDiags...) + usedNames[attrName] = struct{}{} + } + + // We ignore anything that isn't defined because that's the + // PartialContent contract. The Content method will catch leftovers. + } + + // Make sure we got all the required attributes. + for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + if _, defined := content.Attributes[attrS.Name]; !defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + } + + unusedBody := &body{ + val: b.val, + hiddenAttrs: usedNames, + } + + return content, unusedBody, diags +} + +// JustAttributes for JSON bodies interprets all properties of the wrapped +// JSON object as attributes and returns them. +func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + attrs := make(map[string]*hcl.Attribute) + + obj, ok := b.val.(*objectVal) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "A JSON object is required here, defining the attributes for this block.", + Subject: b.val.StartRange().Ptr(), + }) + return attrs, diags + } + + for _, jsonAttr := range obj.Attrs { + name := jsonAttr.Name + if name == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + + if existing, exists := attrs[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate attribute definition", + Detail: fmt.Sprintf("The attribute %q was already defined at %s.", name, existing.Range), + Subject: &jsonAttr.NameRange, + }) + continue + } + + attrs[name] = &hcl.Attribute{ + Name: name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + } + + // No diagnostics possible here, since the parser already took care of + // finding duplicates and every JSON value can be a valid attribute value. 
+ return attrs, diags +} + +func (b *body) MissingItemRange() hcl.Range { + switch tv := b.val.(type) { + case *objectVal: + return tv.CloseRange + case *arrayVal: + return tv.OpenRange + default: + // Should not happen in correct operation, but might show up if the + // input is invalid and we are producing partial results. + return tv.StartRange() + } +} + +func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { + if len(labelsLeft) > 0 { + labelName := labelsLeft[0] + jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName) + diags = append(diags, attrDiags...) + + if len(jsonAttrs) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing block label", + Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName), + Subject: v.StartRange().Ptr(), + }) + return + } + labelsUsed := append(labelsUsed, "") + labelRanges := append(labelRanges, hcl.Range{}) + for _, p := range jsonAttrs { + pk := p.Name + labelsUsed[len(labelsUsed)-1] = pk + labelRanges[len(labelRanges)-1] = p.NameRange + diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...) + } + return + } + + // By the time we get here, we've peeled off all the labels and we're ready + // to deal with the block's actual content. + + // need to copy the label slices because their underlying arrays will + // continue to be mutated after we return. + labels := make([]string, len(labelsUsed)) + copy(labels, labelsUsed) + labelR := make([]hcl.Range, len(labelRanges)) + copy(labelR, labelRanges) + + switch tv := v.(type) { + case *objectVal: + // Single instance of the block + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + val: tv, + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + case *arrayVal: + // Multiple instances of the block + for _, av := range tv.Values { + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + val: av, // might be mistyped; we'll find out when content is requested for this body + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), + Subject: v.StartRange().Ptr(), + }) + } + return +} + +// collectDeepAttrs takes either a single object or an array of objects and +// flattens it into a list of object attributes, collecting attributes from +// all of the objects in a given array. +// +// Ordering is preserved, so a list of objects that each have one property +// will result in those properties being returned in the same order as the +// objects appeared in the array. +// +// This is appropriate for use only for objects representing bodies or labels +// within a block. +// +// The labelName argument, if non-null, is used to tailor returned error +// messages to refer to block labels rather than attributes and child blocks. +// It has no other effect. 
+func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+	var attrs []*objectAttr
+
+	switch tv := v.(type) {
+
+	case *objectVal:
+		attrs = append(attrs, tv.Attrs...)
+
+	case *arrayVal:
+		for _, ev := range tv.Values {
+			switch tev := ev.(type) {
+			case *objectVal:
+				attrs = append(attrs, tev.Attrs...)
+			default:
+				if labelName != nil {
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Incorrect JSON value type",
+						Detail:   fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName),
+						Subject:  ev.StartRange().Ptr(),
+					})
+				} else {
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Incorrect JSON value type",
+						Detail:   "A JSON object is required here, to define attributes and child blocks.",
+						Subject:  ev.StartRange().Ptr(),
+					})
+				}
+			}
+		}
+
+	default:
+		if labelName != nil {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Incorrect JSON value type",
+				Detail:   fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName),
+				Subject:  v.StartRange().Ptr(),
+			})
+		} else {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Incorrect JSON value type",
+				Detail:   "Either a JSON object or JSON array of objects is required here, to define attributes and child blocks.",
+				Subject:  v.StartRange().Ptr(),
+			})
+		}
+	}
+
+	return attrs, diags
+}
+
+func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	switch v := e.src.(type) {
+	case *stringVal:
+		if ctx != nil {
+			// Parse string contents as a HCL native language expression.
+			// We only do this if we have a context, so passing a nil context
+			// is how the caller specifies that interpolations are not allowed
+			// and that the string should just be returned verbatim.
+			templateSrc := v.Value
+			expr, diags := hclsyntax.ParseTemplate(
+				[]byte(templateSrc),
+				v.SrcRange.Filename,
+
+				// This won't produce _exactly_ the right result, since
+				// the hclsyntax parser can't "see" any escapes we removed
+				// while parsing JSON, but it's better than nothing.
+				hcl.Pos{
+					Line: v.SrcRange.Start.Line,
+
+					// skip over the opening quote mark
+					Byte:   v.SrcRange.Start.Byte + 1,
+					Column: v.SrcRange.Start.Column + 1,
+				},
+			)
+			if diags.HasErrors() {
+				return cty.DynamicVal, diags
+			}
+			val, evalDiags := expr.Value(ctx)
+			diags = append(diags, evalDiags...)
+			return val, diags
+		}
+
+		return cty.StringVal(v.Value), nil
+	case *numberVal:
+		return cty.NumberVal(v.Value), nil
+	case *booleanVal:
+		return cty.BoolVal(v.Value), nil
+	case *arrayVal:
+		vals := []cty.Value{}
+		for _, jsonVal := range v.Values {
+			val, _ := (&expression{src: jsonVal}).Value(ctx)
+			vals = append(vals, val)
+		}
+		return cty.TupleVal(vals), nil
+	case *objectVal:
+		var diags hcl.Diagnostics
+		attrs := map[string]cty.Value{}
+		attrRanges := map[string]hcl.Range{}
+		known := true
+		for _, jsonAttr := range v.Attrs {
+			// In this one context we allow keys to contain interpolation
+			// expressions too, assuming we're evaluating in interpolation
+			// mode. This achieves parity with the native syntax where
+			// object expressions can have dynamic keys, while block contents
+			// may not.
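+			// The attribute name is wrapped in a synthetic stringVal node so
+			// that it follows the same template evaluation path as any other
+			// string value in this file.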
+ name, nameDiags := (&expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}).Value(ctx) + val, valDiags := (&expression{src: jsonAttr.Value}).Value(ctx) + diags = append(diags, nameDiags...) + diags = append(diags, valDiags...) + + var err error + name, err = convert.Convert(name, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), + Subject: &jsonAttr.NameRange, + }) + continue + } + if name.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: "Cannot use null value as an object key.", + Subject: &jsonAttr.NameRange, + }) + continue + } + if !name.IsKnown() { + // This is a bit of a weird case, since our usual rules require + // us to tolerate unknowns and just represent the result as + // best we can but if we don't know the key then we can't + // know the type of our object at all, and thus we must turn + // the whole thing into cty.DynamicVal. This is consistent with + // how this situation is handled in the native syntax. + // We'll keep iterating so we can collect other errors in + // subsequent attributes. + known = false + continue + } + nameStr := name.AsString() + if _, defined := attrs[nameStr]; defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object attribute", + Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), + Subject: &jsonAttr.NameRange, + }) + continue + } + attrs[nameStr] = val + attrRanges[nameStr] = jsonAttr.NameRange + } + if !known { + // We encountered an unknown key somewhere along the way, so + // we can't know what our type will eventually be. + return cty.DynamicVal, diags + } + return cty.ObjectVal(attrs), diags + default: + // Default to DynamicVal so that ASTs containing invalid nodes can + // still be partially-evaluated. + return cty.DynamicVal, nil + } +} + +func (e *expression) Variables() []hcl.Traversal { + var vars []hcl.Traversal + + switch v := e.src.(type) { + case *stringVal: + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the hclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return vars + } + return expr.Variables() + + case *arrayVal: + for _, jsonVal := range v.Values { + vars = append(vars, (&expression{src: jsonVal}).Variables()...) + } + case *objectVal: + for _, jsonAttr := range v.Attrs { + vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) + } + } + + return vars +} + +func (e *expression) Range() hcl.Range { + return e.src.Range() +} + +func (e *expression) StartRange() hcl.Range { + return e.src.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *expression) AsTraversal() hcl.Traversal { + // In JSON-based syntax a traversal is given as a string containing + // traversal syntax as defined by hclsyntax.ParseTraversalAbs. 
+ + switch v := e.src.(type) { + case *stringVal: + traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + return traversal + default: + return nil + } +} + +// Implementation for hcl.ExprCall. +func (e *expression) ExprCall() *hcl.StaticCall { + // In JSON-based syntax a static call is given as a string containing + // an expression in the native syntax that also supports ExprCall. + + switch v := e.src.(type) { + case *stringVal: + expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + + call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return nil + } + + return call + default: + return nil + } +} + +// Implementation for hcl.ExprList. +func (e *expression) ExprList() []hcl.Expression { + switch v := e.src.(type) { + case *arrayVal: + ret := make([]hcl.Expression, len(v.Values)) + for i, node := range v.Values { + ret[i] = &expression{src: node} + } + return ret + default: + return nil + } +} + +// Implementation for hcl.ExprMap. +func (e *expression) ExprMap() []hcl.KeyValuePair { + switch v := e.src.(type) { + case *objectVal: + ret := make([]hcl.KeyValuePair, len(v.Attrs)) + for i, jsonAttr := range v.Attrs { + ret[i] = hcl.KeyValuePair{ + Key: &expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}, + Value: &expression{src: jsonAttr.Value}, + } + } + return ret + default: + return nil + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go new file mode 100644 index 0000000..bbcce5b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. + +package json + +import "strconv" + +const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" + +var _tokenType_map = map[tokenType]string{ + 0: _tokenType_name[0:12], + 44: _tokenType_name[12:22], + 58: _tokenType_name[22:32], + 61: _tokenType_name[32:43], + 75: _tokenType_name[43:55], + 78: _tokenType_name[55:66], + 83: _tokenType_name[66:77], + 91: _tokenType_name[77:88], + 93: _tokenType_name[88:99], + 123: _tokenType_name[99:110], + 125: _tokenType_name[110:121], + 9220: _tokenType_name[121:129], +} + +func (i tokenType) String() string { + if str, ok := _tokenType_map[i]; ok { + return str + } + return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/merged.go b/vendor/github.com/hashicorp/hcl2/hcl/merged.go new file mode 100644 index 0000000..ca2b728 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/merged.go @@ -0,0 +1,226 @@ +package hcl + +import ( + "fmt" +) + +// MergeFiles combines the given files to produce a single body that contains +// configuration from all of the given files. +// +// The ordering of the given files decides the order in which contained +// elements will be returned. If any top-level attributes are defined with +// the same name across multiple files, a diagnostic will be produced from +// the Content and PartialContent methods describing this error in a +// user-friendly way. 
+func MergeFiles(files []*File) Body { + var bodies []Body + for _, file := range files { + bodies = append(bodies, file.Body) + } + return MergeBodies(bodies) +} + +// MergeBodies is like MergeFiles except it deals directly with bodies, rather +// than with entire files. +func MergeBodies(bodies []Body) Body { + if len(bodies) == 0 { + // Swap out for our singleton empty body, to reduce the number of + // empty slices we have hanging around. + return emptyBody + } + + // If any of the given bodies are already merged bodies, we'll unpack + // to flatten to a single mergedBodies, since that's conceptually simpler. + // This also, as a side-effect, eliminates any empty bodies, since + // empties are merged bodies with no inner bodies. + var newLen int + var flatten bool + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + newLen += len(children) + flatten = true + } else { + newLen++ + } + } + + if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside + return mergedBodies(bodies) + } + + if newLen == 0 { + // Don't allocate a new empty when we already have one + return emptyBody + } + + new := make([]Body, 0, newLen) + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + new = append(new, children...) + } else { + new = append(new, body) + } + } + return mergedBodies(new) +} + +var emptyBody = mergedBodies([]Body{}) + +// EmptyBody returns a body with no content. This body can be used as a +// placeholder when a body is required but no body content is available. +func EmptyBody() Body { + return emptyBody +} + +type mergedBodies []Body + +// Content returns the content produced by applying the given schema to all +// of the merged bodies and merging the result. +// +// Although required attributes _are_ supported, they should be used sparingly +// with merged bodies since in this case there is no contextual information +// with which to return good diagnostics. Applications working with merged +// bodies may wish to mark all attributes as optional and then check for +// required attributes afterwards, to produce better diagnostics. +func (mb mergedBodies) Content(schema *BodySchema) (*BodyContent, Diagnostics) { + // the returned body will always be empty in this case, because mergedContent + // will only ever call Content on the child bodies. + content, _, diags := mb.mergedContent(schema, false) + return content, diags +} + +func (mb mergedBodies) PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) { + return mb.mergedContent(schema, true) +} + +func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) { + attrs := make(map[string]*Attribute) + var diags Diagnostics + + for _, body := range mb { + thisAttrs, thisDiags := body.JustAttributes() + + if len(thisDiags) != 0 { + diags = append(diags, thisDiags...) + } + + if thisAttrs != nil { + for name, attr := range thisAttrs { + if existing := attrs[name]; existing != nil { + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Duplicate attribute", + Detail: fmt.Sprintf( + "Attribute %q was already assigned at %s", + name, existing.NameRange.String(), + ), + Subject: &attr.NameRange, + }) + continue + } + + attrs[name] = attr + } + } + } + + return attrs, diags +} + +func (mb mergedBodies) MissingItemRange() Range { + if len(mb) == 0 { + // Nothing useful to return here, so we'll return some garbage. 
+		return Range{
+			Filename: "",
+		}
+	}
+
+	// arbitrarily use the first body's missing item range
+	return mb[0].MissingItemRange()
+}
+
+func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) {
+	// We need to produce a new schema with none of the attributes marked as
+	// required, since _any one_ of our bodies can contribute an attribute value.
+	// We'll separately check that all required attributes are present at
+	// the end.
+	mergedSchema := &BodySchema{
+		Blocks: schema.Blocks,
+	}
+	for _, attrS := range schema.Attributes {
+		mergedAttrS := attrS
+		mergedAttrS.Required = false
+		mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
+	}
+
+	var mergedLeftovers []Body
+	content := &BodyContent{
+		Attributes: map[string]*Attribute{},
+	}
+
+	var diags Diagnostics
+	for _, body := range mb {
+		var thisContent *BodyContent
+		var thisLeftovers Body
+		var thisDiags Diagnostics
+
+		if partial {
+			thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
+		} else {
+			thisContent, thisDiags = body.Content(mergedSchema)
+		}
+
+		if thisLeftovers != nil {
+			mergedLeftovers = append(mergedLeftovers, thisLeftovers)
+		}
+		if len(thisDiags) != 0 {
+			diags = append(diags, thisDiags...)
+		}
+
+		if thisContent.Attributes != nil {
+			for name, attr := range thisContent.Attributes {
+				if existing := content.Attributes[name]; existing != nil {
+					diags = diags.Append(&Diagnostic{
+						Severity: DiagError,
+						Summary:  "Duplicate attribute",
+						Detail: fmt.Sprintf(
+							"Attribute %q was already assigned at %s",
+							name, existing.NameRange.String(),
+						),
+						Subject: &attr.NameRange,
+					})
+					continue
+				}
+				content.Attributes[name] = attr
+			}
+		}
+
+		if len(thisContent.Blocks) != 0 {
+			content.Blocks = append(content.Blocks, thisContent.Blocks...)
+		}
+	}
+
+	// Finally, we check for required attributes.
+	for _, attrS := range schema.Attributes {
+		if !attrS.Required {
+			continue
+		}
+
+		if content.Attributes[attrS.Name] == nil {
+			// We don't have any context here to produce a good diagnostic,
+			// which is why we warn in the Content docstring to minimize the
+			// use of required attributes on merged bodies.
+			diags = diags.Append(&Diagnostic{
+				Severity: DiagError,
+				Summary:  "Missing required attribute",
+				Detail: fmt.Sprintf(
+					"The attribute %q is required, but was not assigned.",
+					attrS.Name,
+				),
+			})
+		}
+	}
+
+	leftoverBody := MergeBodies(mergedLeftovers)
+	return content, leftoverBody, diags
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/ops.go b/vendor/github.com/hashicorp/hcl2/hcl/ops.go
new file mode 100644
index 0000000..f4e30b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/ops.go
@@ -0,0 +1,147 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// Index is a helper function that performs the same operation as the index
+// operator in the HCL expression language. That is, the result is the
+// same as it would be for collection[key] in a configuration expression.
+//
+// This is exported so that applications can perform indexing in a manner
+// consistent with how the language does it, including handling of null and
+// unknown values, etc.
+//
+// Diagnostics are produced if the given combination of values is not valid.
+// Therefore a pointer to a source range must be provided to use in diagnostics,
+// though nil can be provided if the calling application is going to
+// ignore the subject of the returned diagnostics anyway.
+func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) { + if collection.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to index null value", + Detail: "This value is null, so it does not have any indices.", + Subject: srcRange, + }, + } + } + if key.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "Can't use a null value as an indexing key.", + Subject: srcRange, + }, + } + } + ty := collection.Type() + kty := key.Type() + if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + switch { + + case ty.IsListType() || ty.IsTupleType() || ty.IsMapType(): + var wantType cty.Type + switch { + case ty.IsListType() || ty.IsTupleType(): + wantType = cty.Number + case ty.IsMapType(): + wantType = cty.String + default: + // should never happen + panic("don't know what key type we want") + } + + key, keyErr := convert.Convert(key, wantType) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + + has := collection.HasIndex(key) + if !has.IsKnown() { + if ty.IsTupleType() { + return cty.DynamicVal, nil + } else { + return cty.UnknownVal(ty.ElementType()), nil + } + } + if has.False() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.Index(key), nil + + case ty.IsObjectType(): + key, keyErr := convert.Convert(key, cty.String) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + if !collection.IsKnown() { + return cty.DynamicVal, nil + } + if !key.IsKnown() { + return cty.DynamicVal, nil + } + + attrName := key.AsString() + + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.GetAttr(attrName), nil + + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "This value does not have any indices.", + Subject: srcRange, + }, + } + } + +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos.go b/vendor/github.com/hashicorp/hcl2/hcl/pos.go new file mode 100644 index 0000000..1a4b329 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos.go @@ -0,0 +1,262 @@ +package hcl + +import "fmt" + +// Pos represents a single position in a source file, by addressing the +// start byte of a unicode character encoded in UTF-8. +// +// Pos is generally used only in the context of a Range, which then defines +// which source file the position is within. +type Pos struct { + // Line is the source code line where this position points. Lines are + // counted starting at 1 and incremented for each newline character + // encountered. 
+	Line int
+
+	// Column is the source code column where this position points, in
+	// unicode characters, with counting starting at 1.
+	//
+	// Column counts characters as they appear visually, so for example a
+	// latin letter with a combining diacritic mark counts as one character.
+	// This is intended for rendering visual markers against source code in
+	// contexts where these diacritics would be rendered in a single character
+	// cell. Technically speaking, Column is counting grapheme clusters as
+	// used in unicode normalization.
+	Column int
+
+	// Byte is the byte offset into the file where the indicated character
+	// begins. This is a zero-based offset to the first byte of the first
+	// UTF-8 codepoint sequence in the character, and thus gives a position
+	// that can be resolved _without_ awareness of Unicode characters.
+	Byte int
+}
+
+// Range represents a span of characters between two positions in a source
+// file.
+//
+// This struct is usually used by value in types that represent AST nodes,
+// but by pointer in types that refer to the positions of other objects,
+// such as in diagnostics.
+type Range struct {
+	// Filename is the name of the file into which this range's positions
+	// point.
+	Filename string
+
+	// Start and End represent the bounds of this range. Start is inclusive
+	// and End is exclusive.
+	Start, End Pos
+}
+
+// RangeBetween returns a new range that spans from the beginning of the
+// start range to the end of the end range.
+//
+// The result is meaningless if the two ranges do not belong to the same
+// source file or if the end range appears before the start range.
+func RangeBetween(start, end Range) Range {
+	return Range{
+		Filename: start.Filename,
+		Start:    start.Start,
+		End:      end.End,
+	}
+}
+
+// RangeOver returns a new range that covers both of the given ranges and
+// possibly additional content between them if the two ranges do not overlap.
+//
+// If either range is empty then it is ignored. The result is empty if both
+// given ranges are empty.
+//
+// The result is meaningless if the two ranges do not belong to the same
+// source file.
+func RangeOver(a, b Range) Range {
+	if a.Empty() {
+		return b
+	}
+	if b.Empty() {
+		return a
+	}
+
+	var start, end Pos
+	if a.Start.Byte < b.Start.Byte {
+		start = a.Start
+	} else {
+		start = b.Start
+	}
+	if a.End.Byte > b.End.Byte {
+		end = a.End
+	} else {
+		end = b.End
+	}
+	return Range{
+		Filename: a.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+// ContainsOffset returns true if and only if the given byte offset is within
+// the receiving Range.
+func (r Range) ContainsOffset(offset int) bool {
+	return offset >= r.Start.Byte && offset < r.End.Byte
+}
+
+// Ptr returns a pointer to a copy of the receiver. This is a convenience
+// when using ranges in places where pointers are required, such as in
+// Diagnostic, but the range in question is returned from a method. Go would
+// otherwise not allow one to take the address of a function call.
+func (r Range) Ptr() *Range {
+	return &r
+}
+
+// String returns a compact string representation of the receiver.
+// Callers should generally prefer to present a range more visually,
+// e.g. via markers directly on the relevant portion of source code.
+func (r Range) String() string {
+	if r.Start.Line == r.End.Line {
+		return fmt.Sprintf(
+			"%s:%d,%d-%d",
+			r.Filename,
+			r.Start.Line, r.Start.Column,
+			r.End.Column,
+		)
+	} else {
+		return fmt.Sprintf(
+			"%s:%d,%d-%d,%d",
+			r.Filename,
+			r.Start.Line, r.Start.Column,
+			r.End.Line, r.End.Column,
+		)
+	}
+}
+
+func (r Range) Empty() bool {
+	return r.Start.Byte == r.End.Byte
+}
+
+// CanSliceBytes returns true if SliceBytes could return an accurate
+// sub-slice of the given slice.
+//
+// This effectively tests whether the start and end offsets of the range
+// are within the bounds of the slice, and thus whether SliceBytes can be
+// trusted to produce an accurate start and end position within that slice.
+func (r Range) CanSliceBytes(b []byte) bool {
+	switch {
+	case r.Start.Byte < 0 || r.Start.Byte > len(b):
+		return false
+	case r.End.Byte < 0 || r.End.Byte > len(b):
+		return false
+	case r.End.Byte < r.Start.Byte:
+		return false
+	default:
+		return true
+	}
+}
+
+// SliceBytes returns a sub-slice of the given slice that is covered by the
+// receiving range, assuming that the given slice is the source code of the
+// file indicated by r.Filename.
+//
+// If the receiver refers to any byte offsets that are outside of the slice
+// then the result is constrained to the overlapping portion only, to avoid
+// a panic. Use CanSliceBytes to determine if the result is guaranteed to
+// be an accurate span of the requested range.
+func (r Range) SliceBytes(b []byte) []byte {
+	start := r.Start.Byte
+	end := r.End.Byte
+	if start < 0 {
+		start = 0
+	} else if start > len(b) {
+		start = len(b)
+	}
+	if end < 0 {
+		end = 0
+	} else if end > len(b) {
+		end = len(b)
+	}
+	if end < start {
+		end = start
+	}
+	return b[start:end]
+}
+
+// Overlaps returns true if the receiver and the other given range share any
+// characters in common.
+func (r Range) Overlaps(other Range) bool {
+	switch {
+	case r.Filename != other.Filename:
+		// If the ranges are in different files then they can't possibly overlap
+		return false
+	case r.Empty() || other.Empty():
+		// Empty ranges can never overlap
+		return false
+	case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte):
+		return true
+	case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte):
+		return true
+	default:
+		return false
+	}
+}
+
+// Overlap finds a range that is either identical to or a sub-range of both
+// the receiver and the other given range. It returns an empty range
+// within the receiver if there is no overlap between the two ranges.
+//
+// A non-empty result is either identical to or a subset of the receiver.
+func (r Range) Overlap(other Range) Range {
+	if !r.Overlaps(other) {
+		// Start == End indicates an empty range
+		return Range{
+			Filename: r.Filename,
+			Start:    r.Start,
+			End:      r.Start,
+		}
+	}
+
+	var start, end Pos
+	if r.Start.Byte > other.Start.Byte {
+		start = r.Start
+	} else {
+		start = other.Start
+	}
+	if r.End.Byte < other.End.Byte {
+		end = r.End
+	} else {
+		end = other.End
+	}
+
+	return Range{
+		Filename: r.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+// PartitionAround finds the portion of the given range that overlaps with
+// the receiver and returns three ranges: the portion of the receiver that
+// precedes the overlap, the overlap itself, and then the portion of the
+// receiver that comes after the overlap.
+//
+// If the two ranges do not overlap then all three returned ranges are empty.
+//
+// If the given range aligns with or extends beyond either extent of the
+// receiver then the corresponding outer range will be empty.
+func (r Range) PartitionAround(other Range) (before, overlap, after Range) {
+	overlap = r.Overlap(other)
+	if overlap.Empty() {
+		return overlap, overlap, overlap
+	}
+
+	before = Range{
+		Filename: r.Filename,
+		Start:    r.Start,
+		End:      overlap.Start,
+	}
+	after = Range{
+		Filename: r.Filename,
+		Start:    overlap.End,
+		End:      r.End,
+	}
+
+	return before, overlap, after
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
new file mode 100644
index 0000000..7c8f2df
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
@@ -0,0 +1,148 @@
+package hcl
+
+import (
+	"bufio"
+	"bytes"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+)
+
+// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc
+// and visit a source range for each token matched.
+//
+// For example, this can be used with bufio.ScanLines to find the source range
+// for each line in the file, skipping over the actual newline characters, which
+// may be useful when printing source code snippets as part of diagnostic
+// messages.
+//
+// The line and column information in the returned ranges is produced by
+// counting newline characters and grapheme clusters respectively, which
+// mimics the behavior we expect from a parser when producing ranges.
+type RangeScanner struct {
+	filename string
+	b        []byte
+	cb       bufio.SplitFunc
+
+	pos Pos    // position of next byte to process in b
+	cur Range  // latest range
+	tok []byte // slice of b that is covered by cur
+	err error  // error from last scan, if any
+}
+
+// NewRangeScanner creates a new RangeScanner for the given buffer, producing
+// ranges for the given filename.
+//
+// Since ranges have grapheme-cluster granularity rather than byte granularity,
+// the scanner will produce incorrect results if the given SplitFunc creates
+// tokens between grapheme cluster boundaries. In particular, it is incorrect
+// to use RangeScanner with bufio.ScanRunes because it will produce tokens
+// around individual UTF-8 sequences, which will split any multi-sequence
+// grapheme clusters.
+func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner {
+	return &RangeScanner{
+		filename: filename,
+		b:        b,
+		cb:       cb,
+		pos: Pos{
+			Byte:   0,
+			Line:   1,
+			Column: 1,
+		},
+	}
+}
+
+func (sc *RangeScanner) Scan() bool {
+	if sc.pos.Byte >= len(sc.b) || sc.err != nil {
+		// All done
+		return false
+	}
+
+	// Since we're operating on an in-memory buffer, we always pass the whole
+	// remainder of the buffer to our SplitFunc and set isEOF to let it know
+	// that it has the whole thing.
+	advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true)
+
+	// Since we are setting isEOF to true this should never happen, but
+	// if it does we will just abort and assume the SplitFunc is misbehaving.
+	if advance == 0 && token == nil && err == nil {
+		return false
+	}
+
+	if err != nil {
+		sc.err = err
+		sc.cur = Range{
+			Filename: sc.filename,
+			Start:    sc.pos,
+			End:      sc.pos,
+		}
+		sc.tok = nil
+		return false
+	}
+
+	sc.tok = token
+	start := sc.pos
+	end := sc.pos
+	new := sc.pos
+
+	// adv is similar to token but it also includes any subsequent bytes that
+	// the SplitFunc asked us to skip over via its advance return value.
+	adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance]
+
+	// We now need to scan over our token to count the grapheme clusters
+	// so we can correctly advance Column, and count the newlines so we
+	// can correctly advance Line.
+	advR := bytes.NewReader(adv)
+	gsc := bufio.NewScanner(advR)
+	advanced := 0
+	gsc.Split(textseg.ScanGraphemeClusters)
+	for gsc.Scan() {
+		gr := gsc.Bytes()
+		new.Byte += len(gr)
+		new.Column++
+
+		// We rely here on the fact that \r\n is considered a grapheme cluster
+		// and so we don't need to worry about miscounting additional lines
+		// on files with Windows-style line endings.
+		if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') {
+			new.Column = 1
+			new.Line++
+		}
+
+		if advanced < len(token) {
+			// If we've not yet found the end of our token then we'll
+			// also push our "end" marker along.
+			// (if advance > len(token) then we'll stop moving "end" early
+			// so that the caller only sees the range covered by token.)
+			end = new
+		}
+		advanced += len(gr)
+	}
+
+	sc.cur = Range{
+		Filename: sc.filename,
+		Start:    start,
+		End:      end,
+	}
+	sc.pos = new
+	return true
+}
+
+// Range returns a range that covers the latest token obtained after a call
+// to Scan returns true.
+func (sc *RangeScanner) Range() Range {
+	return sc.cur
+}
+
+// Bytes returns the slice of the input buffer that is covered by the range
+// that would be returned by Range.
+func (sc *RangeScanner) Bytes() []byte {
+	return sc.tok
+}
+
+// Err can be called after Scan returns false to determine if the latest read
+// resulted in an error, and obtain that error if so.
+func (sc *RangeScanner) Err() error {
+	return sc.err
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/schema.go b/vendor/github.com/hashicorp/hcl2/hcl/schema.go
new file mode 100644
index 0000000..891257a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/schema.go
@@ -0,0 +1,21 @@
+package hcl
+
+// BlockHeaderSchema represents the shape of a block header, and is
+// used for matching blocks within bodies.
+type BlockHeaderSchema struct {
+	Type       string
+	LabelNames []string
+}
+
+// AttributeSchema represents the requirements for an attribute, and is used
+// for matching attributes within bodies.
+type AttributeSchema struct {
+	Name     string
+	Required bool
+}
+
+// BodySchema represents the desired shallow structure of a body.
+type BodySchema struct {
+	Attributes []AttributeSchema
+	Blocks     []BlockHeaderSchema
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
new file mode 100644
index 0000000..58257bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
@@ -0,0 +1,691 @@
+# HCL Syntax-Agnostic Information Model
+
+This is the specification for the general information model (abstract types and
+semantics) for HCL. HCL is a system for defining configuration languages for
+applications. The HCL information model is designed to support multiple
+concrete syntaxes for configuration, each with a mapping to the model defined
+in this specification.
+
+The two primary syntaxes intended for use in conjunction with this model are
+[the HCL native syntax](./hclsyntax/spec.md) and [the JSON syntax](./json/spec.md).
+In principle, other syntaxes are possible as long as either their language model
+is sufficiently rich to express the concepts described in this specification
+or the language targets a well-defined subset of the specification.
+
+## Structural Elements
+
+The primary structural element is the _body_, which is a container representing
+a set of zero or more _attributes_ and a set of zero or more _blocks_.
+
+A _configuration file_ is the top-level object, and will usually be produced
+by reading a file from disk and parsing it as a particular syntax. A
+configuration file has its own _body_, representing the top-level attributes
+and blocks.
+
+An _attribute_ is a name and value pair associated with a body. Attribute names
+are unique within a given body. Attribute values are provided as _expressions_,
+which are discussed in detail in a later section.
+
+A _block_ is a nested structure that has a _type name_, zero or more string
+_labels_ (e.g. identifiers), and a nested body.
+
+Together the structural elements create a hierarchical data structure, with
+attributes intended to represent the direct properties of a particular object
+in the calling application, and blocks intended to represent child objects
+of a particular object.
+
+## Body Content
+
+To support the expression of the HCL concepts in languages whose information
+model is a subset of HCL's, such as JSON, a _body_ is an opaque container
+whose content can only be accessed by providing information on the expected
+structure of the content.
+
+The specification for each syntax must describe how its physical constructs
+are mapped on to body content given a schema. For syntaxes that have
+first-class syntax distinguishing attributes and blocks this can be relatively
+straightforward, while more detailed mapping rules may be required in syntaxes
+where the representation of attributes vs. blocks is ambiguous.
+
+### Schema-driven Processing
+
+Schema-driven processing is the primary way to access body content.
+A _body schema_ is a description of what is expected within a particular body,
+which can then be used to extract the _body content_, which then provides
+access to the specific attributes and blocks requested.
+
+A _body schema_ consists of a list of _attribute schemata_ and
+_block header schemata_:
+
+* An _attribute schema_ provides the name of an attribute and whether its
+  presence is required.
+
+* A _block header schema_ provides a block type name and the semantic names
+  assigned to each of the labels of that block type, if any.
+
+Within a schema, it is an error to request the same attribute name twice or
+to request a block type whose name is also an attribute name. While this can
+in principle be supported in some syntaxes, in other syntaxes the attribute
+and block namespaces are combined and so an attribute cannot coexist with
+a block whose type name is identical to the attribute name.
+
+The result of applying a body schema to a body is _body content_, which
+consists of an _attribute map_ and a _block sequence_:
+
+* The _attribute map_ is a map data structure whose keys are attribute names
+  and whose values are _expressions_ that represent the corresponding attribute
+  values.
+
+* The _block sequence_ is an ordered sequence of blocks, with each specifying
+  a block _type name_, the sequence of _labels_ specified for the block,
+  and the body object (not body _content_) representing the block's own body.
+
+After obtaining _body content_, the calling application may continue processing
+by evaluating attribute expressions and/or recursively applying further
+schema-driven processing to the child block bodies.
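+
+As a non-normative illustration, a calling application using the Go
+implementation in this repository might drive schema-driven processing as
+sketched below. The `io_mode` attribute and the `service` block type are
+invented for this example, and the package is assumed to be imported
+as `hcl`:
+
+```go
+func processBody(body hcl.Body) hcl.Diagnostics {
+	schema := &hcl.BodySchema{
+		Attributes: []hcl.AttributeSchema{
+			{Name: "io_mode", Required: false},
+		},
+		Blocks: []hcl.BlockHeaderSchema{
+			{Type: "service", LabelNames: []string{"name"}},
+		},
+	}
+
+	content, diags := body.Content(schema)
+
+	if attr, defined := content.Attributes["io_mode"]; defined {
+		// A nil EvalContext requests literal-only evaluation.
+		val, moreDiags := attr.Expr.Value(nil)
+		diags = append(diags, moreDiags...)
+		_ = val
+	}
+
+	for _, block := range content.Blocks.OfType("service") {
+		// block.Labels[0] is the "name" label; block.Body would be
+		// processed recursively with a nested schema.
+		_ = block
+	}
+
+	return diags
+}
+```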
+ +**Note:** The _body schema_ is intentionally minimal, to reduce the set of +mapping rules that must be defined for each syntax. Higher-level utility +libraries may be provided to assist in the construction of a schema and +perform additional processing, such as automatically evaluating attribute +expressions and assigning their result values into a data structure, or +recursively applying a schema to child blocks. Such utilities are not part of +this core specification and will vary depending on the capabilities and idiom +of the implementation language. + +### _Dynamic Attributes_ Processing + +The _schema-driven_ processing model is useful when the expected structure +of a body is known a priori by the calling application. Some blocks are +instead more free-form, such as a user-provided set of arbitrary key/value +pairs. + +The alternative _dynamic attributes_ processing mode allows for this more +ad-hoc approach. Processing in this mode behaves as if a schema had been +constructed without any _block header schemata_ and with an attribute +schema for each distinct key provided within the physical representation +of the body. + +The means by which _distinct keys_ are identified is dependent on the +physical syntax; this processing mode assumes that the syntax has a way +to enumerate keys provided by the author and identify expressions that +correspond with those keys, but does not define the means by which this is +done. + +The result of _dynamic attributes_ processing is an _attribute map_ as +defined in the previous section. No _block sequence_ is produced in this +processing mode. + +### Partial Processing of Body Content + +Under _schema-driven processing_, by default the given schema is assumed +to be exhaustive, such that any attribute or block not matched by schema +elements is considered an error. This allows feedback about unsupported +attributes and blocks (such as typos) to be provided. + +An alternative is _partial processing_, where any additional elements within +the body are not considered an error. + +Under partial processing, the result is both body content as described +above _and_ a new body that represents any body elements that remain after +the schema has been processed. + +Specifically: + +* Any attribute whose name is specified in the schema is returned in body + content and elided from the new body. + +* Any block whose type is specified in the schema is returned in body content + and elided from the new body. + +* Any attribute or block _not_ meeting the above conditions is placed into + the new body, unmodified. + +The new body can then be recursively processed using any of the body +processing models. This facility allows different subsets of body content +to be processed by different parts of the calling application. + +Processing a body in two steps — first partial processing of a source body, +then exhaustive processing of the returned body — is equivalent to single-step +processing with a schema that is the union of the schemata used +across the two steps. + +## Expressions + +Attribute values are represented by _expressions_. Depending on the concrete +syntax in use, an expression may just be a literal value or it may describe +a computation in terms of literal values, variables, and functions. + +Each syntax defines its own representation of expressions. For syntaxes based +in languages that do not have any non-literal expression syntax, it is +recommended to embed the template language from +[the native syntax](./hclsyntax/spec.md) e.g. 
as a post-processing step on
+string literals.
+
+### Expression Evaluation
+
+In order to obtain a concrete value, each expression must be _evaluated_.
+Evaluation is performed in terms of an evaluation context, which
+consists of the following:
+
+* An _evaluation mode_, which is defined below.
+* A _variable scope_, which provides a set of named variables for use in
+  expressions.
+* A _function table_, which provides a set of named functions for use in
+  expressions.
+
+The _evaluation mode_ allows for two different interpretations of an
+expression:
+
+* In _literal-only mode_, variables and functions are not available and it
+  is assumed that the calling application's intent is to treat the attribute
+  value as a literal.
+
+* In _full expression mode_, variables and functions are defined and it is
+  assumed that the calling application wishes to provide a full expression
+  language for definition of the attribute value.
+
+The actual behavior of these two modes depends on the syntax in use. For
+languages with first-class expression syntax, these two modes may be considered
+equivalent, with _literal-only mode_ simply not defining any variables or
+functions. For languages that embed arbitrary expressions via string templates,
+_literal-only mode_ may disable such processing, allowing literal strings to
+pass through without interpretation as templates.
+
+Since literal-only mode does not support variables and functions, it is an
+error for the calling application to enable this mode and yet provide a
+variable scope and/or function table.
+
+## Values and Value Types
+
+The result of expression evaluation is a _value_. Each value has a _type_,
+which is dynamically determined during evaluation. The _variable scope_ in
+the evaluation context is a map from variable name to value, using the same
+definition of value.
+
+The type system for HCL values is intended to be at a level of abstraction
+suitable for configuration of various applications. A well-defined,
+implementation-language-agnostic type system is defined to allow for
+consistent processing of configuration across many implementation languages.
+Concrete implementations may provide additional functionality to lower
+HCL values and types to corresponding native language types, which may then
+impose additional constraints on the values outside of the scope of this
+specification.
+
+Two values are _equal_ if and only if they have identical types and their
+values are equal according to the rules of their shared type.
+
+### Primitive Types
+
+The primitive types are _string_, _bool_, and _number_.
+
+A _string_ is a sequence of unicode characters. Two strings are equal if
+NFC normalization ([UAX#15](http://unicode.org/reports/tr15/))
+of each string produces two identical sequences of characters.
+NFC normalization ensures that, for example, a precomposed combination of a
+latin letter and a diacritic compares equal with the letter followed by
+a combining diacritic.
+
+The _bool_ type has only two non-null values: _true_ and _false_. Two bool
+values are equal if and only if they are either both true or both false.
+
+A _number_ is an arbitrary-precision floating point value. An implementation
+_must_ make the full-precision values available to the calling application
+for interpretation into any suitable number representation. An implementation
+may in practice implement numbers with limited precision so long as the
+following constraints are met:
+
+* Integers are represented with at least 256 bits.
+* Non-integer numbers are represented as floating point values with a
+  mantissa of at least 256 bits and a signed binary exponent of at least
+  16 bits.
+* An error is produced if an integer value given in source cannot be
+  represented precisely.
+* An error is produced if a non-integer value cannot be represented due to
+  overflow.
+* A non-integer number is rounded to the nearest possible value when a
+  value is of too high a precision to be represented.
+
+The _number_ type also requires representation of both positive and negative
+infinity. A "not a number" (NaN) value is _not_ provided nor used.
+
+Two number values are equal if they are numerically equal to the precision
+associated with the number. Positive infinity and negative infinity are
+equal to themselves but not to each other. Positive infinity is greater than
+any other number value, and negative infinity is less than any other number
+value.
+
+Some syntaxes may be unable to represent numeric literals of arbitrary
+precision. This must be defined in the syntax specification as part of its
+description of mapping numeric literals to HCL values.
+
+### Structural Types
+
+_Structural types_ are types that are constructed by combining other types.
+Each distinct combination of other types is itself a distinct type. There
+are two structural type _kinds_:
+
+* _Object types_ are constructed of a set of named attributes, each of which
+  has a type. Attribute names are always strings. (_Object_ attributes are a
+  distinct idea from _body_ attributes, though calling applications
+  may choose to blur the distinction by use of common naming schemes.)
+* _Tuple types_ are constructed of a sequence of elements, each of which
+  has a type.
+
+Values of structural types are compared for equality in terms of their
+attributes or elements. A structural type value is equal to another if and
+only if all of the corresponding attributes or elements are equal.
+
+Two structural types are identical if they are of the same kind and
+have attributes or elements with identical types.
+
+### Collection Types
+
+_Collection types_ are types that combine together an arbitrary number of
+values of some other single type. There are three collection type _kinds_:
+
+* _List types_ represent ordered sequences of values of their element type.
+* _Map types_ represent values of their element type accessed via string keys.
+* _Set types_ represent unordered sets of distinct values of their element type.
+
+For each of these kinds and each distinct element type there is a distinct
+collection type. For example, "list of string" is a distinct type from
+"set of string", and "list of number" is a distinct type from "list of string".
+
+Values of collection types are compared for equality in terms of their
+elements. A collection type value is equal to another if and only if both
+have the same number of elements and their corresponding elements are equal.
+
+Two collection types are identical if they are of the same kind and have
+the same element type.
+
+### Null Values
+
+Each type has a null value. The null value of a type represents the absence
+of a value, but with type information retained to allow for type checking.
+
+Null values are used primarily to represent the conditional absence of a
+body attribute. In a syntax with a conditional operator, one of the result
+values of that conditional may be null to indicate that the attribute should be
+considered not present in that case.
+
+Calling applications _should_ consider an attribute with a null value as
+equivalent to the value not being present at all.
+
+A null value of a particular type is equal to itself.
+
+### Unknown Values and the Dynamic Pseudo-type
+
+An _unknown value_ is a placeholder for a value that is not yet known.
+Operations on unknown values themselves return unknown values that have a
+type appropriate to the operation. For example, adding together two unknown
+numbers yields an unknown number, while comparing two unknown values of any
+type for equality yields an unknown bool.
+
+Each type has a distinct unknown value. For example, an unknown _number_ is
+a distinct value from an unknown _string_.
+
+_The dynamic pseudo-type_ is a placeholder for a type that is not yet known.
+The only values of this type are its null value and its unknown value. It is
+referred to as a _pseudo-type_ because it should not be considered a type in
+its own right, but rather as a placeholder for a type yet to be established.
+The unknown value of the dynamic pseudo-type is referred to as _the dynamic
+value_.
+
+Operations on values of the dynamic pseudo-type behave as if it is a value
+of the expected type, optimistically assuming that once the value and type
+are known they will be valid for the operation. For example, adding together
+a number and the dynamic value produces an unknown number.
+
+Unknown values and the dynamic pseudo-type can be used as a mechanism for
+partial type checking and semantic checking: by evaluating an expression with
+all variables set to an unknown value, the expression can be evaluated to
+produce an unknown value of a given type, or produce an error if any operation
+is provably invalid with only type information.
+
+Unknown values and the dynamic pseudo-type must never be returned from
+operations unless at least one operand is unknown or dynamic. Calling
+applications are guaranteed that unless the global scope includes unknown
+values, or the function table includes functions that return unknown values,
+no expression will evaluate to an unknown value. The calling application is
+thus in total control over the use and meaning of unknown values.
+
+The dynamic pseudo-type is identical only to itself.
+
+### Capsule Types
+
+A _capsule type_ is a custom type defined by the calling application. A value
+of a capsule type is considered opaque to HCL, but may be accepted
+by functions provided by the calling application.
+
+A particular capsule type is identical only to itself. The equality of two
+values of the same capsule type is defined by the calling application. No
+other operations are supported for values of capsule types.
+
+Support for capsule types in an HCL implementation is optional. Capsule types
+are intended to allow calling applications to pass through values that are
+not part of the standard type system. For example, an application that
+deals with raw binary data may define a capsule type representing a byte
+array, and provide functions that produce or operate on byte arrays.
+
+### Type Specifications
+
+In certain situations it is necessary to define the expected type of a value.
+Whereas two _types_ have a commutative _identity_ relationship,
+a type has a non-commutative _matches_ relationship with a _type specification_.
+A type specification is, in practice, just a different interpretation of a
+type such that:
+
+* Any type _matches_ any type that it is identical to.
+
+* Any type _matches_ the dynamic pseudo-type.
+
+For example, given a type specification "list of dynamic pseudo-type", the
+concrete types "list of string" and "list of map" match, but the
+type "set of string" does not.
+
+## Functions and Function Calls
+
+The evaluation context used to evaluate an expression includes a function
+table, which represents an application-defined set of named functions
+available for use in expressions.
+
+Each syntax defines whether function calls are supported and how they are
+physically represented in source code, but the semantics of function calls are
+defined here to ensure consistent results across syntaxes and to allow
+applications to provide functions that are interoperable with all syntaxes.
+
+A _function_ is defined from the following elements:
+
+* Zero or more _positional parameters_, each with a name used for documentation,
+  a type specification for expected argument values, and a flag for whether
+  each of null values, unknown values, and values of the dynamic pseudo-type
+  are accepted.
+
+* Zero or one _variadic parameter_, with the same structure as the _positional_
+  parameters, which if present collects any additional arguments provided at
+  the function call site.
+
+* A _result type definition_, which specifies the value type returned for each
+  valid sequence of argument values.
+
+* A _result value definition_, which specifies the value returned for each
+  valid sequence of argument values.
+
+A _function call_, regardless of source syntax, consists of a sequence of
+argument values. The argument values are each mapped to a corresponding
+parameter as follows:
+
+* For each of the function's positional parameters in sequence, take the next
+  argument. If there are no more arguments, the call is erroneous.
+
+* If the function has a variadic parameter, take all remaining arguments that
+  were not yet assigned to a positional parameter and collect them into
+  a sequence of variadic arguments that each correspond to the variadic
+  parameter.
+
+* If the function has _no_ variadic parameter, it is an error if any arguments
+  remain after taking one argument for each positional parameter.
+
+After mapping each argument to a parameter, semantic checking proceeds
+for each argument:
+
+* If the argument value corresponding to a parameter does not match the
+  parameter's type specification, the call is erroneous.
+
+* If the argument value corresponding to a parameter is null and the parameter
+  is not specified as accepting nulls, the call is erroneous.
+
+* If the argument value corresponding to a parameter is the dynamic value
+  and the parameter is not specified as accepting values of the dynamic
+  pseudo-type, the call is valid but its _result type_ is forced to be the
+  dynamic pseudo-type.
+
+* If none of the above conditions holds for any argument, the call is
+  valid and the function's value type definition is used to determine the
+  call's _result type_. A function _may_ vary its result type depending on
+  the argument _values_ as well as the argument _types_; for example, a
+  function that decodes a JSON value will return a different result type
+  depending on the data structure described by the given JSON source code.
+
+If semantic checking succeeds without error, the call is _executed_:
+
+* For each argument, if its value is unknown and its corresponding parameter
+  is not specified as accepting unknowns, the _result value_ is forced to be an
+  unknown value of the result type.
+
+* If the previous condition does not apply, the function's result value
+  definition is used to determine the call's _result value_.
+
+The result of a function call expression is either an error, if one of the
+erroneous conditions above applies, or the _result value_.
+
+## Type Conversions and Unification
+
+Values given in configuration may not always match the expectations of the
+operations applied to them or to the calling application. In such situations,
+automatic type conversion is attempted as a convenience to the user.
+
+Along with conversions to a _specified_ type, it is sometimes necessary to
+ensure that a selection of values are all of the _same_ type, without any
+constraint on which type that is. This is the process of _type unification_,
+which attempts to find the most general type that all of the given types can
+be converted to.
+
+Both type conversions and unification are defined in the syntax-agnostic
+model to ensure consistency of behavior between syntaxes.
+
+Type conversions are broadly characterized into two categories: _safe_ and
+_unsafe_. A conversion is "safe" if any distinct value of the source type
+has a corresponding distinct value in the target type. A conversion is
+"unsafe" if either the target type values are _not_ distinct (information
+may be lost in conversion) or if some values of the source type do not have
+any corresponding value in the target type. An unsafe conversion may result
+in an error.
+
+A given type can always be converted to itself, which is a no-op.
+
+### Conversion of Null Values
+
+All null values are safely convertible to a null value of any other type,
+regardless of other type-specific rules specified in the sections below.
+
+### Conversion to and from the Dynamic Pseudo-type
+
+Conversion _from_ the dynamic pseudo-type _to_ any other type always succeeds,
+producing an unknown value of the target type.
+
+Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result
+is the input value, verbatim. This is the only situation where the conversion
+result value is not of the given target type.
+
+### Primitive Type Conversions
+
+Bidirectional conversions are available between the string and number types,
+and between the string and boolean types.
+
+The bool value true corresponds to the string containing the characters "true",
+while the bool value false corresponds to the string containing the characters
+"false". Conversion from bool to string is safe, while the converse is
+unsafe. The strings "1" and "0" are alternative string representations
+of true and false respectively. It is an error to convert a string other than
+the four in this paragraph to type bool.
+
+A number value is converted to string by translating its integer portion
+into a sequence of decimal digits (`0` through `9`), and then if it has a
+non-zero fractional part, a period `.` followed by a sequence of decimal
+digits representing its fractional part. No exponent portion is included.
+The number is converted at its full precision. Conversion from number to
+string is safe.
+
+A string is converted to a number value by reversing the above mapping.
+No exponent portion is allowed. Conversion from string to number is unsafe.
+It is an error to convert a string that does not comply with the expected
+syntax to type number.
+
+No direct conversion is available between the bool and number types.
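+
+As a non-normative illustration, the Go implementation in this repository
+relies on the go-cty `convert` package (already used elsewhere in this
+change) for conversions in this spirit:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+func main() {
+	// Unsafe conversion: only strings matching the number syntax succeed.
+	n, err := convert.Convert(cty.StringVal("12.5"), cty.Number)
+	fmt.Println(n, err)
+
+	// Safe conversion: every bool value has a distinct string representation.
+	s, err := convert.Convert(cty.True, cty.String) // the string "true"
+	fmt.Println(s, err)
+
+	// No direct conversion is defined between bool and number, so this
+	// produces a conversion error.
+	_, err = convert.Convert(cty.True, cty.Number)
+	fmt.Println(err)
+}
+```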
+
+### Collection and Structural Type Conversions
+
+Conversion from set types to list types is _safe_, as long as their
+element types are safely convertible. If the element types are _unsafely_
+convertible, then the collection conversion is also unsafe. Each set element
+becomes a corresponding list element, in an undefined order. Although no
+particular ordering is required, implementations _should_ produce list
+elements in a consistent order for a given input set, as a convenience
+to calling applications.
+
+Conversion from list types to set types is _unsafe_, as long as their element
+types are convertible. Each distinct list item becomes a distinct set item.
+If two list items are equal, one of the two is lost in the conversion.
+
+Conversion from tuple types to list types is permitted if all of the
+tuple element types are convertible to the target list element type.
+The safety of the conversion depends on the safety of each of the element
+conversions. Each element in turn is converted to the list element type,
+producing a list of identical length.
+
+Conversion from tuple types to set types is permitted, behaving as if the
+tuple type was first converted to a list of the same element type and then
+that list converted to the target set type.
+
+Conversion from object types to map types is permitted if all of the object
+attribute types are convertible to the target map element type. The safety
+of the conversion depends on the safety of each of the attribute conversions.
+Each attribute in turn is converted to the map element type, and map element
+keys are set to the name of each corresponding object attribute.
+
+Conversion from list and set types to tuple types is permitted, following
+the opposite steps as the converse conversions. Such conversions are _unsafe_.
+It is an error to convert a list or set to a tuple type whose number of
+elements does not match the list or set length.
+
+Conversion from map types to object types is permitted if each map key
+corresponds to an attribute in the target object type. It is an error to
+convert from a map value whose set of keys does not exactly match the target
+type's attributes. The conversion takes the opposite steps of the converse
+conversion.
+
+Conversion from one object type to another is permitted as long as the
+common attribute names have convertible types. Any attribute present in the
+target type but not in the source type is populated with a null value of
+the appropriate type.
+
+Conversion from one tuple type to another is permitted as long as the
+tuples have the same length and the elements have convertible types.
+
+### Type Unification
+
+Type unification is an operation that takes a list of types and attempts
+to find a single type to which they can all be converted. Since some
+type pairs have bidirectional conversions, preference is given to _safe_
+conversions. In technical terms, all possible types are arranged into
+a lattice, from which a most general supertype is selected where possible.
+
+The type resulting from type unification may be one of the input types, or
+it may be an entirely new type produced by combination of two or more
+input types.
+
+The following rules do not guarantee a valid result. In addition to these
+rules, unification fails if any of the given types are not convertible
+(per the above rules) to the selected result type.
+
+The following unification rules apply transitively. That is, if a rule is
+defined from A to B, and one from B to C, then A can unify to C.
+
+Number and bool types both unify with string by preferring string.
+
+Two collection types of the same kind unify according to the unification
+of their element types.
+
+List and set types unify by preferring the list type.
+
+Map and object types unify by preferring the object type.
+
+List, set and tuple types unify by preferring the tuple type.
+
+The dynamic pseudo-type unifies with any other type by selecting that other
+type. The dynamic pseudo-type is the result type only if _all_ input types
+are the dynamic pseudo-type.
+
+Two object types unify by constructing a new type whose attributes are
+the union of those of the two input types. Any common attributes themselves
+have their types unified.
+
+Two tuple types of the same length unify by constructing a new type of the
+same length whose elements are the unification of the corresponding elements
+in the two input types.
+
+## Static Analysis
+
+In most applications, full expression evaluation is sufficient for understanding
+the provided configuration. However, some specialized applications require more
+direct access to the physical structures in the expressions, which can for
+example allow the construction of new language constructs in terms of the
+existing syntax elements.
+
+Since static analysis analyses the physical structure of configuration, the
+details will vary depending on syntax. Each syntax must decide which of its
+physical structures corresponds to the following analyses, producing error
+diagnostics if they are applied to inappropriate expressions.
+
+The following are the required static analysis functions:
+
+* **Static List**: Require list/tuple construction syntax to be used and
+  return a list of expressions for each of the elements given.
+
+* **Static Map**: Require map/object construction syntax to be used and
+  return a list of key/value pairs -- both expressions -- for each of
+  the elements given. The usual constraint that a map key must be a string
+  must not apply to this analysis, thus allowing applications to interpret
+  arbitrary keys as they see fit.
+
+* **Static Call**: Require function call syntax to be used and return an
+  object describing the called function name and a list of expressions
+  representing each of the call arguments.
+
+* **Static Traversal**: Require a reference to a symbol in the variable
+  scope and return a description of the path from the root scope to the
+  accessed attribute or index. (A short sketch of this analysis in terms of
+  this repository's Go API appears under _Implementation Considerations_
+  below.)
+
+The intent of a calling application using these features is to require a more
+rigid interpretation of the configuration than in expression evaluation.
+Syntax implementations should make use of the extra contextual information
+provided in order to make an intuitive mapping onto the constructs of the
+underlying syntax, possibly interpreting the expression slightly differently
+than it would be interpreted in normal evaluation.
+
+Each syntax must define which of its expression elements each of the analyses
+above applies to, and how those analyses behave given those expression elements.
+
+## Implementation Considerations
+
+Implementations of this specification are free to adopt any strategy that
+produces behavior consistent with the specification. This non-normative
+section describes some possible implementation strategies that are consistent
+with the goals of this specification.
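+
+For example, the _Static Traversal_ analysis corresponds to the
+`AbsTraversalForExpr` helper in this repository's Go implementation (see
+`traversal_for_expr.go` below). A non-normative sketch, where `attr` is
+assumed to be an `*hcl.Attribute` previously extracted from body content:
+
+```go
+traversal, diags := hcl.AbsTraversalForExpr(attr.Expr)
+if !diags.HasErrors() {
+	// The first step of an absolute traversal is the root symbol name,
+	// e.g. "var" for an expression like var.foo; attribute and index
+	// steps follow it.
+	fmt.Println(traversal.RootName())
+}
+```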
+
+### Language-agnosticism
+
+The language-agnosticism of this specification assumes that certain behaviors
+are implemented separately for each syntax:
+
+* Matching of a body schema with the physical elements of a body in the
+  source language, to determine correspondence between physical constructs
+  and schema elements.
+
+* Implementing the _dynamic attributes_ body processing mode by either
+  interpreting all physical constructs as attributes or producing an error
+  if non-attribute constructs are present.
+
+* Providing an evaluation function for all possible expressions that produces
+  a value given an evaluation context.
+
+* Providing the static analysis functionality described above in a manner that
+  makes sense within the convention of the syntax.
+
+The suggested implementation strategy is to use an implementation language's
+closest concept to an _abstract type_, _virtual type_ or _interface type_
+to represent both Body and Expression. Each language-specific implementation
+can then provide an implementation of each of these types wrapping AST nodes
+or other physical constructs from the language parser.
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go
new file mode 100644
index 0000000..98ada87
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go
@@ -0,0 +1,40 @@
+package hcl
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+type staticExpr struct {
+	val cty.Value
+	rng Range
+}
+
+// StaticExpr returns an Expression that always evaluates to the given value.
+//
+// This is useful to substitute default values for expressions that are
+// not explicitly given in configuration and thus would otherwise have no
+// Expression to return.
+//
+// Since expressions are expected to have a source range, the caller must
+// provide one. Ideally this should be a real source range, but it can
+// be a synthetic one (with an empty-string filename) if no suitable range
+// is available.
+func StaticExpr(val cty.Value, rng Range) Expression {
+	return staticExpr{val, rng}
+}
+
+func (e staticExpr) Value(ctx *EvalContext) (cty.Value, Diagnostics) {
+	return e.val, nil
+}
+
+func (e staticExpr) Variables() []Traversal {
+	return nil
+}
+
+func (e staticExpr) Range() Range {
+	return e.rng
+}
+
+func (e staticExpr) StartRange() Range {
+	return e.rng
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/structure.go
new file mode 100644
index 0000000..b336f30
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/structure.go
@@ -0,0 +1,151 @@
+package hcl
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// File is the top-level node that results from parsing an HCL file.
+type File struct {
+	Body  Body
+	Bytes []byte
+
+	// Nav is used to integrate with the "hcled" editor integration package,
+	// and with diagnostic information formatters. It is not for direct use
+	// by a calling application.
+	Nav interface{}
+}
+
+// Block represents a nested block within a Body.
+type Block struct {
+	Type   string
+	Labels []string
+	Body   Body
+
+	DefRange    Range   // Range that can be considered the "definition" for seeking in an editor
+	TypeRange   Range   // Range for the block type declaration specifically.
+	LabelRanges []Range // Ranges for the label values specifically.
+}
+
+// Blocks is a sequence of Block.
+type Blocks []*Block
+
+// Attributes is a set of attributes keyed by their names.
+type Attributes map[string]*Attribute
+
+// Body is a container for attributes and blocks. It serves as the primary
+// unit of hierarchical structure within configuration.
+//
+// The content of a body cannot be meaningfully interpreted without a schema,
+// so Body represents the raw body content and has methods that allow the
+// content to be extracted in terms of a given schema.
+type Body interface {
+	// Content verifies that the entire body content conforms to the given
+	// schema and then returns it, and/or returns diagnostics. The returned
+	// body content is valid if non-nil, regardless of whether Diagnostics
+	// are provided, but diagnostics should still be eventually shown to
+	// the user.
+	Content(schema *BodySchema) (*BodyContent, Diagnostics)
+
+	// PartialContent is like Content except that it permits the configuration
+	// to contain additional blocks or attributes not specified in the
+	// schema. If any are present, the returned Body is non-nil and contains
+	// the remaining items from the body that were not selected by the schema.
+	PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics)
+
+	// JustAttributes attempts to interpret all of the contents of the body
+	// as attributes, allowing for the contents to be accessed without a priori
+	// knowledge of the structure.
+	//
+	// The behavior of this method depends on the body's source language.
+	// Some languages, like JSON, can't distinguish between attributes and
+	// blocks without schema hints, but for languages that _can_, error
+	// diagnostics will be generated if any blocks are present in the body.
+	//
+	// Diagnostics may be produced for other reasons too, such as duplicate
+	// declarations of the same attribute.
+	JustAttributes() (Attributes, Diagnostics)
+
+	// MissingItemRange returns a range that represents where a missing item
+	// might hypothetically be inserted. This is used when producing
+	// diagnostics about missing required attributes or blocks. Not all bodies
+	// will have an obvious single insertion point, so the result here may
+	// be rather arbitrary.
+	MissingItemRange() Range
+}
+
+// BodyContent is the result of applying a BodySchema to a Body.
+type BodyContent struct {
+	Attributes Attributes
+	Blocks     Blocks
+
+	MissingItemRange Range
+}
+
+// Attribute represents an attribute from within a body.
+type Attribute struct {
+	Name string
+	Expr Expression
+
+	Range     Range
+	NameRange Range
+}
+
+// Expression is a literal value or an expression provided in the
+// configuration, which can be evaluated within a scope to produce a value.
+type Expression interface {
+	// Value returns the value resulting from evaluating the expression
+	// in the given evaluation context.
+	//
+	// The context may be nil, in which case the expression may contain
+	// only constants and diagnostics will be produced for any non-constant
+	// sub-expressions. (The exact definition of this depends on the source
+	// language.)
+	//
+	// The context may instead be set but have either its Variables or
+	// Functions maps set to nil, in which case only use of these features
+	// will return diagnostics.
+	//
+	// Different diagnostics are provided depending on whether the given
+	// context maps are nil or empty. In the former case, the message
+	// tells the user that variables/functions are not permitted at all,
+	// while in the latter case usage will produce a "not found" error for
+	// the specific symbol in question.
+	Value(ctx *EvalContext) (cty.Value, Diagnostics)
+
+	// Variables returns a list of variables referenced in the receiving
+	// expression. These are expressed as absolute Traversals, so may include
+	// additional information about how the variable is used, such as
+	// attribute lookups, which the calling application can potentially use
+	// to only selectively populate the scope.
+	Variables() []Traversal
+
+	Range() Range
+	StartRange() Range
+}
+
+// OfType filters the receiving block sequence by block type name,
+// returning a new block sequence including only the blocks of the
+// requested type.
+func (els Blocks) OfType(typeName string) Blocks {
+	ret := make(Blocks, 0)
+	for _, el := range els {
+		if el.Type == typeName {
+			ret = append(ret, el)
+		}
+	}
+	return ret
+}
+
+// ByType transforms the receiving block sequence into a map from type
+// name to block sequences of only that type.
+func (els Blocks) ByType() map[string]Blocks {
+	ret := make(map[string]Blocks)
+	for _, el := range els {
+		ty := el.Type
+		if ret[ty] == nil {
+			ret[ty] = make(Blocks, 0, 1)
+		}
+		ret[ty] = append(ret[ty], el)
+	}
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
new file mode 100644
index 0000000..24f4c91
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
@@ -0,0 +1,352 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// A Traversal is a description of traversing through a value via a series
+// of operations such as attribute lookup, index lookup, etc.
+//
+// It is used to look up values in scopes, for example.
+//
+// The traversal operations are implementations of interface Traverser.
+// This is a closed set of implementations, so the interface cannot be
+// implemented from outside this package.
+//
+// A traversal can be absolute (its first value is a symbol name) or relative
+// (starts from an existing value).
+type Traversal []Traverser
+
+// TraversalJoin appends a relative traversal to an absolute traversal to
+// produce a new absolute traversal.
+func TraversalJoin(abs Traversal, rel Traversal) Traversal {
+	if abs.IsRelative() {
+		panic("first argument to TraversalJoin must be absolute")
+	}
+	if !rel.IsRelative() {
+		panic("second argument to TraversalJoin must be relative")
+	}
+
+	ret := make(Traversal, len(abs)+len(rel))
+	copy(ret, abs)
+	copy(ret[len(abs):], rel)
+	return ret
+}
+
+// TraverseRel applies the receiving traversal to the given value, returning
+// the resulting value. This is supported only for relative traversals,
+// and will panic if applied to an absolute traversal.
+func (t Traversal) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
+	if !t.IsRelative() {
+		panic("can't use TraverseRel on an absolute traversal")
+	}
+
+	current := val
+	var diags Diagnostics
+	for _, tr := range t {
+		var newDiags Diagnostics
+		current, newDiags = tr.TraversalStep(current)
+		diags = append(diags, newDiags...)
+		if newDiags.HasErrors() {
+			return cty.DynamicVal, diags
+		}
+	}
+	return current, diags
+}
+
+// TraverseAbs applies the receiving traversal to the given eval context,
+// returning the resulting value. This is supported only for absolute
+// traversals, and will panic if applied to a relative traversal.
+func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
+	if t.IsRelative() {
+		panic("can't use TraverseAbs on a relative traversal")
+	}
+
+	split := t.SimpleSplit()
+	root := split.Abs[0].(TraverseRoot)
+	name := root.Name
+
+	thisCtx := ctx
+	hasNonNil := false
+	for thisCtx != nil {
+		if thisCtx.Variables == nil {
+			thisCtx = thisCtx.parent
+			continue
+		}
+		hasNonNil = true
+		val, exists := thisCtx.Variables[name]
+		if exists {
+			return split.Rel.TraverseRel(val)
+		}
+		thisCtx = thisCtx.parent
+	}
+
+	if !hasNonNil {
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Variables not allowed",
+				Detail:   "Variables may not be used here.",
+				Subject:  &root.SrcRange,
+			},
+		}
+	}
+
+	suggestions := make([]string, 0, len(ctx.Variables))
+	thisCtx = ctx
+	for thisCtx != nil {
+		for k := range thisCtx.Variables {
+			suggestions = append(suggestions, k)
+		}
+		thisCtx = thisCtx.parent
+	}
+	suggestion := nameSuggestion(name, suggestions)
+	if suggestion != "" {
+		suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+	}
+
+	return cty.DynamicVal, Diagnostics{
+		{
+			Severity: DiagError,
+			Summary:  "Unknown variable",
+			Detail:   fmt.Sprintf("There is no variable named %q.%s", name, suggestion),
+			Subject:  &root.SrcRange,
+		},
+	}
+}
+
+// IsRelative returns true if the receiver is a relative traversal, or false
+// otherwise.
+func (t Traversal) IsRelative() bool {
+	if len(t) == 0 {
+		return true
+	}
+	if _, firstIsRoot := t[0].(TraverseRoot); firstIsRoot {
+		return false
+	}
+	return true
+}
+
+// SimpleSplit returns a TraversalSplit where the name lookup is the absolute
+// part and the remainder is the relative part. Supported only for
+// absolute traversals, and will panic if applied to a relative traversal.
+//
+// This can be used by applications that have a relatively-simple variable
+// namespace where only the top-level is directly populated in the scope, with
+// everything else handled by relative lookups from those initial values.
+func (t Traversal) SimpleSplit() TraversalSplit {
+	if t.IsRelative() {
+		panic("can't use SimpleSplit on a relative traversal")
+	}
+	return TraversalSplit{
+		Abs: t[0:1],
+		Rel: t[1:],
+	}
+}
+
+// RootName returns the root name for an absolute traversal. Will panic if
+// called on a relative traversal.
+func (t Traversal) RootName() string {
+	if t.IsRelative() {
+		panic("can't use RootName on a relative traversal")
+	}
+	return t[0].(TraverseRoot).Name
+}
+
+// SourceRange returns the source range for the traversal.
+func (t Traversal) SourceRange() Range {
+	if len(t) == 0 {
+		// Nothing useful to return here, but we'll return something
+		// that's correctly-typed at least.
+		return Range{}
+	}
+
+	return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange())
+}
+
+// TraversalSplit represents a pair of traversals, the first of which is
+// an absolute traversal and the second of which is relative to the first.
+//
+// This is used by calling applications that only populate prefixes of the
+// traversals in the scope, with Abs representing the part coming from the
+// scope and Rel representing the remaining steps once that part is
+// retrieved.
+type TraversalSplit struct {
+	Abs Traversal
+	Rel Traversal
+}
+
+// TraverseAbs traverses from a scope to the value resulting from the
+// absolute traversal.
+func (t TraversalSplit) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
+	return t.Abs.TraverseAbs(ctx)
+}
+
+// TraverseRel traverses from a given value, assumed to be the result of
+// TraverseAbs on some scope, to a final result for the entire split traversal.
+func (t TraversalSplit) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
+	return t.Rel.TraverseRel(val)
+}
+
+// Traverse is a convenience function to apply TraverseAbs followed by
+// TraverseRel.
+func (t TraversalSplit) Traverse(ctx *EvalContext) (cty.Value, Diagnostics) {
+	v1, diags := t.TraverseAbs(ctx)
+	if diags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+	v2, newDiags := t.TraverseRel(v1)
+	diags = append(diags, newDiags...)
+	return v2, diags
+}
+
+// Join concatenates together the Abs and Rel parts to produce a single
+// absolute traversal.
+func (t TraversalSplit) Join() Traversal {
+	return TraversalJoin(t.Abs, t.Rel)
+}
+
+// RootName returns the root name for the absolute part of the split.
+func (t TraversalSplit) RootName() string {
+	return t.Abs.RootName()
+}
+
+// A Traverser is a step within a Traversal.
+type Traverser interface {
+	TraversalStep(cty.Value) (cty.Value, Diagnostics)
+	SourceRange() Range
+	isTraverserSigil() isTraverser
+}
+
+// Embed this in a struct to declare it as a Traverser
+type isTraverser struct {
+}
+
+func (tr isTraverser) isTraverserSigil() isTraverser {
+	return isTraverser{}
+}
+
+// TraverseRoot looks up a root name in a scope. It is used as the first step
+// of an absolute Traversal, and cannot itself be traversed directly.
+type TraverseRoot struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+// TraversalStep on a TraverseRoot immediately panics, because absolute
+// traversals cannot be directly traversed.
+func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) {
+	panic("Cannot traverse an absolute traversal")
+}
+
+func (tn TraverseRoot) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseAttr looks up an attribute in its initial value.
+type TraverseAttr struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	if val.IsNull() {
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Attempt to get attribute from null value",
+				Detail:   "This value is null, so it does not have any attributes.",
+				Subject:  &tn.SrcRange,
+			},
+		}
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		if !ty.HasAttribute(tn.Name) {
+			return cty.DynamicVal, Diagnostics{
+				{
+					Severity: DiagError,
+					Summary:  "Unsupported attribute",
+					Detail:   fmt.Sprintf("This object does not have an attribute named %q.", tn.Name),
+					Subject:  &tn.SrcRange,
+				},
+			}
+		}
+
+		if !val.IsKnown() {
+			return cty.UnknownVal(ty.AttributeType(tn.Name)), nil
+		}
+
+		return val.GetAttr(tn.Name), nil
+	case ty.IsMapType():
+		if !val.IsKnown() {
+			return cty.UnknownVal(ty.ElementType()), nil
+		}
+
+		idx := cty.StringVal(tn.Name)
+		if val.HasIndex(idx).False() {
+			return cty.DynamicVal, Diagnostics{
+				{
+					Severity: DiagError,
+					Summary:  "Missing map element",
+					Detail:   fmt.Sprintf("This map does not have an element with the key %q.", tn.Name),
+					Subject:  &tn.SrcRange,
+				},
+			}
+		}
+
+		return val.Index(idx), nil
+	case ty == cty.DynamicPseudoType:
+		return cty.DynamicVal, nil
+	default:
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Unsupported attribute",
+				Detail:   "This value does not have any attributes.",
+				Subject:  &tn.SrcRange,
+			},
+		}
+	}
+}
+
+func (tn TraverseAttr) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseIndex applies the index operation to its initial value.
+type TraverseIndex struct {
+	isTraverser
+	Key      cty.Value
+	SrcRange Range
+}
+
+func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	return Index(val, tn.Key, &tn.SrcRange)
+}
+
+func (tn TraverseIndex) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseSplat applies the splat operation to its initial value.
+type TraverseSplat struct {
+	isTraverser
+	Each     Traversal
+	SrcRange Range
+}
+
+func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	panic("TraverseSplat not yet implemented")
+}
+
+func (tn TraverseSplat) SourceRange() Range {
+	return tn.SrcRange
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
new file mode 100644
index 0000000..5f52946
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
@@ -0,0 +1,121 @@
+package hcl
+
+// AbsTraversalForExpr attempts to interpret the given expression as
+// an absolute traversal, or returns error diagnostic(s) if that is
+// not possible for the given expression.
+//
+// A particular Expression implementation can support this function by
+// offering a method called AsTraversal that takes no arguments and
+// returns either a valid absolute traversal or nil to indicate that
+// no traversal is possible. Alternatively, an implementation can support
+// UnwrapExpression to delegate handling of this function to a wrapped
+// Expression object.
+//
+// In most cases the calling application is interested in the value
+// that results from an expression, but in rarer cases the application
+// needs to see the name of the variable and subsequent
+// attributes/indexes itself, for example to allow users to give references
+// to the variables themselves rather than to their values.
+// An implementer of this function should at least support attribute and
+// index steps.
+func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
+	type asTraversal interface {
+		AsTraversal() Traversal
+	}
+
+	physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
+		_, supported := expr.(asTraversal)
+		return supported
+	})
+
+	if asT, supported := physExpr.(asTraversal); supported {
+		if traversal := asT.AsTraversal(); traversal != nil {
+			return traversal, nil
+		}
+	}
+	return nil, Diagnostics{
+		&Diagnostic{
+			Severity: DiagError,
+			Summary:  "Invalid expression",
+			Detail:   "A static variable reference is required.",
+			Subject:  expr.Range().Ptr(),
+		},
+	}
+}
+
+// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns
+// a relative traversal instead. Due to the nature of HCL expressions, the
+// first element of the returned traversal is always a TraverseAttr, and
+// then it will be followed by zero or more other steps.
+//
+// Any expression accepted by AbsTraversalForExpr is also accepted by
+// RelTraversalForExpr.
+func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
+	traversal, diags := AbsTraversalForExpr(expr)
+	if len(traversal) > 0 {
+		root := traversal[0].(TraverseRoot)
+		traversal[0] = TraverseAttr{
+			Name:     root.Name,
+			SrcRange: root.SrcRange,
+		}
+	}
+	return traversal, diags
+}
+
+// ExprAsKeyword attempts to interpret the given expression as a static keyword,
+// returning the keyword string if possible, and the empty string if not.
+//
+// A static keyword, for the sake of this function, is a single identifier.
+// For example, the following attribute has an expression that would produce
+// the keyword "foo":
+//
+//     example = foo
+//
+// This function is a variant of AbsTraversalForExpr, which uses the same
+// interface on the given expression. This helper constrains the result
+// further by requiring only a single root identifier.
+//
+// This function is intended to be used with the following idiom, to recognize
+// situations where one of a fixed set of keywords is required and arbitrary
+// expressions are not allowed:
+//
+//     switch hcl.ExprAsKeyword(expr) {
+//     case "allow":
+//         // (take suitable action for keyword "allow")
+//     case "deny":
+//         // (take suitable action for keyword "deny")
+//     default:
+//         diags = append(diags, &hcl.Diagnostic{
+//             // ... "invalid keyword" diagnostic message ...
+//         })
+//     }
+//
+// The above approach will generate the same message for both the use of an
+// unrecognized keyword and for not using a keyword at all, which is usually
+// reasonable if the message specifies that the given value must be a keyword
+// from that fixed list.
+//
+// Note that in the native syntax the keywords "true", "false", and "null" are
+// recognized as literal values during parsing and so these reserved words
+// cannot be accepted as keywords by this function.
+//
+// Since interpreting an expression as a keyword bypasses usual expression
+// evaluation, it should be used sparingly for situations where e.g. one of
+// a fixed set of keywords is used in a structural way in a special attribute
+// to affect the further processing of a block.
+func ExprAsKeyword(expr Expression) string { + type asTraversal interface { + AsTraversal() Traversal + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(asTraversal) + return supported + }) + + if asT, supported := physExpr.(asTraversal); supported { + if traversal := asT.AsTraversal(); len(traversal) == 1 { + return traversal.RootName() + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go new file mode 100644 index 0000000..7e652e9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go @@ -0,0 +1,21 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +type blockLabel struct { + Value string + Range hcl.Range +} + +func labelsForBlock(block *hcl.Block) []blockLabel { + ret := make([]blockLabel, len(block.Labels)) + for i := range block.Labels { + ret[i] = blockLabel{ + Value: block.Labels[i], + Range: block.LabelRanges[i], + } + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go new file mode 100644 index 0000000..6cf93fe --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { + schema := ImpliedSchema(spec) + + var content *hcl.BodyContent + var diags hcl.Diagnostics + var leftovers hcl.Body + + if partial { + content, leftovers, diags = body.PartialContent(schema) + } else { + content, diags = body.Content(schema) + } + + val, valDiags := spec.decode(content, blockLabels, ctx) + diags = append(diags, valDiags...) + + return val, leftovers, diags +} + +func impliedType(spec Spec) cty.Type { + return spec.impliedType() +} + +func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { + schema := ImpliedSchema(spec) + content, _, _ := body.PartialContent(schema) + + return spec.sourceRange(content, blockLabels) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go new file mode 100644 index 0000000..23bfe54 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go @@ -0,0 +1,12 @@ +// Package hcldec provides a higher-level API for unpacking the content of +// HCL bodies, implemented in terms of the low-level "Content" API exposed +// by the bodies themselves. +// +// It allows decoding an entire nested configuration in a single operation +// by providing a description of the intended structure. +// +// For some applications it may be more convenient to use the "gohcl" +// package, which has a similar purpose but decodes directly into native +// Go data types. hcldec instead targets the cty type system, and thus allows +// a cty-driven application to remain within that type system. 
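+//
+// As a brief, illustrative sketch only (the "body" variable and the
+// attribute name are hypothetical, not part of this package), decoding
+// typically looks like:
+//
+//     spec := hcldec.ObjectSpec{
+//         "io_mode": &hcldec.AttrSpec{Name: "io_mode", Type: cty.String},
+//     }
+//     val, diags := hcldec.Decode(body, spec, nil)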
+package hcldec diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go new file mode 100644 index 0000000..e2027cf --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go @@ -0,0 +1,23 @@ +package hcldec + +import ( + "encoding/gob" +) + +func init() { + // Every Spec implementation should be registered with gob, so that + // specs can be sent over gob channels, such as using + // github.com/hashicorp/go-plugin with plugins that need to describe + // what shape of configuration they are expecting. + gob.Register(ObjectSpec(nil)) + gob.Register(TupleSpec(nil)) + gob.Register((*AttrSpec)(nil)) + gob.Register((*LiteralSpec)(nil)) + gob.Register((*ExprSpec)(nil)) + gob.Register((*BlockSpec)(nil)) + gob.Register((*BlockListSpec)(nil)) + gob.Register((*BlockSetSpec)(nil)) + gob.Register((*BlockMapSpec)(nil)) + gob.Register((*BlockLabelSpec)(nil)) + gob.Register((*DefaultSpec)(nil)) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/public.go b/vendor/github.com/hashicorp/hcl2/hcldec/public.go new file mode 100644 index 0000000..5d1f10a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/public.go @@ -0,0 +1,78 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// Decode interprets the given body using the given specification and returns +// the resulting value. If the given body is not valid per the spec, error +// diagnostics are returned and the returned value is likely to be incomplete. +// +// The ctx argument may be nil, in which case any references to variables or +// functions will produce error diagnostics. +func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, _, diags := decode(body, nil, ctx, spec, false) + return val, diags +} + +// PartialDecode is like Decode except that it permits "leftover" items in +// the top-level body, which are returned as a new body to allow for +// further processing. +// +// Any descendent block bodies are _not_ decoded partially and thus must +// be fully described by the given specification. +func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { + return decode(body, nil, ctx, spec, true) +} + +// ImpliedType returns the value type that should result from decoding the +// given spec. +func ImpliedType(spec Spec) cty.Type { + return impliedType(spec) +} + +// SourceRange interprets the given body using the given specification and +// then returns the source range of the value that would be used to +// fulfill the spec. +// +// This can be used if application-level validation detects value errors, to +// obtain a reasonable SourceRange to use for generated diagnostics. It works +// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) +// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will +// be less useful the broader the specification, so e.g. a spec that returns +// the entirety of all of the blocks of a given type is likely to be +// _particularly_ arbitrary and useless. +// +// If the given body is not valid per the given spec, the result is best-effort +// and may not actually be something ideal. It's expected that an application +// will already have used Decode or PartialDecode earlier and thus had an +// opportunity to detect and report spec violations. 
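+//
+// A hypothetical usage sketch, where "body" and "spec" come from an earlier
+// decode step and an application-level validation has failed:
+//
+//     rng := hcldec.SourceRange(body, spec)
+//     diags = append(diags, &hcl.Diagnostic{
+//         Severity: hcl.DiagError,
+//         Summary:  "Unsupported value",
+//         Subject:  rng.Ptr(),
+//     })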
+func SourceRange(body hcl.Body, spec Spec) hcl.Range { + return sourceRange(body, nil, spec) +} + +// ChildBlockTypes returns a map of all of the child block types declared +// by the given spec, with block type names as keys and the associated +// nested body specs as values. +func ChildBlockTypes(spec Spec) map[string]Spec { + ret := map[string]Spec{} + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. + var visit visitFunc + visit = func(s Spec) { + if bs, ok := s.(blockSpec); ok { + for _, blockS := range bs.blockHeaderSchemata() { + ret[blockS.Type] = bs.nestedSpec() + } + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go new file mode 100644 index 0000000..b57bd96 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. +// This is the schema that the Decode function will use internally to +// access the content of a given body. +func ImpliedSchema(spec Spec) *hcl.BodySchema { + var attrs []hcl.AttributeSchema + var blocks []hcl.BlockHeaderSchema + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. + var visit visitFunc + visit = func(s Spec) { + if as, ok := s.(attrSpec); ok { + attrs = append(attrs, as.attrSchemata()...) + } + + if bs, ok := s.(blockSpec); ok { + blocks = append(blocks, bs.blockHeaderSchemata()...) + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return &hcl.BodySchema{ + Attributes: attrs, + Blocks: blocks, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go new file mode 100644 index 0000000..25cafcd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go @@ -0,0 +1,998 @@ +package hcldec + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// A Spec is a description of how to decode a hcl.Body to a cty.Value. +// +// The various other types in this package whose names end in "Spec" are +// the spec implementations. The most common top-level spec is ObjectSpec, +// which decodes body content into a cty.Value of an object type. +type Spec interface { + // Perform the decode operation on the given body, in the context of + // the given block (which might be null), using the given eval context. + // + // "block" is provided only by the nested calls performed by the spec + // types that work on block bodies. + decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + + // Return the cty.Type that should be returned when decoding a body with + // this spec. + impliedType() cty.Type + + // Call the given callback once for each of the nested specs that would + // get decoded with the same body and block as the receiver. This should + // not descend into the nested specs used when decoding blocks. 
+ visitSameBodyChildren(cb visitFunc) + + // Determine the source range of the value that would be returned for the + // spec in the given content, in the context of the given block + // (which might be null). If the corresponding item is missing, return + // a place where it might be inserted. + sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range +} + +type visitFunc func(spec Spec) + +// An ObjectSpec is a Spec that produces a cty.Value of an object type whose +// attributes correspond to the keys of the spec map. +type ObjectSpec map[string]Spec + +// attrSpec is implemented by specs that require attributes from the body. +type attrSpec interface { + attrSchemata() []hcl.AttributeSchema +} + +// blockSpec is implemented by specs that require blocks from the body. +type blockSpec interface { + blockHeaderSchemata() []hcl.BlockHeaderSchema + nestedSpec() Spec +} + +// specNeedingVariables is implemented by specs that can use variables +// from the EvalContext, to declare which variables they need. +type specNeedingVariables interface { + variablesNeeded(content *hcl.BodyContent) []hcl.Traversal +} + +func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make(map[string]cty.Value, len(s)) + var diags hcl.Diagnostics + + for k, spec := range s { + var kd hcl.Diagnostics + vals[k], kd = spec.decode(content, blockLabels, ctx) + diags = append(diags, kd...) + } + + return cty.ObjectVal(vals), diags +} + +func (s ObjectSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyObject + } + + attrTypes := make(map[string]cty.Type) + for k, childSpec := range s { + attrTypes[k] = childSpec.impliedType() + } + return cty.Object(attrTypes) +} + +func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose +// elements correspond to the elements of the spec slice. +type TupleSpec []Spec + +func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make([]cty.Value, len(s)) + var diags hcl.Diagnostics + + for i, spec := range s { + var ed hcl.Diagnostics + vals[i], ed = spec.decode(content, blockLabels, ctx) + diags = append(diags, ed...) + } + + return cty.TupleVal(vals), diags +} + +func (s TupleSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyTuple + } + + attrTypes := make([]cty.Type, len(s)) + for i, childSpec := range s { + attrTypes[i] = childSpec.impliedType() + } + return cty.Tuple(attrTypes) +} + +func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. 
+ return content.MissingItemRange +} + +// An AttrSpec is a Spec that evaluates a particular attribute expression in +// the body and returns its resulting value converted to the requested type, +// or produces a diagnostic if the type is incorrect. +type AttrSpec struct { + Name string + Type cty.Type + Required bool +} + +func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + attr, exists := content.Attributes[s.Name] + if !exists { + return nil + } + + return attr.Expr.Variables() +} + +// attrSpec implementation +func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { + return []hcl.AttributeSchema{ + { + Name: s.Name, + Required: s.Required, + }, + } +} + +func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + attr, exists := content.Attributes[s.Name] + if !exists { + return content.MissingItemRange + } + + return attr.Expr.Range() +} + +func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + attr, exists := content.Attributes[s.Name] + if !exists { + // We don't need to check required and emit a diagnostic here, because + // that would already have happened when building "content". + return cty.NullVal(s.Type), nil + } + + val, diags := attr.Expr.Value(ctx) + + convVal, err := convert.Convert(val, s.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect attribute value type", + Detail: fmt.Sprintf( + "Inappropriate value for attribute %q: %s.", + s.Name, err.Error(), + ), + Subject: attr.Expr.StartRange().Ptr(), + Context: hcl.RangeBetween(attr.NameRange, attr.Expr.StartRange()).Ptr(), + }) + // We'll return an unknown value of the _correct_ type so that the + // incomplete result can still be used for some analysis use-cases. + val = cty.UnknownVal(s.Type) + } else { + val = convVal + } + + return val, diags +} + +func (s *AttrSpec) impliedType() cty.Type { + return s.Type +} + +// A LiteralSpec is a Spec that produces the given literal value, ignoring +// the given body. +type LiteralSpec struct { + Value cty.Value +} + +func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Value, nil +} + +func (s *LiteralSpec) impliedType() cty.Type { + return s.Value.Type() +} + +func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No sensible range to return for a literal, so the caller had better + // ensure it doesn't cause any diagnostics. + return hcl.Range{ + Filename: "", + } +} + +// An ExprSpec is a Spec that evaluates the given expression, ignoring the +// given body. 
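+//
+// This is useful, for example, to splice a value computed outside the body
+// being decoded into the result. An illustrative sketch, where "expr" is a
+// hypothetical hcl.Expression obtained by the calling application:
+//
+//     spec := &hcldec.ExprSpec{Expr: expr}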
+type ExprSpec struct { + Expr hcl.Expression +} + +func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + return s.Expr.Variables() +} + +func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Expr.Value(ctx) +} + +func (s *ExprSpec) impliedType() cty.Type { + // We can't know the type of our expression until we evaluate it + return cty.DynamicPseudoType +} + +func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + return s.Expr.Range() +} + +// A BlockSpec is a Spec that produces a cty.Value by decoding the contents +// of a single nested block of a given type, using a nested spec. +// +// If the Required flag is not set, the nested block may be omitted, in which +// case a null value is produced. If it _is_ set, an error diagnostic is +// produced if there are no nested blocks of the given type. +type BlockSpec struct { + TypeName string + Nested Spec + Required bool +} + +func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return nil + } + + return Variables(childBlock.Body, s.Nested) +} + +func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + if childBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. Previous definition was at %s.", + s.TypeName, childBlock.DefRange.String(), + ), + Subject: &candidate.DefRange, + }) + break + } + + childBlock = candidate + } + + if childBlock == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(s.Nested.impliedType()), diags + } + + if s.Nested == nil { + panic("BlockSpec with no Nested Spec") + } + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) 
+ return val, diags +} + +func (s *BlockSpec) impliedType() cty.Type { + return s.Nested.impliedType() +} + +func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockListSpec is a Spec that produces a cty list of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +type BlockListSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockListSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockListSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) + elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.ListValEmpty(s.Nested.impliedType()) + } else { + ret = cty.ListVal(elems) + } + + return ret, diags +} + +func (s *BlockListSpec) impliedType() cty.Type { + return cty.List(s.Nested.impliedType()) +} + +func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. 
+ + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockSetSpec is a Spec that produces a cty set of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +type BlockSetSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSetSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockSetSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) + elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.SetValEmpty(s.Nested.impliedType()) + } else { + ret = cty.SetVal(elems) + } + + return ret, diags +} + +func (s *BlockSetSpec) impliedType() cty.Type { + return cty.Set(s.Nested.impliedType()) +} + +func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockMapSpec is a Spec that produces a cty map of the results of +// decoding all of the nested blocks of a given type, using a nested spec. 
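+//
+// An illustrative sketch (the block type and label name are hypothetical):
+//
+//     spec := &hcldec.BlockMapSpec{
+//         TypeName:   "instance",
+//         LabelNames: []string{"name"},
+//         Nested:     &hcldec.AttrSpec{Name: "image", Type: cty.String},
+//     }
+//
+// would decode all "instance" blocks into a map keyed by each block's
+// "name" label.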
+//
+// One level of map structure is created for each of the given label names.
+// There must be at least one given label name.
+type BlockMapSpec struct {
+	TypeName   string
+	LabelNames []string
+	Nested     Spec
+}
+
+func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...),
+		},
+	}
+}
+
+// blockSpec implementation
+func (s *BlockMapSpec) nestedSpec() Spec {
+	return s.Nested
+}
+
+// specNeedingVariables implementation
+func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockMapSpec with no Nested Spec")
+	}
+
+	elems := map[string]interface{}{}
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		childLabels := labelsForBlock(childBlock)
+		val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false)
+		targetMap := elems
+		for _, key := range childBlock.Labels[:len(s.LabelNames)-1] {
+			if _, exists := targetMap[key]; !exists {
+				targetMap[key] = make(map[string]interface{})
+			}
+			targetMap = targetMap[key].(map[string]interface{})
+		}
+
+		diags = append(diags, childDiags...)
+
+		key := childBlock.Labels[len(s.LabelNames)-1]
+		if _, exists := targetMap[key]; exists {
+			labelsBuf := bytes.Buffer{}
+			for _, label := range childBlock.Labels {
+				fmt.Fprintf(&labelsBuf, " %q", label)
+			}
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", s.TypeName),
+				Detail: fmt.Sprintf(
+					"A block for %s%s was already defined. The %s labels must be unique.",
+					s.TypeName, labelsBuf.String(), s.TypeName,
+				),
+				Subject: &childBlock.DefRange,
+			})
+			continue
+		}
+
+		targetMap[key] = val
+	}
+
+	if len(elems) == 0 {
+		return cty.MapValEmpty(s.Nested.impliedType()), diags
+	}
+
+	var ctyMap func(map[string]interface{}, int) cty.Value
+	ctyMap = func(raw map[string]interface{}, depth int) cty.Value {
+		vals := make(map[string]cty.Value, len(raw))
+		if depth == 1 {
+			for k, v := range raw {
+				vals[k] = v.(cty.Value)
+			}
+		} else {
+			for k, v := range raw {
+				vals[k] = ctyMap(v.(map[string]interface{}), depth-1)
+			}
+		}
+		return cty.MapVal(vals)
+	}
+
+	return ctyMap(elems, len(s.LabelNames)), diags
+}
+
+func (s *BlockMapSpec) impliedType() cty.Type {
+	ret := s.Nested.impliedType()
+	for range s.LabelNames {
+		ret = cty.Map(ret)
+	}
+	return ret
+}
+
+func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockLabelSpec is a Spec that returns a cty.String representing the
+// label of the block its given body belongs to, if indeed its given body
+// belongs to a block. It is a programming error to use this in a non-block
+// context, so this spec will panic in that case.
+//
+// This spec only works in the nested spec within a BlockSpec, BlockListSpec,
+// BlockSetSpec or BlockMapSpec.
+//
+// The full set of label specs used against a particular block must have a
+// consecutive set of indices starting at zero. The maximum index found
+// defines how many labels the corresponding blocks must have in HCL source.
+type BlockLabelSpec struct {
+	Index int
+	Name  string
+}
+
+func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node
+}
+
+func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	if s.Index >= len(blockLabels) {
+		panic("BlockLabelSpec used in non-block context")
+	}
+
+	return cty.StringVal(blockLabels[s.Index].Value), nil
+}
+
+func (s *BlockLabelSpec) impliedType() cty.Type {
+	return cty.String // labels are always strings
+}
+
+func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	if s.Index >= len(blockLabels) {
+		panic("BlockLabelSpec used in non-block context")
+	}
+
+	return blockLabels[s.Index].Range
+}
+
+func findLabelSpecs(spec Spec) []string {
+	maxIdx := -1
+	var names map[int]string
+
+	var visit visitFunc
+	visit = func(s Spec) {
+		if ls, ok := s.(*BlockLabelSpec); ok {
+			if maxIdx < ls.Index {
+				maxIdx = ls.Index
+			}
+			if names == nil {
+				names = make(map[int]string)
+			}
+			names[ls.Index] = ls.Name
+		}
+		s.visitSameBodyChildren(visit)
+	}
+
+	visit(spec)
+
+	if maxIdx < 0 {
+		return nil // no labels at all
+	}
+
+	ret := make([]string, maxIdx+1)
+	for i := range ret {
+		name := names[i]
+		if name == "" {
+			// Should never happen if the spec is conformant, since we require
+			// consecutive indices starting at zero.
+			name = fmt.Sprintf("missing%02d", i)
+		}
+		ret[i] = name
+	}
+
+	return ret
+}
+
+// DefaultSpec is a spec that wraps two specs, evaluating the primary first
+// and then evaluating the default if the primary returns a null value.
+//
+// The two specifications must have the same implied result type for correct
+// operation. If not, the result is undefined.
+type DefaultSpec struct {
+	Primary Spec
+	Default Spec
+}
+
+func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) {
+	cb(s.Primary)
+	cb(s.Default)
+}
+
+func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	val, diags := s.Primary.decode(content, blockLabels, ctx)
+	if val.IsNull() {
+		var moreDiags hcl.Diagnostics
+		val, moreDiags = s.Default.decode(content, blockLabels, ctx)
+		diags = append(diags, moreDiags...)
+ } + return val, diags +} + +func (s *DefaultSpec) impliedType() cty.Type { + return s.Primary.impliedType() +} + +func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We can't tell from here which of the two specs will ultimately be used + // in our result, so we'll just assume the first. This is usually the right + // choice because the default is often a literal spec that doesn't have a + // reasonable source range to return anyway. + return s.Primary.sourceRange(content, blockLabels) +} + +// TransformExprSpec is a spec that wraps another and then evaluates a given +// hcl.Expression on the result. +// +// The implied type of this spec is determined by evaluating the expression +// with an unknown value of the nested spec's implied type, which may cause +// the result to be imprecise. This spec should not be used in situations where +// precise result type information is needed. +type TransformExprSpec struct { + Wrapped Spec + Expr hcl.Expression + TransformCtx *hcl.EvalContext + VarName string +} + +func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Wrapped) +} + +func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) + if diags.HasErrors() { + // We won't try to run our function in this case, because it'll probably + // generate confusing additional errors that will distract from the + // root cause. + return cty.UnknownVal(s.impliedType()), diags + } + + chiCtx := s.TransformCtx.NewChild() + chiCtx.Variables = map[string]cty.Value{ + s.VarName: wrappedVal, + } + resultVal, resultDiags := s.Expr.Value(chiCtx) + diags = append(diags, resultDiags...) + return resultVal, diags +} + +func (s *TransformExprSpec) impliedType() cty.Type { + wrappedTy := s.Wrapped.impliedType() + chiCtx := s.TransformCtx.NewChild() + chiCtx.Variables = map[string]cty.Value{ + s.VarName: cty.UnknownVal(wrappedTy), + } + resultVal, _ := s.Expr.Value(chiCtx) + return resultVal.Type() +} + +func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We'll just pass through our wrapped range here, even though that's + // not super-accurate, because there's nothing better to return. + return s.Wrapped.sourceRange(content, blockLabels) +} + +// TransformFuncSpec is a spec that wraps another and then evaluates a given +// cty function with the result. The given function must expect exactly one +// argument, where the result of the wrapped spec will be passed. +// +// The implied type of this spec is determined by type-checking the function +// with an unknown value of the nested spec's implied type, which may cause +// the result to be imprecise. This spec should not be used in situations where +// precise result type information is needed. +// +// If the given function produces an error when run, this spec will produce +// a non-user-actionable diagnostic message. It's the caller's responsibility +// to ensure that the given function cannot fail for any non-error result +// of the wrapped spec. 
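+//
+// An illustrative sketch, assuming "upper" is a single-argument
+// function.Function defined elsewhere by the calling application:
+//
+//     spec := &hcldec.TransformFuncSpec{
+//         Wrapped: &hcldec.AttrSpec{Name: "name", Type: cty.String},
+//         Func:    upper,
+//     }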
+type TransformFuncSpec struct { + Wrapped Spec + Func function.Function +} + +func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Wrapped) +} + +func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) + if diags.HasErrors() { + // We won't try to run our function in this case, because it'll probably + // generate confusing additional errors that will distract from the + // root cause. + return cty.UnknownVal(s.impliedType()), diags + } + + resultVal, err := s.Func.Call([]cty.Value{wrappedVal}) + if err != nil { + // This is not a good example of a diagnostic because it is reporting + // a programming error in the calling application, rather than something + // an end-user could act on. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Transform function failed", + Detail: fmt.Sprintf("Decoder transform returned an error: %s", err), + Subject: s.sourceRange(content, blockLabels).Ptr(), + }) + return cty.UnknownVal(s.impliedType()), diags + } + + return resultVal, diags +} + +func (s *TransformFuncSpec) impliedType() cty.Type { + wrappedTy := s.Wrapped.impliedType() + resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy}) + if err != nil { + // Should never happen with a correctly-configured spec + return cty.DynamicPseudoType + } + + return resultTy +} + +func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We'll just pass through our wrapped range here, even though that's + // not super-accurate, because there's nothing better to return. + return s.Wrapped.sourceRange(content, blockLabels) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go new file mode 100644 index 0000000..427b0d0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go @@ -0,0 +1,34 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Variables processes the given body with the given spec and returns a +// list of the variable traversals that would be required to decode +// the same pairing of body and spec. +// +// This can be used to conditionally populate the variables in the EvalContext +// passed to Decode, for applications where a static scope is insufficient. +// +// If the given body is not compliant with the given schema, the result may +// be incomplete, but that's assumed to be okay because the eventual call +// to Decode will produce error diagnostics anyway. +func Variables(body hcl.Body, spec Spec) []hcl.Traversal { + schema := ImpliedSchema(spec) + + content, _, _ := body.PartialContent(schema) + + var vars []hcl.Traversal + + if vs, ok := spec.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) + } + spec.visitSameBodyChildren(func(s Spec) { + if vs, ok := s.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) 
+ } + }) + + return vars +} diff --git a/vendor/github.com/hashicorp/hcl2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go new file mode 100644 index 0000000..6d47f12 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go @@ -0,0 +1,123 @@ +package hclparse + +import ( + "fmt" + "io/ioutil" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/hcl2/hcl/json" +) + +// NOTE: This is the public interface for parsing. The actual parsers are +// in other packages alongside this one, with this package just wrapping them +// to provide a unified interface for the caller across all supported formats. + +// Parser is the main interface for parsing configuration files. As well as +// parsing files, a parser also retains a registry of all of the files it +// has parsed so that multiple attempts to parse the same file will return +// the same object and so the collected files can be used when printing +// diagnostics. +// +// Any diagnostics for parsing a file are only returned once on the first +// call to parse that file. Callers are expected to collect up diagnostics +// and present them together, so returning diagnostics for the same file +// multiple times would create a confusing result. +type Parser struct { + files map[string]*hcl.File +} + +// NewParser creates a new parser, ready to parse configuration files. +func NewParser() *Parser { + return &Parser{ + files: map[string]*hcl.File{}, + } +} + +// ParseHCL parses the given buffer (which is assumed to have been loaded from +// the given filename) as a native-syntax configuration file and returns the +// hcl.File object representing it. +func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) + p.files[filename] = file + return file, diags +} + +// ParseHCLFile reads the given filename and parses it as a native-syntax HCL +// configuration file. An error diagnostic is returned if the given file +// cannot be read. +func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + src, err := ioutil.ReadFile(filename) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), + }, + } + } + + return p.ParseHCL(src, filename) +} + +// ParseJSON parses the given JSON buffer (which is assumed to have been loaded +// from the given filename) and returns the hcl.File object representing it. +func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.Parse(src, filename) + p.files[filename] = file + return file, diags +} + +// ParseJSONFile reads the given filename and parses it as JSON, similarly to +// ParseJSON. An error diagnostic is returned if the given file cannot be read. 
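+//
+// A minimal usage sketch (the filename here is hypothetical):
+//
+//     p := hclparse.NewParser()
+//     f, diags := p.ParseJSONFile("config.tf.json")
+//     if diags.HasErrors() {
+//         // Present diags to the user, using p.Files() for context.
+//     }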
+func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.ParseFile(filename) + p.files[filename] = file + return file, diags +} + +// AddFile allows a caller to record in a parser a file that was parsed some +// other way, thus allowing it to be included in the registry of sources. +func (p *Parser) AddFile(filename string, file *hcl.File) { + p.files[filename] = file +} + +// Sources returns a map from filenames to the raw source code that was +// read from them. This is intended to be used, for example, to print +// diagnostics with contextual information. +// +// The arrays underlying the returned slices should not be modified. +func (p *Parser) Sources() map[string][]byte { + ret := make(map[string][]byte) + for fn, f := range p.files { + ret[fn] = f.Bytes + } + return ret +} + +// Files returns a map from filenames to the File objects produced from them. +// This is intended to be used, for example, to print diagnostics with +// contextual information. +// +// The returned map and all of the objects it refers to directly or indirectly +// must not be modified. +func (p *Parser) Files() map[string]*hcl.File { + return p.files +} diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go index bab86c6..86085de 100644 --- a/vendor/github.com/hashicorp/hil/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go @@ -395,6 +395,12 @@ func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { pos.Column = pos.Column + 2 litLen = litLen + 2 continue + } else if follow == '\\' { + // \\ escapes \ + // so we will consume both characters here. + pos.Column = pos.Column + 2 + litLen = litLen + 2 + continue } } } diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go index 5f4e89e..9d80c42 100644 --- a/vendor/github.com/hashicorp/terraform/config/append.go +++ b/vendor/github.com/hashicorp/terraform/config/append.go @@ -82,5 +82,11 @@ func Append(c1, c2 *Config) (*Config, error) { c.Variables = append(c.Variables, c2.Variables...) } + if len(c1.Locals) > 0 || len(c2.Locals) > 0 { + c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) + c.Locals = append(c.Locals, c1.Locals...) + c.Locals = append(c.Locals, c2.Locals...) 
+ } + return c, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go index 3f756dc..1772fd7 100644 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ b/vendor/github.com/hashicorp/terraform/config/config.go @@ -8,11 +8,11 @@ import ( "strconv" "strings" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hil" + hcl2 "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/helper/hilmapstructure" "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/reflectwalk" ) @@ -34,6 +34,7 @@ type Config struct { ProviderConfigs []*ProviderConfig Resources []*Resource Variables []*Variable + Locals []*Local Outputs []*Output // The fields below can be filled in by loaders for validation @@ -55,6 +56,8 @@ type AtlasConfig struct { type Module struct { Name string Source string + Version string + Providers map[string]string RawConfig *RawConfig } @@ -147,7 +150,7 @@ func (p *Provisioner) Copy() *Provisioner { } } -// Variable is a variable defined within the configuration. +// Variable is a module argument defined within the configuration. type Variable struct { Name string DeclaredType string `mapstructure:"type"` @@ -155,6 +158,12 @@ type Variable struct { Description string } +// Local is a local value defined within the configuration. +type Local struct { + Name string + RawConfig *RawConfig +} + // Output is an output defined within the configuration. An output is // resulting data that is highlighted by Terraform when finished. An // output marked Sensitive will be output in a masked form following @@ -222,7 +231,10 @@ func (r *Resource) Count() (int, error) { v, err := strconv.ParseInt(count, 0, 0) if err != nil { - return 0, err + return 0, fmt.Errorf( + "cannot parse %q as an integer", + count, + ) } return int(v), nil @@ -253,7 +265,9 @@ func (r *Resource) ProviderFullName() string { // the provider name is inferred from the resource type name. func ResourceProviderFullName(resourceType, explicitProvider string) string { if explicitProvider != "" { - return explicitProvider + // check for an explicit provider name, or return the original + parts := strings.SplitAfter(explicitProvider, "provider.") + return parts[len(parts)-1] } idx := strings.IndexRune(resourceType, '_') @@ -268,30 +282,35 @@ func ResourceProviderFullName(resourceType, explicitProvider string) string { } // Validate does some basic semantic checking of the configuration. -func (c *Config) Validate() error { +func (c *Config) Validate() tfdiags.Diagnostics { if c == nil { return nil } - var errs []error + var diags tfdiags.Diagnostics for _, k := range c.unknownKeys { - errs = append(errs, fmt.Errorf( - "Unknown root level key: %s", k)) + diags = diags.Append( + fmt.Errorf("Unknown root level key: %s", k), + ) } // Validate the Terraform config if tf := c.Terraform; tf != nil { - errs = append(errs, c.Terraform.Validate()...) + errs := c.Terraform.Validate() + for _, err := range errs { + diags = diags.Append(err) + } } vars := c.InterpolatedVariables() varMap := make(map[string]*Variable) for _, v := range c.Variables { if _, ok := varMap[v.Name]; ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Variable '%s': duplicate found. 
Variable names must be unique.", - v.Name)) + v.Name, + )) } varMap[v.Name] = v @@ -299,17 +318,19 @@ func (c *Config) Validate() error { for k, _ := range varMap { if !NameRegexp.MatchString(k) { - errs = append(errs, fmt.Errorf( - "variable %q: variable name must match regular expresion %s", - k, NameRegexp)) + diags = diags.Append(fmt.Errorf( + "variable %q: variable name must match regular expression %s", + k, NameRegexp, + )) } } for _, v := range c.Variables { if v.Type() == VariableTypeUnknown { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Variable '%s': must be a string or a map", - v.Name)) + v.Name, + )) continue } @@ -330,9 +351,10 @@ func (c *Config) Validate() error { if v.Default != nil { if err := reflectwalk.Walk(v.Default, w); err == nil { if interp { - errs = append(errs, fmt.Errorf( - "Variable '%s': cannot contain interpolations", - v.Name)) + diags = diags.Append(fmt.Errorf( + "variable %q: default may not contain interpolations", + v.Name, + )) } } } @@ -348,10 +370,11 @@ func (c *Config) Validate() error { } if _, ok := varMap[uv.Name]; !ok { - errs = append(errs, fmt.Errorf( - "%s: unknown variable referenced: '%s'. define it with 'variable' blocks", + diags = diags.Append(fmt.Errorf( + "%s: unknown variable referenced: '%s'; define it with a 'variable' block", source, - uv.Name)) + uv.Name, + )) } } } @@ -362,17 +385,19 @@ func (c *Config) Validate() error { switch v := rawV.(type) { case *CountVariable: if v.Type == CountValueInvalid { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: invalid count variable: %s", source, - v.FullKey())) + v.FullKey(), + )) } case *PathVariable: if v.Type == PathValueInvalid { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: invalid path variable: %s", source, - v.FullKey())) + v.FullKey(), + )) } } } @@ -380,27 +405,35 @@ func (c *Config) Validate() error { // Check that providers aren't declared multiple times and that their // version constraints, where present, are syntactically valid. - providerSet := make(map[string]struct{}) + providerSet := make(map[string]bool) for _, p := range c.ProviderConfigs { name := p.FullName() if _, ok := providerSet[name]; ok { - errs = append(errs, fmt.Errorf( - "provider.%s: declared multiple times, you can only declare a provider once", - name)) + diags = diags.Append(fmt.Errorf( + "provider.%s: multiple configurations present; only one configuration is allowed per provider", + name, + )) continue } if p.Version != "" { _, err := discovery.ConstraintStr(p.Version).Parse() if err != nil { - errs = append(errs, fmt.Errorf( - "provider.%s: invalid version constraint %q: %s", - name, p.Version, err, - )) + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid provider version constraint", + Detail: fmt.Sprintf( + "The value %q given for provider.%s is not a valid version constraint.", + p.Version, name, + ), + // TODO: include a "Subject" source reference in here, + // once the config loader is able to retain source + // location information. 
+ }) } } - providerSet[name] = struct{}{} + providerSet[name] = true } // Check that all references to modules are valid @@ -412,9 +445,10 @@ func (c *Config) Validate() error { if _, ok := dupped[m.Id()]; !ok { dupped[m.Id()] = struct{}{} - errs = append(errs, fmt.Errorf( - "%s: module repeated multiple times", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module repeated multiple times", + m.Id(), + )) } // Already seen this module, just skip it @@ -428,21 +462,23 @@ func (c *Config) Validate() error { "root": m.Source, }) if err != nil { - errs = append(errs, fmt.Errorf( - "%s: module source error: %s", - m.Id(), err)) + diags = diags.Append(fmt.Errorf( + "module %q: module source error: %s", + m.Id(), err, + )) } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "%s: module source cannot contain interpolations", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module source cannot contain interpolations", + m.Id(), + )) } // Check that the name matches our regexp if !NameRegexp.Match([]byte(m.Name)) { - errs = append(errs, fmt.Errorf( - "%s: module name can only contain letters, numbers, "+ - "dashes, and underscores", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module name must be a letter or underscore followed by only letters, numbers, dashes, and underscores", + m.Id(), + )) } // Check that the configuration can all be strings, lists or maps @@ -466,30 +502,47 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: variable %s must be a string, list or map value", - m.Id(), k)) + diags = diags.Append(fmt.Errorf( + "module %q: argument %s must have a string, list, or map value", + m.Id(), k, + )) } // Check for invalid count variables for _, v := range m.RawConfig.Variables { switch v.(type) { case *CountVariable: - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", m.Name)) + diags = diags.Append(fmt.Errorf( + "module %q: count variables are only valid within resources", + m.Name, + )) case *SelfVariable: - errs = append(errs, fmt.Errorf( - "%s: self variables are only valid within resources", m.Name)) + diags = diags.Append(fmt.Errorf( + "module %q: self variables are only valid within resources", + m.Name, + )) } } // Update the raw configuration to only contain the string values m.RawConfig, err = NewRawConfig(raw) if err != nil { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: can't initialize configuration: %s", - m.Id(), err)) + m.Id(), err, + )) } + + // check that all named providers actually exist + for _, p := range m.Providers { + if !providerSet[p] { + diags = diags.Append(fmt.Errorf( + "module %q: cannot pass non-existent provider %q", + m.Name, p, + )) + } + } + } dupped = nil @@ -503,10 +556,10 @@ func (c *Config) Validate() error { } if _, ok := modules[mv.Name]; !ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: unknown module referenced: %s", - source, - mv.Name)) + source, mv.Name, + )) } } } @@ -519,9 +572,10 @@ func (c *Config) Validate() error { if _, ok := dupped[r.Id()]; !ok { dupped[r.Id()] = struct{}{} - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource repeated multiple times", - r.Id())) + r.Id(), + )) } } @@ -535,53 +589,42 @@ func (c *Config) Validate() error { for _, v := range r.RawCount.Variables { switch v.(type) { case *CountVariable: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource count can't 
reference count variable: %s", - n, - v.FullKey())) + n, v.FullKey(), + )) case *SimpleVariable: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource count can't reference variable: %s", - n, - v.FullKey())) + n, v.FullKey(), + )) // Good case *ModuleVariable: case *ResourceVariable: case *TerraformVariable: case *UserVariable: + case *LocalVariable: default: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Internal error. Unknown type in count var in %s: %T", - n, v)) + n, v, + )) } } - // Interpolate with a fixed number to verify that its a number. - r.RawCount.interpolate(func(root ast.Node) (interface{}, error) { - // Execute the node but transform the AST so that it returns - // a fixed value of "5" for all interpolations. - result, err := hil.Eval( - hil.FixedValueTransform( - root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}), - nil) - if err != nil { - return "", err - } - - return result.Value, nil - }) - _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0) - if err != nil { - errs = append(errs, fmt.Errorf( - "%s: resource count must be an integer", - n)) + if !r.RawCount.couldBeInteger() { + diags = diags.Append(fmt.Errorf( + "%s: resource count must be an integer", n, + )) } r.RawCount.init() // Validate DependsOn - errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...) + for _, err := range c.validateDependsOn(n, r.DependsOn, resources, modules) { + diags = diags.Append(err) + } // Verify provisioners for _, p := range r.Provisioners { @@ -595,9 +638,10 @@ func (c *Config) Validate() error { } if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - errs = append(errs, fmt.Errorf( - "%s: connection info cannot contain splat variable "+ - "referencing itself", n)) + diags = diags.Append(fmt.Errorf( + "%s: connection info cannot contain splat variable referencing itself", + n, + )) break } } @@ -609,9 +653,10 @@ func (c *Config) Validate() error { } if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - errs = append(errs, fmt.Errorf( - "%s: connection info cannot contain splat variable "+ - "referencing itself", n)) + diags = diags.Append(fmt.Errorf( + "%s: connection info cannot contain splat variable referencing itself", + n, + )) break } } @@ -619,21 +664,24 @@ func (c *Config) Validate() error { // Check for invalid when/onFailure values, though this should be // picked up by the loader we check here just in case. 
if p.When == ProvisionerWhenInvalid { - errs = append(errs, fmt.Errorf( - "%s: provisioner 'when' value is invalid", n)) + diags = diags.Append(fmt.Errorf( + "%s: provisioner 'when' value is invalid", n, + )) } if p.OnFailure == ProvisionerOnFailureInvalid { - errs = append(errs, fmt.Errorf( - "%s: provisioner 'on_failure' value is invalid", n)) + diags = diags.Append(fmt.Errorf( + "%s: provisioner 'on_failure' value is invalid", n, + )) } } // Verify ignore_changes contains valid entries for _, v := range r.Lifecycle.IgnoreChanges { if strings.Contains(v, "*") && v != "*" { - errs = append(errs, fmt.Errorf( - "%s: ignore_changes does not support using a partial string "+ - "together with a wildcard: %s", n, v)) + diags = diags.Append(fmt.Errorf( + "%s: ignore_changes does not support using a partial string together with a wildcard: %s", + n, v, + )) } } @@ -642,21 +690,24 @@ func (c *Config) Validate() error { "root": r.Lifecycle.IgnoreChanges, }) if err != nil { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: lifecycle ignore_changes error: %s", - n, err)) + n, err, + )) } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: lifecycle ignore_changes cannot contain interpolations", - n)) + n, + )) } // If it is a data source then it can't have provisioners if r.Mode == DataResourceMode { if _, ok := r.RawConfig.Raw["provisioner"]; ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: data sources cannot have provisioners", - n)) + n, + )) } } } @@ -670,25 +721,50 @@ func (c *Config) Validate() error { id := rv.ResourceId() if _, ok := resources[id]; !ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: unknown resource '%s' referenced in variable %s", source, id, - rv.FullKey())) + rv.FullKey(), + )) continue } } } + // Check that all locals are valid + { + found := make(map[string]struct{}) + for _, l := range c.Locals { + if _, ok := found[l.Name]; ok { + diags = diags.Append(fmt.Errorf( + "%s: duplicate local. local value names must be unique", + l.Name, + )) + continue + } + found[l.Name] = struct{}{} + + for _, v := range l.RawConfig.Variables { + if _, ok := v.(*CountVariable); ok { + diags = diags.Append(fmt.Errorf( + "local %s: count variables are only valid within resources", l.Name, + )) + } + } + } + } + // Check that all outputs are valid { found := make(map[string]struct{}) for _, o := range c.Outputs { // Verify the output is new if _, ok := found[o.Name]; ok { - errs = append(errs, fmt.Errorf( - "%s: duplicate output. 
output names must be unique.", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: an output of this name was already defined", + o.Name, + )) continue } found[o.Name] = struct{}{} @@ -708,9 +784,10 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: value for 'sensitive' must be boolean", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: value for 'sensitive' must be boolean", + o.Name, + )) continue } if k == "description" { @@ -719,27 +796,78 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: value for 'description' must be string", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: value for 'description' must be string", + o.Name, + )) continue } invalidKeys = append(invalidKeys, k) } if len(invalidKeys) > 0 { - errs = append(errs, fmt.Errorf( - "%s: output has invalid keys: %s", - o.Name, strings.Join(invalidKeys, ", "))) + diags = diags.Append(fmt.Errorf( + "output %q: invalid keys: %s", + o.Name, strings.Join(invalidKeys, ", "), + )) } if !valueKeyFound { - errs = append(errs, fmt.Errorf( - "%s: output is missing required 'value' key", o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: missing required 'value' argument", o.Name, + )) } for _, v := range o.RawConfig.Variables { if _, ok := v.(*CountVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: count variables are only valid within resources", + o.Name, + )) + } + } + + // Detect a common mistake of using a "count"ed resource in + // an output value without using the splat or index form. + // Prior to 0.11 this error was silently ignored, but outputs + // now have their errors checked like all other contexts. + // + // TODO: Remove this in 0.12. + for _, v := range o.RawConfig.Variables { + rv, ok := v.(*ResourceVariable) + if !ok { + continue + } + + // If the variable seems to be treating the referenced + // resource as a singleton (no count specified) then + // we'll check to make sure it is indeed a singleton. + // It's a warning if not. + + if rv.Multi || rv.Index != 0 { + // This reference is treating the resource as a + // multi-resource, so the warning doesn't apply. + continue + } + + for _, r := range c.Resources { + if r.Id() != rv.ResourceId() { + continue + } + + // We test specifically for the raw string "1" here + // because we _do_ want to generate this warning if + // the user has provided an expression that happens + // to return 1 right now, to catch situations where + // a count might dynamically be set to something + // other than 1 and thus splat syntax is still needed + // to be safe. 
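+					// As an illustrative example (not drawn from any real
+					// configuration): given
+					//
+					//     resource "aws_instance" "web" {
+					//       count = 2
+					//     }
+					//
+					// an output value written as "${aws_instance.web.id}"
+					// triggers this warning, and should be written as
+					// "${aws_instance.web.*.id}" instead.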
+ if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" { + diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf( + "output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances", + o.Name, + r.Id(), rv.Field, + r.Id(), rv.Field, + ))) + } } } } @@ -755,17 +883,15 @@ func (c *Config) Validate() error { for _, v := range rc.Variables { if _, ok := v.(*SelfVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: cannot contain self-reference %s", source, v.FullKey())) + diags = diags.Append(fmt.Errorf( + "%s: cannot contain self-reference %s", + source, v.FullKey(), + )) } } } - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - return nil + return diags } // InterpolatedVariables is a helper that returns a mapping of all the interpolated diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go index 0b3abbc..a6933c2 100644 --- a/vendor/github.com/hashicorp/terraform/config/config_string.go +++ b/vendor/github.com/hashicorp/terraform/config/config_string.go @@ -143,6 +143,46 @@ func outputsStr(os []*Output) string { result += fmt.Sprintf(" %s: %s\n", kind, str) } } + + if o.Description != "" { + result += fmt.Sprintf(" description\n %s\n", o.Description) + } + } + + return strings.TrimSpace(result) +} + +func localsStr(ls []*Local) string { + ns := make([]string, 0, len(ls)) + m := make(map[string]*Local) + for _, l := range ls { + ns = append(ns, l.Name) + m[l.Name] = l + } + sort.Strings(ns) + + result := "" + for _, n := range ns { + l := m[n] + + result += fmt.Sprintf("%s\n", n) + + if len(l.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + for _, rawV := range l.RawConfig.Variables { + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } } return strings.TrimSpace(result) diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go new file mode 100644 index 0000000..2b1b0ca --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go @@ -0,0 +1,97 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +var mapLabelNames = []string{"key"} + +// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body +// using the facilities in the hcldec package. +// +// The returned specification is guaranteed to return a value of the same type +// returned by method ImpliedType, but it may contain null or unknown values if +// any of the block attributes are defined as optional and/or computed +// respectively. +func (b *Block) DecoderSpec() hcldec.Spec { + ret := hcldec.ObjectSpec{} + if b == nil { + return ret + } + + for name, attrS := range b.Attributes { + switch { + case attrS.Computed && attrS.Optional: + // In this special case we use an unknown value as a default + // to get the intended behavior that the result is computed + // unless it has been explicitly set in config. 
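+			// (hcldec.DefaultSpec evaluates its Primary spec first and falls
+			// back to the Default spec when the primary result is null, so
+			// the unknown value below applies only when the attribute is not
+			// set in the configuration.)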
+ ret[name] = &hcldec.DefaultSpec{ + Primary: &hcldec.AttrSpec{ + Name: name, + Type: attrS.Type, + }, + Default: &hcldec.LiteralSpec{ + Value: cty.UnknownVal(attrS.Type), + }, + } + case attrS.Computed: + ret[name] = &hcldec.LiteralSpec{ + Value: cty.UnknownVal(attrS.Type), + } + default: + ret[name] = &hcldec.AttrSpec{ + Name: name, + Type: attrS.Type, + Required: attrS.Required, + } + } + } + + for name, blockS := range b.BlockTypes { + if _, exists := ret[name]; exists { + // This indicates an invalid schema, since it's not valid to + // define both an attribute and a block type of the same name. + // However, we don't raise this here since it's checked by + // InternalValidate. + continue + } + + childSpec := blockS.Block.DecoderSpec() + + switch blockS.Nesting { + case NestingSingle: + ret[name] = &hcldec.BlockSpec{ + TypeName: name, + Nested: childSpec, + Required: blockS.MinItems == 1 && blockS.MaxItems >= 1, + } + case NestingList: + ret[name] = &hcldec.BlockListSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + case NestingSet: + ret[name] = &hcldec.BlockSetSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + case NestingMap: + ret[name] = &hcldec.BlockMapSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. + continue + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go b/vendor/github.com/hashicorp/terraform/config/configschema/doc.go new file mode 100644 index 0000000..caf8d73 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/doc.go @@ -0,0 +1,14 @@ +// Package configschema contains types for describing the expected structure +// of a configuration block whose shape is not known until runtime. +// +// For example, this is used to describe the expected contents of a resource +// configuration block, which is defined by the corresponding provider plugin +// and thus not compiled into Terraform core. +// +// A configschema primarily describes the shape of configuration, but it is +// also suitable for use with other structures derived from the configuration, +// such as the cached state of a resource or a resource diff. +// +// This package should not be confused with the package helper/schema, which +// is the higher-level helper library used to implement providers themselves. +package configschema diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go new file mode 100644 index 0000000..67324eb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go @@ -0,0 +1,21 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. Code that creates configschema.Block objects should be +// tested using the InternalValidate method to detect any inconsistencies +// that would cause this method to fall back on defaults and assumptions. 
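+//
+// For example, a Block holding a single optional string attribute named
+// "name" (an illustrative schema, not one defined by this package) implies
+// the type cty.Object(map[string]cty.Type{"name": cty.String}).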
+func (b *Block) ImpliedType() cty.Type {
+	if b == nil {
+		return cty.EmptyObject
+	}
+
+	return hcldec.ImpliedType(b.DecoderSpec())
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go
new file mode 100644
index 0000000..33cbe88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go
@@ -0,0 +1,92 @@
+package configschema
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/zclconf/go-cty/cty"
+
+	multierror "github.com/hashicorp/go-multierror"
+)
+
+var validName = regexp.MustCompile(`^[a-z0-9_]+$`)
+
+// InternalValidate returns an error if the receiving block and its child
+// schema definitions have any inconsistencies with the documented rules for
+// valid schema.
+//
+// This is intended to be used within unit tests to detect when a given
+// schema is invalid.
+func (b *Block) InternalValidate() error {
+	if b == nil {
+		return fmt.Errorf("top-level block schema is nil")
+	}
+	return b.internalValidate("", nil)
+}
+
+func (b *Block) internalValidate(prefix string, err error) error {
+	for name, attrS := range b.Attributes {
+		if attrS == nil {
+			err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name))
+			continue
+		}
+		if !validName.MatchString(name) {
+			err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
+		}
+		if attrS.Optional == false && attrS.Required == false && attrS.Computed == false {
+			err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name))
+		}
+		if attrS.Optional && attrS.Required {
+			err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name))
+		}
+		if attrS.Computed && attrS.Required {
+			err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name))
+		}
+		if attrS.Type == cty.NilType {
+			err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name))
+		}
+	}
+
+	for name, blockS := range b.BlockTypes {
+		if blockS == nil {
+			err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name))
+			continue
+		}
+
+		if _, isAttr := b.Attributes[name]; isAttr {
+			err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name))
+		} else if !validName.MatchString(name) {
+			err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
+		}
+
+		if blockS.MinItems < 0 || blockS.MaxItems < 0 {
+			err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than or equal to zero", prefix, name))
+		}
+
+		switch blockS.Nesting {
+		case NestingSingle:
+			switch {
+			case blockS.MinItems != blockS.MaxItems:
+				err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name))
+			case blockS.MinItems < 0 || blockS.MinItems > 1:
+				err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name))
+			}
+		case NestingList, NestingSet:
+			if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 {
+				err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting))
+			}
+		case NestingMap:
+			if blockS.MinItems != 0 || blockS.MaxItems != 0 {
+				err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name))
+			}
+		default:
+			err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting))
+		}
+
+		subPrefix := prefix + name + "."
+		err = blockS.Block.internalValidate(subPrefix, err)
+	}
+
+	return err
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
new file mode 100644
index 0000000..6cb9313
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.
+
+package configschema
+
+import "strconv"
+
+const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap"
+
+var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62}
+
+func (i NestingMode) String() string {
+	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
+		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go b/vendor/github.com/hashicorp/terraform/config/configschema/schema.go
new file mode 100644
index 0000000..9a8ee55
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/configschema/schema.go
@@ -0,0 +1,107 @@
+package configschema
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Block represents a configuration block.
+//
+// "Block" here is a logical grouping construct, though it happens to map
+// directly onto the physical block syntax of Terraform's native configuration
+// syntax. It may be more a matter of convention in other syntaxes, such as
+// JSON.
+//
+// When converted to a value, a Block always becomes an instance of an object
+// type derived from its defined attributes and nested blocks.
+type Block struct {
+	// Attributes describes any attributes that may appear directly inside
+	// the block.
+	Attributes map[string]*Attribute
+
+	// BlockTypes describes any nested block types that may appear directly
+	// inside the block.
+	BlockTypes map[string]*NestedBlock
+}
+
+// Attribute represents a configuration attribute, within a block.
+type Attribute struct {
+	// Type is a type specification that the attribute's value must conform to.
+	Type cty.Type
+
+	// Required, if set to true, specifies that an omitted or null value is
+	// not permitted.
+	Required bool
+
+	// Optional, if set to true, specifies that an omitted or null value is
+	// permitted. This field conflicts with Required.
+	Optional bool
+
+	// Computed, if set to true, specifies that the value comes from the
+	// provider rather than from configuration. If combined with Optional,
+	// then the config may optionally provide an overridden value.
+	Computed bool
+
+	// Sensitive, if set to true, indicates that an attribute may contain
+	// sensitive information.
+	//
+	// At present nothing is done with this information, but callers are
+	// encouraged to set it where appropriate so that it may be used in the
+	// future to help Terraform mask sensitive information. (Terraform
+	// currently achieves this in a limited sense via other mechanisms.)
+	Sensitive bool
+}
+
+// NestedBlock represents the embedding of one block within another.
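+//
+// For example (illustrative only), a repeatable "listener" block inside a
+// resource would be described as a NestedBlock whose Nesting is NestingList
+// and whose embedded Block describes the arguments of a single listener.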
+type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. + // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +//go:generate stringer -type=NestingMode + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. + NestingSingle + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. + NestingSet + + // NestingMap indicates that multiple blocks of the given type are + // permitted, each with a single label, and that their corresponding + // objects should be provided in a map whose keys are the labels. + // + // It's an error, therefore, to use the same label value on multiple + // blocks. + NestingMap +) diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go new file mode 100644 index 0000000..207d105 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go @@ -0,0 +1,134 @@ +package config + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/terraform/config/hcl2shim" + + hcl2 "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// --------------------------------------------------------------------------- +// This file contains some helper functions that are used to shim between +// HCL2 concepts and HCL/HIL concepts, to help us mostly preserve the existing +// public API that was built around HCL/HIL-oriented approaches. +// --------------------------------------------------------------------------- + +func hcl2InterpolationFuncs() map[string]function.Function { + hcl2Funcs := map[string]function.Function{} + + for name, hilFunc := range Funcs() { + hcl2Funcs[name] = hcl2InterpolationFuncShim(hilFunc) + } + + // Some functions in the old world are dealt with inside langEvalConfig + // due to their legacy reliance on direct access to the symbol table. + // Since 0.7 they don't actually need it anymore and just ignore it, + // so we're cheating a bit here and exploiting that detail by passing nil. 
+ hcl2Funcs["lookup"] = hcl2InterpolationFuncShim(interpolationFuncLookup(nil)) + hcl2Funcs["keys"] = hcl2InterpolationFuncShim(interpolationFuncKeys(nil)) + hcl2Funcs["values"] = hcl2InterpolationFuncShim(interpolationFuncValues(nil)) + + // As a bonus, we'll provide the JSON-handling functions from the cty + // function library since its "jsonencode" is more complete (doesn't force + // weird type conversions) and HIL's type system can't represent + // "jsondecode" at all. The result of jsondecode will eventually be forced + // to conform to the HIL type system on exit into the rest of Terraform due + // to our shimming right now, but it should be usable for decoding _within_ + // an expression. + hcl2Funcs["jsonencode"] = stdlib.JSONEncodeFunc + hcl2Funcs["jsondecode"] = stdlib.JSONDecodeFunc + + return hcl2Funcs +} + +func hcl2InterpolationFuncShim(hilFunc ast.Function) function.Function { + spec := &function.Spec{} + + for i, hilArgType := range hilFunc.ArgTypes { + spec.Params = append(spec.Params, function.Parameter{ + Type: hcl2shim.HCL2TypeForHILType(hilArgType), + Name: fmt.Sprintf("arg%d", i+1), // HIL args don't have names, so we'll fudge it + }) + } + + if hilFunc.Variadic { + spec.VarParam = &function.Parameter{ + Type: hcl2shim.HCL2TypeForHILType(hilFunc.VariadicType), + Name: "varargs", // HIL args don't have names, so we'll fudge it + } + } + + spec.Type = func(args []cty.Value) (cty.Type, error) { + return hcl2shim.HCL2TypeForHILType(hilFunc.ReturnType), nil + } + spec.Impl = func(args []cty.Value, retType cty.Type) (cty.Value, error) { + hilArgs := make([]interface{}, len(args)) + for i, arg := range args { + hilV := hcl2shim.HILVariableFromHCL2Value(arg) + + // Although the cty function system does automatic type conversions + // to match the argument types, cty doesn't distinguish int and + // float and so we may need to adjust here to ensure that the + // wrapped function gets exactly the Go type it was expecting. + var wantType ast.Type + if i < len(hilFunc.ArgTypes) { + wantType = hilFunc.ArgTypes[i] + } else { + wantType = hilFunc.VariadicType + } + switch { + case hilV.Type == ast.TypeInt && wantType == ast.TypeFloat: + hilV.Type = wantType + hilV.Value = float64(hilV.Value.(int)) + case hilV.Type == ast.TypeFloat && wantType == ast.TypeInt: + hilV.Type = wantType + hilV.Value = int(hilV.Value.(float64)) + } + + // HIL functions actually expect to have the outermost variable + // "peeled" but any nested values (in lists or maps) will + // still have their ast.Variable wrapping. + hilArgs[i] = hilV.Value + } + + hilResult, err := hilFunc.Callback(hilArgs) + if err != nil { + return cty.DynamicVal, err + } + + // Just as on the way in, we get back a partially-peeled ast.Variable + // which we need to re-wrap in order to convert it back into what + // we're calling a "config value". 
+ rv := hcl2shim.HCL2ValueFromHILVariable(ast.Variable{ + Type: hilFunc.ReturnType, + Value: hilResult, + }) + + return convert.Convert(rv, retType) // if result is unknown we'll force the correct type here + } + return function.New(spec) +} + +func hcl2EvalWithUnknownVars(expr hcl2.Expression) (cty.Value, hcl2.Diagnostics) { + trs := expr.Variables() + vars := map[string]cty.Value{} + val := cty.DynamicVal + + for _, tr := range trs { + name := tr.RootName() + vars[name] = val + } + + ctx := &hcl2.EvalContext{ + Variables: vars, + Functions: hcl2InterpolationFuncs(), + } + return expr.Value(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go new file mode 100644 index 0000000..19651c8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go @@ -0,0 +1,85 @@ +package hcl2shim + +import ( + "fmt" + + hcl2 "github.com/hashicorp/hcl2/hcl" +) + +// SingleAttrBody is a weird implementation of hcl2.Body that acts as if +// it has a single attribute whose value is the given expression. +// +// This is used to shim Resource.RawCount and Output.RawConfig to behave +// more like they do in the old HCL loader. +type SingleAttrBody struct { + Name string + Expr hcl2.Expression +} + +var _ hcl2.Body = SingleAttrBody{} + +func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + if !all { + // This should never happen because this body implementation should only + // be used by code that is aware that it's using a single-attr body. + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid attribute", + Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + return content, diags +} + +func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + var remain hcl2.Body + if all { + // If the request matched the one attribute we represent, then the + // remaining body is empty. 
+ remain = hcl2.EmptyBody() + } else { + remain = b + } + return content, remain, diags +} + +func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { + ret := &hcl2.BodyContent{} + all := false + var diags hcl2.Diagnostics + + for _, attrS := range schema.Attributes { + if attrS.Name == b.Name { + attrs, _ := b.JustAttributes() + ret.Attributes = attrs + all = true + } else if attrS.Required { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Missing attribute", + Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + } + + return ret, all, diags +} + +func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { + return hcl2.Attributes{ + b.Name: { + Expr: b.Expr, + Name: b.Name, + NameRange: b.Expr.Range(), + Range: b.Expr.Range(), + }, + }, nil +} + +func (b SingleAttrBody) MissingItemRange() hcl2.Range { + return b.Expr.Range() +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go new file mode 100644 index 0000000..0b697a5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go @@ -0,0 +1,246 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/hashicorp/hil/ast" + "github.com/zclconf/go-cty/cty" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
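+			// (^uint(0) is a platform-sized word of all one bits, so shifting
+			// it right by one yields the largest value an int can hold; an
+			// alternative to the math.MaxInt constant, which is not available
+			// in the Go versions this code targets.)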
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + l[ek.AsString()] = ConfigValueFromHCL2(ev) + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} + +func HILVariableFromHCL2Value(v cty.Value) ast.Variable { + if v.IsNull() { + // Caller should guarantee/check this before calling + panic("Null values cannot be represented in HIL") + } + if !v.IsKnown() { + return ast.Variable{ + Type: ast.TypeUnknown, + Value: UnknownVariableValue, + } + } + + switch v.Type() { + case cty.Bool: + return ast.Variable{ + Type: ast.TypeBool, + Value: v.True(), + } + case cty.Number: + v := ConfigValueFromHCL2(v) + switch tv := v.(type) { + case int: + return ast.Variable{ + Type: ast.TypeInt, + Value: tv, + } + case float64: + return ast.Variable{ + Type: ast.TypeFloat, + Value: tv, + } + default: + // should never happen + panic("invalid return value for configValueFromHCL2") + } + case cty.String: + return ast.Variable{ + Type: ast.TypeString, + Value: v.AsString(), + } + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]ast.Variable, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, HILVariableFromHCL2Value(ev)) + } + // If we were given a tuple then this could actually produce an invalid + // list with non-homogenous types, which we expect to be caught inside + // HIL just like a user-supplied non-homogenous list would be. 
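+		// (For example, the tuple value ["a", true] would yield a TypeList
+		// whose elements carry differing HIL types; illustrative only.)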
+		return ast.Variable{
+			Type:  ast.TypeList,
+			Value: l,
+		}
+	}
+
+	if v.Type().IsMapType() || v.Type().IsObjectType() {
+		l := make(map[string]ast.Variable)
+		it := v.ElementIterator()
+		for it.Next() {
+			ek, ev := it.Element()
+			l[ek.AsString()] = HILVariableFromHCL2Value(ev)
+		}
+		// If we were given an object then this could actually produce an invalid
+		// map with non-homogenous types, which we expect to be caught inside
+		// HIL just like a user-supplied non-homogenous map would be.
+		return ast.Variable{
+			Type:  ast.TypeMap,
+			Value: l,
+		}
+	}
+
+	// If we fall out here then we have some weird type that we haven't
+	// accounted for. This should never happen unless the caller is using
+	// capsule types, and we don't currently have any such types defined.
+	panic(fmt.Errorf("can't convert %#v to HIL variable", v))
+}
+
+func HCL2ValueFromHILVariable(v ast.Variable) cty.Value {
+	switch v.Type {
+	case ast.TypeList:
+		vals := make([]cty.Value, len(v.Value.([]ast.Variable)))
+		for i, ev := range v.Value.([]ast.Variable) {
+			vals[i] = HCL2ValueFromHILVariable(ev)
+		}
+		return cty.TupleVal(vals)
+	case ast.TypeMap:
+		vals := make(map[string]cty.Value, len(v.Value.(map[string]ast.Variable)))
+		for k, ev := range v.Value.(map[string]ast.Variable) {
+			vals[k] = HCL2ValueFromHILVariable(ev)
+		}
+		return cty.ObjectVal(vals)
+	default:
+		return HCL2ValueFromConfigValue(v.Value)
+	}
+}
+
+func HCL2TypeForHILType(hilType ast.Type) cty.Type {
+	switch hilType {
+	case ast.TypeAny:
+		return cty.DynamicPseudoType
+	case ast.TypeUnknown:
+		return cty.DynamicPseudoType
+	case ast.TypeBool:
+		return cty.Bool
+	case ast.TypeInt:
+		return cty.Number
+	case ast.TypeFloat:
+		return cty.Number
+	case ast.TypeString:
+		return cty.String
+	case ast.TypeList:
+		return cty.List(cty.DynamicPseudoType)
+	case ast.TypeMap:
+		return cty.Map(cty.DynamicPseudoType)
+	default:
+		return cty.NilType // equivalent to ast.TypeInvalid
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
index 37ec11a..08cbc77 100644
--- a/vendor/github.com/hashicorp/terraform/config/import_tree.go
+++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go
@@ -1,8 +1,12 @@
 package config
 
 import (
+	"bufio"
 	"fmt"
 	"io"
+	"os"
+
+	"github.com/hashicorp/errwrap"
 )
 
 // configurable is an interface that must be implemented by any configuration
@@ -27,15 +31,52 @@ type importTree struct {
 // imports.
 type fileLoaderFunc func(path string) (configurable, []string, error)
 
+// Set this to a non-empty value at link time to enable the HCL2 experiment.
+// This is not currently enabled for release builds.
+//
+// For example:
+//    go install -ldflags="-X github.com/hashicorp/terraform/config.enableHCL2Experiment=true" github.com/hashicorp/terraform
+var enableHCL2Experiment = ""
+
 // loadTree takes a single file and loads the entire importTree for that
 // file. This function detects what kind of configuration file it is and
 // executes the proper fileLoaderFunc.
 func loadTree(root string) (*importTree, error) {
 	var f fileLoaderFunc
-	switch ext(root) {
-	case ".tf", ".tf.json":
-		f = loadFileHcl
-	default:
+
+	// HCL2 experiment is currently activated at build time via the linker.
+	// See the comment on this variable for more information.
+	if enableHCL2Experiment == "" {
+		// Main-line behavior: always use the original HCL parser
+		switch ext(root) {
+		case ".tf", ".tf.json":
+			f = loadFileHcl
+		default:
+		}
+	} else {
+		// Experimental behavior: use the HCL2 parser if the opt-in comment
+		// is present.
+		switch ext(root) {
+		case ".tf":
+			// We need to sniff the file for the opt-in comment line to decide
+			// if the file is participating in the HCL2 experiment.
+			cf, err := os.Open(root)
+			if err != nil {
+				return nil, err
+			}
+			defer cf.Close()
+			sc := bufio.NewScanner(cf)
+			for sc.Scan() {
+				if sc.Text() == "#terraform:hcl2" {
+					f = globalHCL2Loader.loadFile
+				}
+			}
+			if f == nil {
+				f = loadFileHcl
+			}
+		case ".tf.json":
+			f = loadFileHcl
+		default:
+		}
 	}
 
 	if f == nil {
@@ -86,10 +127,7 @@ func (t *importTree) Close() error {
 func (t *importTree) ConfigTree() (*configTree, error) {
 	config, err := t.Raw.Config()
 	if err != nil {
-		return nil, fmt.Errorf(
-			"Error loading %s: %s",
-			t.Path,
-			err)
+		return nil, errwrap.Wrapf(fmt.Sprintf("Error loading %s: {{err}}", t.Path), err)
 	}
 
 	// Build our result
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
index bbb3555..599e5ec 100644
--- a/vendor/github.com/hashicorp/terraform/config/interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go
@@ -5,6 +5,8 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/hashicorp/terraform/tfdiags"
+
 	"github.com/hashicorp/hil/ast"
 )
 
@@ -14,6 +16,21 @@ import (
 // variables can come from: user variables, resources, etc.
 type InterpolatedVariable interface {
 	FullKey() string
+	SourceRange() tfdiags.SourceRange
+}
+
+// varRange can be embedded into an InterpolatedVariable implementation to
+// implement the SourceRange method.
+type varRange struct {
+	rng tfdiags.SourceRange
+}
+
+func (r varRange) SourceRange() tfdiags.SourceRange {
+	return r.rng
+}
+
+func makeVarRange(rng tfdiags.SourceRange) varRange {
+	return varRange{rng}
 }
 
 // CountVariable is a variable for referencing information about
@@ -21,6 +38,7 @@ type InterpolatedVariable interface {
 type CountVariable struct {
 	Type CountValueType
 	key  string
+	varRange
 }
 
 // CountValueType is the type of the count variable that is referenced.
@@ -37,6 +55,7 @@ type ModuleVariable struct {
 	Name  string
 	Field string
 	key   string
+	varRange
 }
 
 // A PathVariable is a variable that references path information about the
@@ -44,6 +63,7 @@ type ModuleVariable struct {
 type PathVariable struct {
 	Type PathValueType
 	key  string
+	varRange
 }
 
 type PathValueType byte
@@ -67,6 +87,7 @@ type ResourceVariable struct {
 	Index int // Index for multi-variable: aws_instance.foo.1.id == 1
 
 	key string
+	varRange
 }
 
 // SelfVariable is a variable that is referencing the same resource
@@ -75,6 +96,7 @@ type SelfVariable struct {
 	Field string
 
 	key string
+	varRange
 }
 
 // SimpleVariable is an unprefixed variable, which can show up when users have
@@ -82,6 +104,7 @@ type SelfVariable struct
 // internally. The template_file resource is an example of this.
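+// For example, the bare interpolation "${foo}" parses as a SimpleVariable
+// with Key "foo" (an illustrative input, not taken from any real
+// configuration).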
type SimpleVariable struct { Key string + varRange } // TerraformVariable is a "terraform."-prefixed variable used to access @@ -89,6 +112,7 @@ type SimpleVariable struct { type TerraformVariable struct { Field string key string + varRange } // A UserVariable is a variable that is referencing a user variable @@ -99,6 +123,14 @@ type UserVariable struct { Elem string key string + varRange +} + +// A LocalVariable is a variable that references a local value defined within +// the current module, via a "locals" block. This looks like "${local.foo}". +type LocalVariable struct { + Name string + varRange } func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { @@ -112,6 +144,8 @@ func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { return NewTerraformVariable(v) } else if strings.HasPrefix(v, "var.") { return NewUserVariable(v) + } else if strings.HasPrefix(v, "local.") { + return NewLocalVariable(v) } else if strings.HasPrefix(v, "module.") { return NewModuleVariable(v) } else if !strings.ContainsRune(v, '.') { @@ -276,7 +310,7 @@ func (v *SelfVariable) GoString() string { } func NewSimpleVariable(key string) (*SimpleVariable, error) { - return &SimpleVariable{key}, nil + return &SimpleVariable{Key: key}, nil } func (v *SimpleVariable) FullKey() string { @@ -331,6 +365,25 @@ func (v *UserVariable) GoString() string { return fmt.Sprintf("*%#v", *v) } +func NewLocalVariable(key string) (*LocalVariable, error) { + name := key[len("local."):] + if idx := strings.Index(name, "."); idx > -1 { + return nil, fmt.Errorf("Can't use dot (.) attribute access in local.%s; use square bracket indexing", name) + } + + return &LocalVariable{ + Name: name, + }, nil +} + +func (v *LocalVariable) FullKey() string { + return fmt.Sprintf("local.%s", v.Name) +} + +func (v *LocalVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + // DetectVariables takes an AST root and returns all the interpolated // variables that are detected in the AST tree. func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go index a298cf2..421edb0 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go @@ -1,17 +1,23 @@ package config import ( + "bytes" + "compress/gzip" "crypto/md5" + "crypto/rsa" "crypto/sha1" "crypto/sha256" "crypto/sha512" + "crypto/x509" "encoding/base64" "encoding/hex" "encoding/json" + "encoding/pem" "fmt" "io/ioutil" "math" "net" + "net/url" "path/filepath" "regexp" "sort" @@ -55,59 +61,74 @@ func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { // Funcs is the mapping of built-in functions for configuration. 
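+//
+// A minimal usage sketch (illustrative; assumes only this package and the
+// HIL ast package already imported here):
+//
+//	fn := Funcs()["upper"]
+//	out, err := fn.Callback([]interface{}{"hello"}) // out == "HELLO", err == nil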
func Funcs() map[string]ast.Function { return map[string]ast.Function{ - "basename": interpolationFuncBasename(), - "base64decode": interpolationFuncBase64Decode(), - "base64encode": interpolationFuncBase64Encode(), - "base64sha256": interpolationFuncBase64Sha256(), - "base64sha512": interpolationFuncBase64Sha512(), - "bcrypt": interpolationFuncBcrypt(), - "ceil": interpolationFuncCeil(), - "chomp": interpolationFuncChomp(), - "cidrhost": interpolationFuncCidrHost(), - "cidrnetmask": interpolationFuncCidrNetmask(), - "cidrsubnet": interpolationFuncCidrSubnet(), - "coalesce": interpolationFuncCoalesce(), - "coalescelist": interpolationFuncCoalesceList(), - "compact": interpolationFuncCompact(), - "concat": interpolationFuncConcat(), - "contains": interpolationFuncContains(), - "dirname": interpolationFuncDirname(), - "distinct": interpolationFuncDistinct(), - "element": interpolationFuncElement(), - "file": interpolationFuncFile(), - "matchkeys": interpolationFuncMatchKeys(), - "floor": interpolationFuncFloor(), - "format": interpolationFuncFormat(), - "formatlist": interpolationFuncFormatList(), - "index": interpolationFuncIndex(), - "join": interpolationFuncJoin(), - "jsonencode": interpolationFuncJSONEncode(), - "length": interpolationFuncLength(), - "list": interpolationFuncList(), - "log": interpolationFuncLog(), - "lower": interpolationFuncLower(), - "map": interpolationFuncMap(), - "max": interpolationFuncMax(), - "md5": interpolationFuncMd5(), - "merge": interpolationFuncMerge(), - "min": interpolationFuncMin(), - "pathexpand": interpolationFuncPathExpand(), - "pow": interpolationFuncPow(), - "uuid": interpolationFuncUUID(), - "replace": interpolationFuncReplace(), - "sha1": interpolationFuncSha1(), - "sha256": interpolationFuncSha256(), - "sha512": interpolationFuncSha512(), - "signum": interpolationFuncSignum(), - "slice": interpolationFuncSlice(), - "sort": interpolationFuncSort(), - "split": interpolationFuncSplit(), - "substr": interpolationFuncSubstr(), - "timestamp": interpolationFuncTimestamp(), - "title": interpolationFuncTitle(), - "trimspace": interpolationFuncTrimSpace(), - "upper": interpolationFuncUpper(), - "zipmap": interpolationFuncZipMap(), + "abs": interpolationFuncAbs(), + "basename": interpolationFuncBasename(), + "base64decode": interpolationFuncBase64Decode(), + "base64encode": interpolationFuncBase64Encode(), + "base64gzip": interpolationFuncBase64Gzip(), + "base64sha256": interpolationFuncBase64Sha256(), + "base64sha512": interpolationFuncBase64Sha512(), + "bcrypt": interpolationFuncBcrypt(), + "ceil": interpolationFuncCeil(), + "chomp": interpolationFuncChomp(), + "cidrhost": interpolationFuncCidrHost(), + "cidrnetmask": interpolationFuncCidrNetmask(), + "cidrsubnet": interpolationFuncCidrSubnet(), + "coalesce": interpolationFuncCoalesce(), + "coalescelist": interpolationFuncCoalesceList(), + "compact": interpolationFuncCompact(), + "concat": interpolationFuncConcat(), + "contains": interpolationFuncContains(), + "dirname": interpolationFuncDirname(), + "distinct": interpolationFuncDistinct(), + "element": interpolationFuncElement(), + "chunklist": interpolationFuncChunklist(), + "file": interpolationFuncFile(), + "filebase64sha256": interpolationFuncMakeFileHash(interpolationFuncBase64Sha256()), + "filebase64sha512": interpolationFuncMakeFileHash(interpolationFuncBase64Sha512()), + "filemd5": interpolationFuncMakeFileHash(interpolationFuncMd5()), + "filesha1": interpolationFuncMakeFileHash(interpolationFuncSha1()), + "filesha256": 
interpolationFuncMakeFileHash(interpolationFuncSha256()), + "filesha512": interpolationFuncMakeFileHash(interpolationFuncSha512()), + "matchkeys": interpolationFuncMatchKeys(), + "flatten": interpolationFuncFlatten(), + "floor": interpolationFuncFloor(), + "format": interpolationFuncFormat(), + "formatlist": interpolationFuncFormatList(), + "indent": interpolationFuncIndent(), + "index": interpolationFuncIndex(), + "join": interpolationFuncJoin(), + "jsonencode": interpolationFuncJSONEncode(), + "length": interpolationFuncLength(), + "list": interpolationFuncList(), + "log": interpolationFuncLog(), + "lower": interpolationFuncLower(), + "map": interpolationFuncMap(), + "max": interpolationFuncMax(), + "md5": interpolationFuncMd5(), + "merge": interpolationFuncMerge(), + "min": interpolationFuncMin(), + "pathexpand": interpolationFuncPathExpand(), + "pow": interpolationFuncPow(), + "uuid": interpolationFuncUUID(), + "replace": interpolationFuncReplace(), + "rsadecrypt": interpolationFuncRsaDecrypt(), + "sha1": interpolationFuncSha1(), + "sha256": interpolationFuncSha256(), + "sha512": interpolationFuncSha512(), + "signum": interpolationFuncSignum(), + "slice": interpolationFuncSlice(), + "sort": interpolationFuncSort(), + "split": interpolationFuncSplit(), + "substr": interpolationFuncSubstr(), + "timestamp": interpolationFuncTimestamp(), + "timeadd": interpolationFuncTimeAdd(), + "title": interpolationFuncTitle(), + "transpose": interpolationFuncTranspose(), + "trimspace": interpolationFuncTrimSpace(), + "upper": interpolationFuncUpper(), + "urlencode": interpolationFuncURLEncode(), + "zipmap": interpolationFuncZipMap(), } } @@ -669,6 +690,21 @@ func interpolationFuncFormatList() ast.Function { } } +// interpolationFuncIndent indents a multi-line string with the +// specified number of spaces +func interpolationFuncIndent() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + spaces := args[0].(int) + data := args[1].(string) + pad := strings.Repeat(" ", spaces) + return strings.Replace(data, "\n", "\n"+pad, -1), nil + }, + } +} + // interpolationFuncIndex implements the "index" function that allows one to // find the index of a specific element in a list func interpolationFuncIndex() ast.Function { @@ -823,8 +859,7 @@ func interpolationFuncJoin() ast.Function { } // interpolationFuncJSONEncode implements the "jsonencode" function that encodes -// a string, list, or map as its JSON representation. For now, values in the -// list or map may only be strings. +// a string, list, or map as its JSON representation. func interpolationFuncJSONEncode() ast.Function { return ast.Function{ ArgTypes: []ast.Type{ast.TypeAny}, @@ -837,28 +872,36 @@ func interpolationFuncJSONEncode() ast.Function { toEncode = typedArg case []ast.Variable: - // We preallocate the list here. Note that it's important that in - // the length 0 case, we have an empty list rather than nil, as - // they encode differently. - // XXX It would be nice to support arbitrarily nested data here. Is - // there an inverse of hil.InterfaceToVariable? 
 				strings := make([]string, len(typedArg))
 				for i, v := range typedArg {
 					if v.Type != ast.TypeString {
-						return "", fmt.Errorf("list elements must be strings")
+						variable, _ := hil.InterfaceToVariable(typedArg)
+						toEncode, _ = hil.VariableToInterface(variable)
+
+						jEnc, err := json.Marshal(toEncode)
+						if err != nil {
+							return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+						}
+						return string(jEnc), nil
+
 					}
 					strings[i] = v.Value.(string)
 				}
 				toEncode = strings
 
 			case map[string]ast.Variable:
-				// XXX It would be nice to support arbitrarily nested data here. Is
-				// there an inverse of hil.InterfaceToVariable?
 				stringMap := make(map[string]string)
 				for k, v := range typedArg {
 					if v.Type != ast.TypeString {
-						return "", fmt.Errorf("map values must be strings")
+						variable, _ := hil.InterfaceToVariable(typedArg)
+						toEncode, _ = hil.VariableToInterface(variable)
+
+						jEnc, err := json.Marshal(toEncode)
+						if err != nil {
+							return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+						}
+						return string(jEnc), nil
 					}
 					stringMap[k] = v.Value.(string)
 				}
 				toEncode = stringMap
@@ -1098,6 +1141,56 @@ func interpolationFuncElement() ast.Function {
 	}
 }
 
+// returns the `list` items chunked by `size`.
+func interpolationFuncChunklist() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeList, // inputList
+			ast.TypeInt,  // size
+		},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			output := make([]ast.Variable, 0)
+
+			values, _ := args[0].([]ast.Variable)
+			size, _ := args[1].(int)
+
+			// error if size is negative
+			if size < 0 {
+				return nil, fmt.Errorf("The size argument must not be negative")
+			}
+
+			// if size is 0, returns a list made of the initial list
+			if size == 0 {
+				output = append(output, ast.Variable{
+					Type:  ast.TypeList,
+					Value: values,
+				})
+				return output, nil
+			}
+
+			variables := make([]ast.Variable, 0)
+			chunk := ast.Variable{
+				Type:  ast.TypeList,
+				Value: variables,
+			}
+			l := len(values)
+			for i, v := range values {
+				variables = append(variables, v)
+
+				// Emit a chunk each time we've collected `size` items or
+				// reached the end of `values`.
+				if (i+1)%size == 0 || (i+1) == l {
+					chunk.Value = variables
+					output = append(output, chunk)
+					variables = make([]ast.Variable, 0)
+				}
+			}
+
+			return output, nil
+		},
+	}
+}
+
 // interpolationFuncKeys implements the "keys" function that yields a list of
 // keys of map types within a Terraform configuration.
 func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
@@ -1197,6 +1290,32 @@ func interpolationFuncBase64Decode() ast.Function {
 	}
 }
 
+// interpolationFuncBase64Gzip implements the "base64gzip" function, which
+// gzip-compresses a string and then base64-encodes the compressed result.
+func interpolationFuncBase64Gzip() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+
+			var b bytes.Buffer
+			gz := gzip.NewWriter(&b)
+			if _, err := gz.Write([]byte(s)); err != nil {
+				return "", fmt.Errorf("failed to write gzip raw data: '%s'", s)
+			}
+			if err := gz.Flush(); err != nil {
+				return "", fmt.Errorf("failed to flush gzip writer: '%s'", s)
+			}
+			if err := gz.Close(); err != nil {
+				return "", fmt.Errorf("failed to close gzip writer: '%s'", s)
+			}
+
+			return base64.StdEncoding.EncodeToString(b.Bytes()), nil
+		},
+	}
+}
+
 // interpolationFuncLower implements the "lower" function that does
 // string lower casing.
func interpolationFuncLower() ast.Function { @@ -1396,6 +1515,29 @@ func interpolationFuncTimestamp() ast.Function { } } +func interpolationFuncTimeAdd() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // input timestamp string in RFC3339 format + ast.TypeString, // duration to add to input timestamp that should be parsable by time.ParseDuration + }, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + + ts, err := time.Parse(time.RFC3339, args[0].(string)) + if err != nil { + return nil, err + } + duration, err := time.ParseDuration(args[1].(string)) + if err != nil { + return nil, err + } + + return ts.Add(duration).Format(time.RFC3339), nil + }, + } +} + // interpolationFuncTitle implements the "title" function that returns a copy of the // string in which first characters of all the words are capitalized. func interpolationFuncTitle() ast.Function { @@ -1441,7 +1583,7 @@ func interpolationFuncSubstr() ast.Function { return nil, fmt.Errorf("length should be a non-negative integer") } - if offset > len(str) { + if offset > len(str) || offset < 0 { return nil, fmt.Errorf("offset cannot be larger than the length of the string") } @@ -1453,3 +1595,160 @@ func interpolationFuncSubstr() ast.Function { }, } } + +// Flatten until it's not ast.TypeList +func flattener(finalList []ast.Variable, flattenList []ast.Variable) []ast.Variable { + for _, val := range flattenList { + if val.Type == ast.TypeList { + finalList = flattener(finalList, val.Value.([]ast.Variable)) + } else { + finalList = append(finalList, val) + } + } + return finalList +} + +// Flatten to single list +func interpolationFuncFlatten() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + inputList := args[0].([]ast.Variable) + + var outputList []ast.Variable + return flattener(outputList, inputList), nil + }, + } +} + +func interpolationFuncURLEncode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + return url.QueryEscape(s), nil + }, + } +} + +// interpolationFuncTranspose implements the "transpose" function +// that converts a map (string,list) to a map (string,list) where +// the unique values of the original lists become the keys of the +// new map and the keys of the original map become values for the +// corresponding new keys. 
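The timeadd callback above is a thin wrapper over the standard library; the same parse-then-add sequence in isolation (standalone snippet, timestamp and duration invented). Note that time.ParseDuration also accepts negative values such as "-24h", so the function can subtract as well:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts, err := time.Parse(time.RFC3339, "2019-02-22T18:24:37Z")
	if err != nil {
		panic(err)
	}
	d, err := time.ParseDuration("1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Add(d).Format(time.RFC3339))
	// Output: 2019-02-22T19:54:37Z
}
```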
+func interpolationFuncTranspose() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeMap, + Callback: func(args []interface{}) (interface{}, error) { + + inputMap := args[0].(map[string]ast.Variable) + outputMap := make(map[string]ast.Variable) + tmpMap := make(map[string][]string) + + for inKey, inVal := range inputMap { + if inVal.Type != ast.TypeList { + return nil, fmt.Errorf("transpose requires a map of lists of strings") + } + values := inVal.Value.([]ast.Variable) + for _, listVal := range values { + if listVal.Type != ast.TypeString { + return nil, fmt.Errorf("transpose requires the given map values to be lists of strings") + } + outKey := listVal.Value.(string) + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]ast.Variable, 0) + for _, v := range outVal { + values = append(values, ast.Variable{Type: ast.TypeString, Value: v}) + } + outputMap[outKey] = ast.Variable{Type: ast.TypeList, Value: values} + } + return outputMap, nil + }, + } +} + +// interpolationFuncAbs returns the absolute value of a given float. +func interpolationFuncAbs() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeFloat}, + ReturnType: ast.TypeFloat, + Callback: func(args []interface{}) (interface{}, error) { + return math.Abs(args[0].(float64)), nil + }, + } +} + +// interpolationFuncRsaDecrypt implements the "rsadecrypt" function that does +// RSA decryption. +func interpolationFuncRsaDecrypt() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + key := args[1].(string) + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", fmt.Errorf("Failed to decode input %q: cipher text must be base64-encoded", s) + } + + block, _ := pem.Decode([]byte(key)) + if block == nil { + return "", fmt.Errorf("Failed to read key %q: no key found", key) + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return "", fmt.Errorf( + "Failed to read key %q: password protected keys are\n"+ + "not supported. Please decrypt the key prior to use.", key) + } + + x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return "", err + } + + out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) + if err != nil { + return "", err + } + + return string(out), nil + }, + } +} + +// interpolationFuncMakeFileHash constructs a function that hashes the contents +// of a file by combining the implementations of the file(...) function and +// a given other function that is assumed to take a single string argument and +// return a hash value. 
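Behaviorally, the transpose function above is the usual inversion of a map of lists; the same logic over a plain map[string][]string (hypothetical helper, assuming "sort" is imported):

```go
func transpose(in map[string][]string) map[string][]string {
	out := make(map[string][]string)
	for key, vals := range in {
		for _, v := range vals {
			out[v] = append(out[v], key)
		}
	}
	// Sort for deterministic output, as the original does on each append.
	for _, keys := range out {
		sort.Strings(keys)
	}
	return out
}

// transpose(map[string][]string{"a": {"x", "y"}, "b": {"x"}})
//   => map[string][]string{"x": {"a", "b"}, "y": {"a"}}
```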
+func interpolationFuncMakeFileHash(hashFunc ast.Function) ast.Function { + fileFunc := interpolationFuncFile() + + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + filename := args[0].(string) + contents, err := fileFunc.Callback([]interface{}{filename}) + if err != nil { + return nil, err + } + return hashFunc.Callback([]interface{}{contents}) + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go index ead3d10..66a677d 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go @@ -271,9 +271,7 @@ func (w *interpolationWalker) splitSlice() { result = append(result, val.Value) } case []interface{}: - for _, element := range val { - result = append(result, element) - } + result = append(result, val...) default: result = append(result, v) } diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go index 5dd7d46..6e34781 100644 --- a/vendor/github.com/hashicorp/terraform/config/loader.go +++ b/vendor/github.com/hashicorp/terraform/config/loader.go @@ -80,7 +80,7 @@ func LoadDir(root string) (*Config, error) { if err != nil { return nil, err } - if len(files) == 0 { + if len(files) == 0 && len(overrides) == 0 { return nil, &ErrNoConfigsFound{Dir: root} } @@ -112,6 +112,9 @@ func LoadDir(root string) (*Config, error) { result = c } } + if len(files) == 0 { + result = &Config{} + } // Load all the overrides, and merge them into the config for _, f := range overrides { diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go index e85e493..68cffe2 100644 --- a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go +++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go @@ -17,10 +17,20 @@ type hclConfigurable struct { Root *ast.File } +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + var ReservedResourceFields = []string{ "connection", "count", "depends_on", + "id", "lifecycle", "provider", "provisioner", @@ -35,6 +45,7 @@ func (t *hclConfigurable) Config() (*Config, error) { validKeys := map[string]struct{}{ "atlas": struct{}{}, "data": struct{}{}, + "locals": struct{}{}, "module": struct{}{}, "output": struct{}{}, "provider": struct{}{}, @@ -70,6 +81,15 @@ func (t *hclConfigurable) Config() (*Config, error) { } } + // Build local values + if locals := list.Filter("locals"); len(locals.Items) > 0 { + var err error + config.Locals, err = loadLocalsHcl(locals) + if err != nil { + return nil, err + } + } + // Get Atlas configuration if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { var err error @@ -373,9 +393,6 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { err) } - // Remove the fields we handle specially - delete(config, "source") - rawConfig, err := NewRawConfig(config) if err != nil { return nil, fmt.Errorf( @@ -384,7 +401,11 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { err) } - // If we have a count, then figure it out + // Remove the fields we handle specially + delete(config, "source") + delete(config, "version") + delete(config, "providers") + var source string if o := listVal.Filter("source"); len(o.Items) > 0 { 
err = hcl.DecodeObject(&source, o.Items[0].Val) @@ -396,9 +417,33 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { } } + var version string + if o := listVal.Filter("version"); len(o.Items) > 0 { + err = hcl.DecodeObject(&version, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version for %s: %s", + k, + err) + } + } + + var providers map[string]string + if o := listVal.Filter("providers"); len(o.Items) > 0 { + err = hcl.DecodeObject(&providers, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error parsing providers for %s: %s", + k, + err) + } + } + result = append(result, &Module{ Name: k, Source: source, + Version: version, + Providers: providers, RawConfig: rawConfig, }) } @@ -406,6 +451,59 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { return result, nil } +// loadLocalsHcl recurses into the given HCL object turns it into +// a list of locals. +func loadLocalsHcl(list *ast.ObjectList) ([]*Local, error) { + + result := make([]*Local, 0, len(list.Items)) + + for _, block := range list.Items { + if len(block.Keys) > 0 { + return nil, fmt.Errorf( + "locals block at %s should not have label %q", + block.Pos(), block.Keys[0].Token.Value(), + ) + } + + blockObj, ok := block.Val.(*ast.ObjectType) + if !ok { + return nil, fmt.Errorf("locals value at %s should be a block", block.Val.Pos()) + } + + // blockObj now contains directly our local decls + for _, item := range blockObj.List.Items { + if len(item.Keys) != 1 { + return nil, fmt.Errorf("local declaration at %s may not be a block", item.Val.Pos()) + } + + // By the time we get here there can only be one item left, but + // we'll decode into a map anyway because it's a convenient way + // to extract both the key and the value robustly. + kv := map[string]interface{}{} + hcl.DecodeObject(&kv, item) + for k, v := range kv { + rawConfig, err := NewRawConfig(map[string]interface{}{ + "value": v, + }) + + if err != nil { + return nil, fmt.Errorf( + "error parsing local value %q at %s: %s", + k, item.Val.Pos(), err, + ) + } + + result = append(result, &Local{ + Name: k, + RawConfig: rawConfig, + }) + } + } + } + + return result, nil +} + // LoadOutputsHcl recurses into the given HCL object and turns // it into a mapping of outputs. 
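To make the shape loadLocalsHcl above expects concrete, here is a hypothetical configuration and the values it produces; a label on the block or a nested block inside it triggers the errors shown in that function:

```go
// Input:
//
//	locals {
//	  instance_count = 3
//	  name_prefix    = "demo"
//	}
//
// Result: two *Local values, each wrapping its expression in a single-key
// raw config so it can be interpolated later:
//
//	&Local{Name: "instance_count", RawConfig: /* {"value": 3} */}
//	&Local{Name: "name_prefix",    RawConfig: /* {"value": "demo"} */}
```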
func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { @@ -434,6 +532,7 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { // Delete special keys delete(config, "depends_on") + delete(config, "description") rawConfig, err := NewRawConfig(config) if err != nil { @@ -455,10 +554,23 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { } } + // If we have a description field, then filter that + var description string + if o := listVal.Filter("description"); len(o.Items) > 0 { + err := hcl.DecodeObject(&description, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading description for output %q: %s", + n, + err) + } + } + result = append(result, &Output{ - Name: n, - RawConfig: rawConfig, - DependsOn: dependsOn, + Name: n, + RawConfig: rawConfig, + DependsOn: dependsOn, + Description: description, }) } diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go new file mode 100644 index 0000000..4f9f129 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go @@ -0,0 +1,473 @@ +package config + +import ( + "fmt" + "sort" + "strings" + + gohcl2 "github.com/hashicorp/hcl2/gohcl" + hcl2 "github.com/hashicorp/hcl2/hcl" + hcl2parse "github.com/hashicorp/hcl2/hclparse" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/zclconf/go-cty/cty" +) + +// hcl2Configurable is an implementation of configurable that knows +// how to turn a HCL Body into a *Config object. +type hcl2Configurable struct { + SourceFilename string + Body hcl2.Body +} + +// hcl2Loader is a wrapper around a HCL parser that provides a fileLoaderFunc. +type hcl2Loader struct { + Parser *hcl2parse.Parser +} + +// For the moment we'll just have a global loader since we don't have anywhere +// better to stash this. +// TODO: refactor the loader API so that it uses some sort of object we can +// stash the parser inside. +var globalHCL2Loader = newHCL2Loader() + +// newHCL2Loader creates a new hcl2Loader containing a new HCL Parser. +// +// HCL parsers retain information about files that are loaded to aid in +// producing diagnostic messages, so all files within a single configuration +// should be loaded with the same parser to ensure the availability of +// full diagnostic information. +func newHCL2Loader() hcl2Loader { + return hcl2Loader{ + Parser: hcl2parse.NewParser(), + } +} + +// loadFile is a fileLoaderFunc that knows how to read a HCL2 file and turn it +// into a hcl2Configurable. +func (l hcl2Loader) loadFile(filename string) (configurable, []string, error) { + var f *hcl2.File + var diags hcl2.Diagnostics + if strings.HasSuffix(filename, ".json") { + f, diags = l.Parser.ParseJSONFile(filename) + } else { + f, diags = l.Parser.ParseHCLFile(filename) + } + if diags.HasErrors() { + // Return diagnostics as an error; callers may type-assert this to + // recover the original diagnostics, if it doesn't end up wrapped + // in another error. + return nil, nil, diags + } + + return &hcl2Configurable{ + SourceFilename: filename, + Body: f.Body, + }, nil, nil +} + +func (t *hcl2Configurable) Config() (*Config, error) { + config := &Config{} + + // these structs are used only for the initial shallow decoding; we'll + // expand this into the main, public-facing config structs afterwards. 
+ type atlas struct { + Name string `hcl:"name"` + Include *[]string `hcl:"include"` + Exclude *[]string `hcl:"exclude"` + } + type provider struct { + Name string `hcl:"name,label"` + Alias *string `hcl:"alias,attr"` + Version *string `hcl:"version,attr"` + Config hcl2.Body `hcl:",remain"` + } + type module struct { + Name string `hcl:"name,label"` + Source string `hcl:"source,attr"` + Version *string `hcl:"version,attr"` + Providers *map[string]string `hcl:"providers,attr"` + Config hcl2.Body `hcl:",remain"` + } + type resourceLifecycle struct { + CreateBeforeDestroy *bool `hcl:"create_before_destroy,attr"` + PreventDestroy *bool `hcl:"prevent_destroy,attr"` + IgnoreChanges *[]string `hcl:"ignore_changes,attr"` + } + type connection struct { + Config hcl2.Body `hcl:",remain"` + } + type provisioner struct { + Type string `hcl:"type,label"` + + When *string `hcl:"when,attr"` + OnFailure *string `hcl:"on_failure,attr"` + + Connection *connection `hcl:"connection,block"` + Config hcl2.Body `hcl:",remain"` + } + type managedResource struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + + CountExpr hcl2.Expression `hcl:"count,attr"` + Provider *string `hcl:"provider,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + + Lifecycle *resourceLifecycle `hcl:"lifecycle,block"` + Provisioners []provisioner `hcl:"provisioner,block"` + Connection *connection `hcl:"connection,block"` + + Config hcl2.Body `hcl:",remain"` + } + type dataResource struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + + CountExpr hcl2.Expression `hcl:"count,attr"` + Provider *string `hcl:"provider,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + + Config hcl2.Body `hcl:",remain"` + } + type variable struct { + Name string `hcl:"name,label"` + + DeclaredType *string `hcl:"type,attr"` + Default *cty.Value `hcl:"default,attr"` + Description *string `hcl:"description,attr"` + Sensitive *bool `hcl:"sensitive,attr"` + } + type output struct { + Name string `hcl:"name,label"` + + ValueExpr hcl2.Expression `hcl:"value,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + Description *string `hcl:"description,attr"` + Sensitive *bool `hcl:"sensitive,attr"` + } + type locals struct { + Definitions hcl2.Attributes `hcl:",remain"` + } + type backend struct { + Type string `hcl:"type,label"` + Config hcl2.Body `hcl:",remain"` + } + type terraform struct { + RequiredVersion *string `hcl:"required_version,attr"` + Backend *backend `hcl:"backend,block"` + } + type topLevel struct { + Atlas *atlas `hcl:"atlas,block"` + Datas []dataResource `hcl:"data,block"` + Modules []module `hcl:"module,block"` + Outputs []output `hcl:"output,block"` + Providers []provider `hcl:"provider,block"` + Resources []managedResource `hcl:"resource,block"` + Terraform *terraform `hcl:"terraform,block"` + Variables []variable `hcl:"variable,block"` + Locals []*locals `hcl:"locals,block"` + } + + var raw topLevel + diags := gohcl2.DecodeBody(t.Body, nil, &raw) + if diags.HasErrors() { + // Do some minimal decoding to see if we can at least get the + // required Terraform version, which might help explain why we + // couldn't parse the rest. + if raw.Terraform != nil && raw.Terraform.RequiredVersion != nil { + config.Terraform = &Terraform{ + RequiredVersion: *raw.Terraform.RequiredVersion, + } + } + + // We return the diags as an implementation of error, which the + // caller than then type-assert if desired to recover the individual + // diagnostics. 
+ // FIXME: The current API gives us no way to return warnings in the + // absense of any errors. + return config, diags + } + + if raw.Terraform != nil { + var reqdVersion string + var backend *Backend + + if raw.Terraform.RequiredVersion != nil { + reqdVersion = *raw.Terraform.RequiredVersion + } + if raw.Terraform.Backend != nil { + backend = new(Backend) + backend.Type = raw.Terraform.Backend.Type + + // We don't permit interpolations or nested blocks inside the + // backend config, so we can decode the config early here and + // get direct access to the values, which is important for the + // config hashing to work as expected. + var config map[string]string + configDiags := gohcl2.DecodeBody(raw.Terraform.Backend.Config, nil, &config) + diags = append(diags, configDiags...) + + raw := make(map[string]interface{}, len(config)) + for k, v := range config { + raw[k] = v + } + + var err error + backend.RawConfig, err = NewRawConfig(raw) + if err != nil { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid backend configuration", + Detail: fmt.Sprintf("Error in backend configuration: %s", err), + }) + } + } + + config.Terraform = &Terraform{ + RequiredVersion: reqdVersion, + Backend: backend, + } + } + + if raw.Atlas != nil { + var include, exclude []string + if raw.Atlas.Include != nil { + include = *raw.Atlas.Include + } + if raw.Atlas.Exclude != nil { + exclude = *raw.Atlas.Exclude + } + config.Atlas = &AtlasConfig{ + Name: raw.Atlas.Name, + Include: include, + Exclude: exclude, + } + } + + for _, rawM := range raw.Modules { + m := &Module{ + Name: rawM.Name, + Source: rawM.Source, + RawConfig: NewRawConfigHCL2(rawM.Config), + } + + if rawM.Version != nil { + m.Version = *rawM.Version + } + + if rawM.Providers != nil { + m.Providers = *rawM.Providers + } + + config.Modules = append(config.Modules, m) + } + + for _, rawV := range raw.Variables { + v := &Variable{ + Name: rawV.Name, + } + if rawV.DeclaredType != nil { + v.DeclaredType = *rawV.DeclaredType + } + if rawV.Default != nil { + v.Default = hcl2shim.ConfigValueFromHCL2(*rawV.Default) + } + if rawV.Description != nil { + v.Description = *rawV.Description + } + + config.Variables = append(config.Variables, v) + } + + for _, rawO := range raw.Outputs { + o := &Output{ + Name: rawO.Name, + } + + if rawO.Description != nil { + o.Description = *rawO.Description + } + if rawO.DependsOn != nil { + o.DependsOn = *rawO.DependsOn + } + if rawO.Sensitive != nil { + o.Sensitive = *rawO.Sensitive + } + + // The result is expected to be a map like map[string]interface{}{"value": something}, + // so we'll fake that with our hcl2shim.SingleAttrBody shim. 
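The backend block above is decoded directly into map[string]string, which is what enforces the no-interpolations, no-nested-blocks rule. A minimal sketch of that decode in isolation (hypothetical snippet, using the same hcl2parse/gohcl2 packages this file imports):

```go
p := hcl2parse.NewParser()
f, diags := p.ParseHCL([]byte("bucket = \"tfstate\"\nregion = \"us-east-1\"\n"), "backend.tf")
if diags.HasErrors() {
	// handle parse errors
}

var cfg map[string]string
diags = gohcl2.DecodeBody(f.Body, nil, &cfg)
// cfg == map[string]string{"bucket": "tfstate", "region": "us-east-1"}.
// A ${...} interpolation (no EvalContext is supplied) or a nested block
// would surface as diagnostics here instead.
```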
+		o.RawConfig = NewRawConfigHCL2(hcl2shim.SingleAttrBody{
+			Name: "value",
+			Expr: rawO.ValueExpr,
+		})
+
+		config.Outputs = append(config.Outputs, o)
+	}
+
+	for _, rawR := range raw.Resources {
+		r := &Resource{
+			Mode: ManagedResourceMode,
+			Type: rawR.Type,
+			Name: rawR.Name,
+		}
+		if rawR.Lifecycle != nil {
+			var l ResourceLifecycle
+			if rawR.Lifecycle.CreateBeforeDestroy != nil {
+				l.CreateBeforeDestroy = *rawR.Lifecycle.CreateBeforeDestroy
+			}
+			if rawR.Lifecycle.PreventDestroy != nil {
+				l.PreventDestroy = *rawR.Lifecycle.PreventDestroy
+			}
+			if rawR.Lifecycle.IgnoreChanges != nil {
+				l.IgnoreChanges = *rawR.Lifecycle.IgnoreChanges
+			}
+			r.Lifecycle = l
+		}
+		if rawR.Provider != nil {
+			r.Provider = *rawR.Provider
+		}
+		if rawR.DependsOn != nil {
+			r.DependsOn = *rawR.DependsOn
+		}
+
+		var defaultConnInfo *RawConfig
+		if rawR.Connection != nil {
+			defaultConnInfo = NewRawConfigHCL2(rawR.Connection.Config)
+		}
+
+		for _, rawP := range rawR.Provisioners {
+			p := &Provisioner{
+				Type: rawP.Type,
+			}
+
+			switch {
+			case rawP.When == nil:
+				p.When = ProvisionerWhenCreate
+			case *rawP.When == "create":
+				p.When = ProvisionerWhenCreate
+			case *rawP.When == "destroy":
+				p.When = ProvisionerWhenDestroy
+			default:
+				p.When = ProvisionerWhenInvalid
+			}
+
+			switch {
+			case rawP.OnFailure == nil:
+				p.OnFailure = ProvisionerOnFailureFail
+			case *rawP.OnFailure == "fail":
+				p.OnFailure = ProvisionerOnFailureFail
+			case *rawP.OnFailure == "continue":
+				p.OnFailure = ProvisionerOnFailureContinue
+			default:
+				p.OnFailure = ProvisionerOnFailureInvalid
+			}
+
+			if rawP.Connection != nil {
+				p.ConnInfo = NewRawConfigHCL2(rawP.Connection.Config)
+			} else {
+				p.ConnInfo = defaultConnInfo
+			}
+
+			p.RawConfig = NewRawConfigHCL2(rawP.Config)
+
+			r.Provisioners = append(r.Provisioners, p)
+		}
+
+		// The old loader records the count expression as a weird RawConfig with
+		// a single-element map inside. Since the rest of the world is assuming
+		// that, we'll mimic it here.
+		{
+			countBody := hcl2shim.SingleAttrBody{
+				Name: "count",
+				Expr: rawR.CountExpr,
+			}
+
+			r.RawCount = NewRawConfigHCL2(countBody)
+			r.RawCount.Key = "count"
+		}
+
+		r.RawConfig = NewRawConfigHCL2(rawR.Config)
+
+		config.Resources = append(config.Resources, r)
+
+	}
+
+	for _, rawR := range raw.Datas {
+		r := &Resource{
+			Mode: DataResourceMode,
+			Type: rawR.Type,
+			Name: rawR.Name,
+		}
+
+		if rawR.Provider != nil {
+			r.Provider = *rawR.Provider
+		}
+		if rawR.DependsOn != nil {
+			r.DependsOn = *rawR.DependsOn
+		}
+
+		// The old loader records the count expression as a weird RawConfig with
+		// a single-element map inside. Since the rest of the world is assuming
+		// that, we'll mimic it here.
+		{
+			countBody := hcl2shim.SingleAttrBody{
+				Name: "count",
+				Expr: rawR.CountExpr,
+			}
+
+			r.RawCount = NewRawConfigHCL2(countBody)
+			r.RawCount.Key = "count"
+		}
+
+		r.RawConfig = NewRawConfigHCL2(rawR.Config)
+
+		config.Resources = append(config.Resources, r)
+	}
+
+	for _, rawP := range raw.Providers {
+		p := &ProviderConfig{
+			Name: rawP.Name,
+		}
+
+		if rawP.Alias != nil {
+			p.Alias = *rawP.Alias
+		}
+		if rawP.Version != nil {
+			p.Version = *rawP.Version
+		}
+
+		// The result is expected to be a map like map[string]interface{}{"value": something},
+		// so we'll fake that with our hcl2shim.SingleAttrBody shim.
+		p.RawConfig = NewRawConfigHCL2(rawP.Config)
+
+		config.ProviderConfigs = append(config.ProviderConfigs, p)
+	}
+
+	for _, rawL := range raw.Locals {
+		names := make([]string, 0, len(rawL.Definitions))
+		for n := range rawL.Definitions {
+			names = append(names, n)
+		}
+		sort.Strings(names)
+		for _, n := range names {
+			attr := rawL.Definitions[n]
+			l := &Local{
+				Name: n,
+				RawConfig: NewRawConfigHCL2(hcl2shim.SingleAttrBody{
+					Name: "value",
+					Expr: attr.Expr,
+				}),
+			}
+			config.Locals = append(config.Locals, l)
+		}
+	}
+
+	// FIXME: The current API gives us no way to return warnings in the
+	// absence of any errors.
+	var err error
+	if diags.HasErrors() {
+		err = diags
+	}
+
+	return config, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
index db214be..55fc864 100644
--- a/vendor/github.com/hashicorp/terraform/config/merge.go
+++ b/vendor/github.com/hashicorp/terraform/config/merge.go
@@ -137,6 +137,17 @@ func Merge(c1, c2 *Config) (*Config, error) {
 		}
 	}
 
+	// Local Values
+	// These are simpler than the other config elements because they are just
+	// flat values and so no deep merging is required.
+	if localsCount := len(c1.Locals) + len(c2.Locals); localsCount != 0 {
+		// Explicit length check above because we want c.Locals to remain
+		// nil if the result would be empty.
+		c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals))
+		c.Locals = append(c.Locals, c1.Locals...)
+		c.Locals = append(c.Locals, c2.Locals...)
+	}
+
 	return c, nil
 }
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
index 96b4a63..5073d0d 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/get.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -3,6 +3,7 @@ package module
 import (
 	"io/ioutil"
 	"os"
+	"path/filepath"
 
 	"github.com/hashicorp/go-getter"
 )
@@ -37,13 +38,10 @@ func GetCopy(dst, src string) error {
 	if err != nil {
 		return err
 	}
-	// FIXME: This isn't completely safe. Creating and removing our temp path
-	// exposes where to race to inject files.
-	if err := os.RemoveAll(tmpDir); err != nil {
-		return err
-	}
 	defer os.RemoveAll(tmpDir)
 
+	tmpDir = filepath.Join(tmpDir, "module")
+
 	// Get to that temporary dir
 	if err := getter.Get(tmpDir, src); err != nil {
 		return err
@@ -57,15 +55,3 @@ func GetCopy(dst, src string) error {
 	// Copy to the final location
 	return copyDir(dst, tmpDir)
 }
-
-func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
-	// Get the module with the level specified if we were told to.
-	if mode > GetModeNone {
-		if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
-			return "", false, err
-		}
-	}
-
-	// Get the directory where the module is.
- return s.Dir(key) -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go index 8603ee2..da520ab 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/inode.go +++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go @@ -1,4 +1,4 @@ -// +build linux darwin openbsd netbsd solaris +// +build linux darwin openbsd netbsd solaris dragonfly package module diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go index f8649f6..7dc8fcc 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/module.go +++ b/vendor/github.com/hashicorp/terraform/config/module/module.go @@ -2,6 +2,8 @@ package module // Module represents the metadata for a single module. type Module struct { - Name string - Source string + Name string + Source string + Version string + Providers map[string]string } diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go new file mode 100644 index 0000000..58e3a10 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go @@ -0,0 +1,365 @@ +package module + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + getter "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/mitchellh/cli" +) + +const manifestName = "modules.json" + +// moduleManifest is the serialization structure used to record the stored +// module's metadata. +type moduleManifest struct { + Modules []moduleRecord +} + +// moduleRecords represents the stored module's metadata. +// This is compared for equality using '==', so all fields needs to remain +// comparable. +type moduleRecord struct { + // Source is the module source string from the config, minus any + // subdirectory. + Source string + + // Key is the locally unique identifier for this module. + Key string + + // Version is the exact version string for the stored module. + Version string + + // Dir is the directory name returned by the FileStorage. This is what + // allows us to correlate a particular module version with the location on + // disk. + Dir string + + // Root is the root directory containing the module. If the module is + // unpacked from an archive, and not located in the root directory, this is + // used to direct the loader to the correct subdirectory. This is + // independent from any subdirectory in the original source string, which + // may traverse further into the module tree. + Root string + + // url is the location of the module source + url string + + // Registry is true if this module is sourced from a registry + registry bool +} + +// Storage implements methods to manage the storage of modules. +// This is used by Tree.Load to query registries, authenticate requests, and +// store modules locally. +type Storage struct { + // StorageDir is the full path to the directory where all modules will be + // stored. + StorageDir string + + // Ui is an optional cli.Ui for user output + Ui cli.Ui + + // Mode is the GetMode that will be used for various operations. + Mode GetMode + + registry *registry.Client +} + +// NewStorage returns a new initialized Storage object. 
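The "==" requirement in the moduleRecord comment above is enforced by the compiler; a standalone sketch of the constraint:

```go
type rec struct {
	Source, Key, Version, Dir, Root string
	registry                        bool
}

func same(a, b rec) bool {
	return a == b // fine: strings and bools are comparable
}

// Adding a field such as `Tags []string` would make `a == b` a
// compile-time error, since slices only compare to nil; that is why the
// comment insists every field stay comparable.
```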
+func NewStorage(dir string, services *disco.Disco) *Storage { + regClient := registry.NewClient(services, nil) + + return &Storage{ + StorageDir: dir, + registry: regClient, + } +} + +// loadManifest returns the moduleManifest file from the parent directory. +func (s Storage) loadManifest() (moduleManifest, error) { + manifest := moduleManifest{} + + manifestPath := filepath.Join(s.StorageDir, manifestName) + data, err := ioutil.ReadFile(manifestPath) + if err != nil && !os.IsNotExist(err) { + return manifest, err + } + + if len(data) == 0 { + return manifest, nil + } + + if err := json.Unmarshal(data, &manifest); err != nil { + return manifest, err + } + + for i, rec := range manifest.Modules { + // If the path was recorded before we changed to always using a + // slash as separator, we delete the record from the manifest so + // it can be discovered again and will be recorded using a slash. + if strings.Contains(rec.Dir, "\\") { + manifest.Modules[i] = manifest.Modules[len(manifest.Modules)-1] + manifest.Modules = manifest.Modules[:len(manifest.Modules)-1] + continue + } + + // Make sure we use the correct path separator. + rec.Dir = filepath.FromSlash(rec.Dir) + } + + return manifest, nil +} + +// Store the location of the module, along with the version used and the module +// root directory. The storage method loads the entire file and rewrites it +// each time. This is only done a few times during init, so efficiency is +// not a concern. +func (s Storage) recordModule(rec moduleRecord) error { + manifest, err := s.loadManifest() + if err != nil { + // if there was a problem with the file, we will attempt to write a new + // one. Any non-data related error should surface there. + log.Printf("[WARN] error reading module manifest: %s", err) + } + + // do nothing if we already have the exact module + for i, stored := range manifest.Modules { + if rec == stored { + return nil + } + + // they are not equal, but if the storage path is the same we need to + // remove this rec to be replaced. + if rec.Dir == stored.Dir { + manifest.Modules[i] = manifest.Modules[len(manifest.Modules)-1] + manifest.Modules = manifest.Modules[:len(manifest.Modules)-1] + break + } + } + + // Make sure we always use a slash separator. + rec.Dir = filepath.ToSlash(rec.Dir) + + manifest.Modules = append(manifest.Modules, rec) + + js, err := json.Marshal(manifest) + if err != nil { + panic(err) + } + + manifestPath := filepath.Join(s.StorageDir, manifestName) + return ioutil.WriteFile(manifestPath, js, 0644) +} + +// load the manifest from dir, and return all module versions matching the +// provided source. Records with no version info will be skipped, as they need +// to be uniquely identified by other means. +func (s Storage) moduleVersions(source string) ([]moduleRecord, error) { + manifest, err := s.loadManifest() + if err != nil { + return manifest.Modules, err + } + + var matching []moduleRecord + + for _, m := range manifest.Modules { + if m.Source == source && m.Version != "" { + log.Printf("[DEBUG] found local version %q for module %s", m.Version, m.Source) + matching = append(matching, m) + } + } + + return matching, nil +} + +func (s Storage) moduleDir(key string) (string, error) { + manifest, err := s.loadManifest() + if err != nil { + return "", err + } + + for _, m := range manifest.Modules { + if m.Key == key { + return m.Dir, nil + } + } + + return "", nil +} + +// return only the root directory of the module stored in dir. 
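For orientation, a modules.json handled by the manifest code above might look like the following (hypothetical contents, assuming the encoding/json import already in this file). Only the exported fields round-trip; the unexported url and registry fields are skipped by encoding/json and are repopulated when the module is fetched again:

```go
const sample = `{
  "Modules": [
    {
      "Source":  "git::https://example.com/net/vpc.git",
      "Key":     "1.vpc;git::https://example.com/net/vpc.git",
      "Version": "",
      "Dir":     "3f1aabcd",
      "Root":    "module"
    }
  ]
}`

var m moduleManifest
if err := json.Unmarshal([]byte(sample), &m); err != nil {
	// handle error
}
```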
+func (s Storage) getModuleRoot(dir string) (string, error) { + manifest, err := s.loadManifest() + if err != nil { + return "", err + } + + for _, mod := range manifest.Modules { + if mod.Dir == dir { + return mod.Root, nil + } + } + return "", nil +} + +// record only the Root directory for the module stored at dir. +func (s Storage) recordModuleRoot(dir, root string) error { + rec := moduleRecord{ + Dir: dir, + Root: root, + } + + return s.recordModule(rec) +} + +func (s Storage) output(msg string) { + if s.Ui == nil || s.Mode == GetModeNone { + return + } + s.Ui.Output(msg) +} + +func (s Storage) getStorage(key string, src string) (string, bool, error) { + storage := &getter.FolderStorage{ + StorageDir: s.StorageDir, + } + + log.Printf("[DEBUG] fetching module from %s", src) + + // Get the module with the level specified if we were told to. + if s.Mode > GetModeNone { + log.Printf("[DEBUG] fetching %q with key %q", src, key) + if err := storage.Get(key, src, s.Mode == GetModeUpdate); err != nil { + return "", false, err + } + } + + // Get the directory where the module is. + dir, found, err := storage.Dir(key) + log.Printf("[DEBUG] found %q in %q: %t", src, dir, found) + return dir, found, err +} + +// find a stored module that's not from a registry +func (s Storage) findModule(key string) (string, error) { + if s.Mode == GetModeUpdate { + return "", nil + } + + return s.moduleDir(key) +} + +// GetModule fetches a module source into the specified directory. This is used +// as a convenience function by the CLI to initialize a configuration. +func (s Storage) GetModule(dst, src string) error { + // reset this in case the caller was going to re-use it + mode := s.Mode + s.Mode = GetModeUpdate + defer func() { + s.Mode = mode + }() + + rec, err := s.findRegistryModule(src, anyVersion) + if err != nil { + return err + } + + pwd, err := os.Getwd() + if err != nil { + return err + } + + source := rec.url + if source == "" { + source, err = getter.Detect(src, pwd, getter.Detectors) + if err != nil { + return fmt.Errorf("module %s: %s", src, err) + } + } + + if source == "" { + return fmt.Errorf("module %q not found", src) + } + + return GetCopy(dst, source) +} + +// find a registry module +func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, error) { + rec := moduleRecord{ + Source: mSource, + } + // detect if we have a registry source + mod, err := regsrc.ParseModuleSource(mSource) + switch err { + case nil: + //ok + case regsrc.ErrInvalidModuleSource: + return rec, nil + default: + return rec, err + } + rec.registry = true + + log.Printf("[TRACE] %q is a registry module", mod.Display()) + + versions, err := s.moduleVersions(mod.String()) + if err != nil { + log.Printf("[ERROR] error looking up versions for %q: %s", mod.Display(), err) + return rec, err + } + + match, err := newestRecord(versions, constraint) + if err != nil { + log.Printf("[INFO] no matching version for %q<%s>, %s", mod.Display(), constraint, err) + } + log.Printf("[DEBUG] matched %q version %s for %s", mod, match.Version, constraint) + + rec.Dir = match.Dir + rec.Version = match.Version + found := rec.Dir != "" + + // we need to lookup available versions + // Only on Get if it's not found, on unconditionally on Update + if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) { + resp, err := s.registry.Versions(mod) + if err != nil { + return rec, err + } + + if len(resp.Modules) == 0 { + return rec, fmt.Errorf("module %q not found in registry", mod.Display()) + } + + match, err := 
newestVersion(resp.Modules[0].Versions, constraint) + if err != nil { + return rec, err + } + + if match == nil { + return rec, fmt.Errorf("no versions for %q found matching %q", mod.Display(), constraint) + } + + rec.Version = match.Version + + rec.url, err = s.registry.Location(mod, rec.Version) + if err != nil { + return rec, err + } + + // we've already validated this by now + host, _ := mod.SvcHost() + s.output(fmt.Sprintf(" Found version %s of %s on %s", rec.Version, mod.Module(), host.ForDisplay())) + + } + return rec, nil +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go index fc9e733..6f1ff05 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/testing.go +++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go @@ -4,8 +4,6 @@ import ( "io/ioutil" "os" "testing" - - "github.com/hashicorp/go-getter" ) // TestTree loads a module at the given path and returns the tree as well @@ -26,8 +24,8 @@ func TestTree(t *testing.T, path string) (*Tree, func()) { } // Get the child modules - s := &getter.FolderStorage{StorageDir: dir} - if err := mod.Load(s, GetModeGet); err != nil { + s := &Storage{StorageDir: dir, Mode: GetModeGet} + if err := mod.Load(s); err != nil { t.Fatalf("err: %s", err) return nil, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go index 4b0b153..f56d69b 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/tree.go +++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go @@ -4,11 +4,14 @@ import ( "bufio" "bytes" "fmt" + "log" "path/filepath" "strings" "sync" - "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform/tfdiags" + + getter "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" ) @@ -26,6 +29,17 @@ type Tree struct { children map[string]*Tree path []string lock sync.RWMutex + + // version is the final version of the config loaded for the Tree's module + version string + // source is the "source" string used to load this module. It's possible + // for a module source to change, but the path remains the same, preventing + // it from being reloaded. + source string + // parent allows us to walk back up the tree and determine if there are any + // versioned ancestor modules which may effect the stored location of + // submodules + parent *Tree } // NewTree returns a new Tree for the given config structure. @@ -40,7 +54,7 @@ func NewEmptyTree() *Tree { // We do this dummy load so that the tree is marked as "loaded". It // should never fail because this is just about a no-op. If it does fail // we panic so we can know its a bug. - if err := t.Load(nil, GetModeGet); err != nil { + if err := t.Load(&Storage{Mode: GetModeGet}); err != nil { panic(err) } @@ -126,8 +140,10 @@ func (t *Tree) Modules() []*Module { result := make([]*Module, len(t.config.Modules)) for i, m := range t.config.Modules { result[i] = &Module{ - Name: m.Name, - Source: m.Source, + Name: m.Name, + Version: m.Version, + Source: m.Source, + Providers: m.Providers, } } @@ -155,81 +171,178 @@ func (t *Tree) Name() string { // module trees inherently require the configuration to be in a reasonably // sane state: no circular dependencies, proper module sources, etc. A full // suite of validations can be done by running Validate (after loading). 
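With the new signature, callers hand Tree.Load a *Storage instead of a getter.Storage plus a GetMode, as the TestTree change above already shows. Typical usage would look roughly like this (hypothetical sketch; configDir stands in for the caller's module directory):

```go
s := &Storage{
	StorageDir: filepath.Join(".terraform", "modules"),
	Mode:       GetModeGet, // use GetModeUpdate to force re-fetching
}

tree, err := NewTreeModule("", configDir)
if err != nil {
	// handle error
}
if err := tree.Load(s); err != nil {
	// handle error
}
```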
-func (t *Tree) Load(s getter.Storage, mode GetMode) error { +func (t *Tree) Load(s *Storage) error { t.lock.Lock() defer t.lock.Unlock() - // Reset the children if we have any - t.children = nil + children, err := t.getChildren(s) + if err != nil { + return err + } + + // Go through all the children and load them. + for _, c := range children { + if err := c.Load(s); err != nil { + return err + } + } + + // Set our tree up + t.children = children - modules := t.Modules() + return nil +} + +func (t *Tree) getChildren(s *Storage) (map[string]*Tree, error) { children := make(map[string]*Tree) // Go through all the modules and get the directory for them. - for _, m := range modules { + for _, m := range t.Modules() { if _, ok := children[m.Name]; ok { - return fmt.Errorf( + return nil, fmt.Errorf( "module %s: duplicated. module names must be unique", m.Name) } // Determine the path to this child - path := make([]string, len(t.path), len(t.path)+1) - copy(path, t.path) - path = append(path, m.Name) + modPath := make([]string, len(t.path), len(t.path)+1) + copy(modPath, t.path) + modPath = append(modPath, m.Name) - // Split out the subdir if we have one - source, subDir := getter.SourceDirSubdir(m.Source) + log.Printf("[TRACE] module source: %q", m.Source) - source, err := getter.Detect(source, t.config.Dir, getter.Detectors) + // add the module path to help indicate where modules with relative + // paths are being loaded from + s.output(fmt.Sprintf("- module.%s", strings.Join(modPath, "."))) + + // Lookup the local location of the module. + // dir is the local directory where the module is stored + mod, err := s.findRegistryModule(m.Source, m.Version) if err != nil { - return fmt.Errorf("module %s: %s", m.Name, err) + return nil, err } + // The key is the string that will be used to uniquely id the Source in + // the local storage. The prefix digit can be incremented to + // invalidate the local module storage. + key := "1." + t.versionedPathKey(m) + if mod.Version != "" { + key += "." + mod.Version + } + + // Check for the exact key if it's not a registry module + if !mod.registry { + mod.Dir, err = s.findModule(key) + if err != nil { + return nil, err + } + } + + if mod.Dir != "" && s.Mode != GetModeUpdate { + // We found it locally, but in order to load the Tree we need to + // find out if there was another subDir stored from detection. + subDir, err := s.getModuleRoot(mod.Dir) + if err != nil { + // If there's a problem with the subdir record, we'll let the + // recordSubdir method fix it up. Any other filesystem errors + // will turn up again below. + log.Println("[WARN] error reading subdir record:", err) + } + + fullDir := filepath.Join(mod.Dir, subDir) + + child, err := NewTreeModule(m.Name, fullDir) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } + child.path = modPath + child.parent = t + child.version = mod.Version + child.source = m.Source + children[m.Name] = child + continue + } + + // Split out the subdir if we have one. + // Terraform keeps the entire requested tree, so that modules can + // reference sibling modules from the same archive or repo. 
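Putting the pieces above together, for a hypothetical top-level registry module named "vpc" the storage key works out as follows (versionedPathKey is defined later in this file):

```go
// key = "1." + t.versionedPathKey(m)
//     = "1.vpc;registry.example.com/org/vpc/aws"
// and, once a registry version has been resolved:
// key = "1.vpc;registry.example.com/org/vpc/aws.1.2.0"
//
// Incrementing the leading "1." invalidates all previously stored modules,
// and versionedPathKey folds each ancestor's name, version and source into
// the key so nested modules are re-fetched when any ancestor changes.
```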
+ rawSource, subDir := getter.SourceDirSubdir(m.Source) + + // we haven't found a source, so fallback to the go-getter detectors + source := mod.url + if source == "" { + source, err = getter.Detect(rawSource, t.config.Dir, getter.Detectors) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } + } + + log.Printf("[TRACE] detected module source %q", source) + // Check if the detector introduced something new. - source, subDir2 := getter.SourceDirSubdir(source) - if subDir2 != "" { - subDir = filepath.Join(subDir2, subDir) + // For example, the registry always adds a subdir of `//*`, + // indicating that we need to strip off the first component from the + // tar archive, though we may not yet know what it is called. + source, detectedSubDir := getter.SourceDirSubdir(source) + if detectedSubDir != "" { + subDir = filepath.Join(detectedSubDir, subDir) + } + + output := "" + switch s.Mode { + case GetModeUpdate: + output = fmt.Sprintf(" Updating source %q", m.Source) + default: + output = fmt.Sprintf(" Getting source %q", m.Source) } + s.output(output) - // Get the directory where this module is so we can load it - key := strings.Join(path, ".") - key = fmt.Sprintf("root.%s-%s", key, m.Source) - dir, ok, err := getStorage(s, key, source, mode) + dir, ok, err := s.getStorage(key, source) if err != nil { - return err + return nil, err } if !ok { - return fmt.Errorf( - "module %s: not found, may need to be downloaded using 'terraform get'", m.Name) + return nil, fmt.Errorf("module %s: not found, may need to run 'terraform init'", m.Name) } - // If we have a subdirectory, then merge that in + log.Printf("[TRACE] %q stored in %q", source, dir) + + // expand and record the subDir for later + fullDir := dir if subDir != "" { - dir = filepath.Join(dir, subDir) - } + fullDir, err = getter.SubdirGlob(dir, subDir) + if err != nil { + return nil, err + } - // Load the configurations.Dir(source) - children[m.Name], err = NewTreeModule(m.Name, dir) - if err != nil { - return fmt.Errorf( - "module %s: %s", m.Name, err) + // +1 to account for the pathsep + if len(dir)+1 > len(fullDir) { + return nil, fmt.Errorf("invalid module storage path %q", fullDir) + } + subDir = fullDir[len(dir)+1:] } - // Set the path of this child - children[m.Name].path = path - } + // add new info to the module record + mod.Key = key + mod.Dir = dir + mod.Root = subDir - // Go through all the children and load them. - for _, c := range children { - if err := c.Load(s, mode); err != nil { - return err + // record the module in our manifest + if err := s.recordModule(mod); err != nil { + return nil, err } - } - // Set our tree up - t.children = children + child, err := NewTreeModule(m.Name, fullDir) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } + child.path = modPath + child.parent = t + child.version = mod.Version + child.source = m.Source + children[m.Name] = child + } - return nil + return children, nil } // Path is the full path to this tree. @@ -272,32 +385,35 @@ func (t *Tree) String() string { // as verifying things such as parameters/outputs between the various modules. // // Load must be called prior to calling Validate or an error will be returned. 
-func (t *Tree) Validate() error { +func (t *Tree) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if !t.Loaded() { - return fmt.Errorf("tree must be loaded before calling Validate") + diags = diags.Append(fmt.Errorf( + "tree must be loaded before calling Validate", + )) + return diags } - // If something goes wrong, here is our error template - newErr := &treeError{Name: []string{t.Name()}} - // Terraform core does not handle root module children named "root". // We plan to fix this in the future but this bug was brought up in // the middle of a release and we don't want to introduce wide-sweeping // changes at that time. if len(t.path) == 1 && t.name == "root" { - return fmt.Errorf("root module cannot contain module named 'root'") + diags = diags.Append(fmt.Errorf( + "root module cannot contain module named 'root'", + )) + return diags } // Validate our configuration first. - if err := t.config.Validate(); err != nil { - newErr.Add(err) - } + diags = diags.Append(t.config.Validate()) // If we're the root, we do extra validation. This validation usually // requires the entire tree (since children don't have parent pointers). if len(t.path) == 0 { if err := t.validateProviderAlias(); err != nil { - newErr.Add(err) + diags = diags.Append(err) } } @@ -306,20 +422,11 @@ func (t *Tree) Validate() error { // Validate all our children for _, c := range children { - err := c.Validate() - if err == nil { + childDiags := c.Validate() + diags = diags.Append(childDiags) + if diags.HasErrors() { continue } - - verr, ok := err.(*treeError) - if !ok { - // Unknown error, just return... - return err - } - - // Append ourselves to the error and then return - verr.Name = append(verr.Name, t.Name()) - newErr.AddChild(verr) } // Go over all the modules and verify that any parameters are valid @@ -345,9 +452,10 @@ func (t *Tree) Validate() error { // Compare to the keys in our raw config for the module for k, _ := range m.RawConfig.Raw { if _, ok := varMap[k]; !ok { - newErr.Add(fmt.Errorf( - "module %s: %s is not a valid parameter", - m.Name, k)) + diags = diags.Append(fmt.Errorf( + "module %q: %q is not a valid argument", + m.Name, k, + )) } // Remove the required @@ -356,9 +464,10 @@ func (t *Tree) Validate() error { // If we have any required left over, they aren't set. for k, _ := range requiredMap { - newErr.Add(fmt.Errorf( - "module %s: required variable %q not set", - m.Name, k)) + diags = diags.Append(fmt.Errorf( + "module %q: missing required argument %q", + m.Name, k, + )) } } @@ -373,9 +482,10 @@ func (t *Tree) Validate() error { tree, ok := children[mv.Name] if !ok { - newErr.Add(fmt.Errorf( - "%s: undefined module referenced %s", - source, mv.Name)) + diags = diags.Append(fmt.Errorf( + "%s: reference to undefined module %q", + source, mv.Name, + )) continue } @@ -387,14 +497,56 @@ func (t *Tree) Validate() error { } } if !found { - newErr.Add(fmt.Errorf( - "%s: %s is not a valid output for module %s", - source, mv.Field, mv.Name)) + diags = diags.Append(fmt.Errorf( + "%s: %q is not a valid output for module %q", + source, mv.Field, mv.Name, + )) } } } - return newErr.ErrOrNil() + return diags +} + +// versionedPathKey returns a path string with every levels full name, version +// and source encoded. This is to provide a unique key for our module storage, +// since submodules need to know which versions of their ancestor modules they +// are loaded from. 
+// For example, if module A has a subdirectory B, if module A's source or +// version is updated B's storage key must reflect this change in order for the +// correct version of B's source to be loaded. +func (t *Tree) versionedPathKey(m *Module) string { + path := make([]string, len(t.path)+1) + path[len(path)-1] = m.Name + ";" + m.Source + // We're going to load these in order for easier reading and debugging, but + // in practice they only need to be unique and consistent. + + p := t + i := len(path) - 2 + for ; i >= 0; i-- { + if p == nil { + break + } + // we may have been loaded under a blank Tree, so always check for a name + // too. + if p.name == "" { + break + } + seg := p.name + if p.version != "" { + seg += "#" + p.version + } + + if p.source != "" { + seg += ";" + p.source + } + + path[i] = seg + p = p.parent + } + + key := strings.Join(path, "|") + return key } // treeError is an error use by Tree.Validate to accumulates all diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go index 090d4f7..f203556 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go +++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go @@ -67,7 +67,7 @@ func (t *Tree) validateProviderAlias() error { // We didn't find the alias, error! err = multierror.Append(err, fmt.Errorf( - "module %s: provider alias must be defined by the module or a parent: %s", + "module %s: provider alias must be defined by the module: %s", strings.Join(pv.Path, "."), k)) } } diff --git a/vendor/github.com/hashicorp/terraform/config/module/versions.go b/vendor/github.com/hashicorp/terraform/config/module/versions.go new file mode 100644 index 0000000..8348d4b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/versions.go @@ -0,0 +1,95 @@ +package module + +import ( + "errors" + "fmt" + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/registry/response" +) + +const anyVersion = ">=0.0.0" + +// return the newest version that satisfies the provided constraint +func newest(versions []string, constraint string) (string, error) { + if constraint == "" { + constraint = anyVersion + } + cs, err := version.NewConstraint(constraint) + if err != nil { + return "", err + } + + switch len(versions) { + case 0: + return "", errors.New("no versions found") + case 1: + v, err := version.NewVersion(versions[0]) + if err != nil { + return "", err + } + + if !cs.Check(v) { + return "", fmt.Errorf("no version found matching constraint %q", constraint) + } + return versions[0], nil + } + + sort.Slice(versions, func(i, j int) bool { + // versions should have already been validated + // sort invalid version strings to the end + iv, err := version.NewVersion(versions[i]) + if err != nil { + return true + } + jv, err := version.NewVersion(versions[j]) + if err != nil { + return true + } + return iv.GreaterThan(jv) + }) + + // versions are now in order, so just find the first which satisfies the + // constraint + for i := range versions { + v, err := version.NewVersion(versions[i]) + if err != nil { + continue + } + if cs.Check(v) { + return versions[i], nil + } + } + + return "", nil +} + +// return the newest *moduleVersion that matches the given constraint +// TODO: reconcile these two types and newest* functions +func newestVersion(moduleVersions []*response.ModuleVersion, constraint string) 
(*response.ModuleVersion, error) { + var versions []string + modules := make(map[string]*response.ModuleVersion) + + for _, m := range moduleVersions { + versions = append(versions, m.Version) + modules[m.Version] = m + } + + match, err := newest(versions, constraint) + return modules[match], err +} + +// return the newest moduleRecord that matches the given constraint +func newestRecord(moduleVersions []moduleRecord, constraint string) (moduleRecord, error) { + var versions []string + modules := make(map[string]moduleRecord) + + for _, m := range moduleVersions { + versions = append(versions, m.Version) + modules[m.Version] = m + } + + match, err := newest(versions, constraint) + return modules[match], err +} diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go index f8498d8..1854a8b 100644 --- a/vendor/github.com/hashicorp/terraform/config/raw_config.go +++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go @@ -3,8 +3,14 @@ package config import ( "bytes" "encoding/gob" + "errors" + "strconv" "sync" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + hcl2 "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/mitchellh/copystructure" @@ -27,8 +33,24 @@ const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" // RawConfig supports a query-like interface to request // information from deep within the structure. type RawConfig struct { - Key string - Raw map[string]interface{} + Key string + + // Only _one_ of Raw and Body may be populated at a time. + // + // In the normal case, Raw is populated and Body is nil. + // + // When the experimental HCL2 parsing mode is enabled, "Body" + // is populated and RawConfig serves only to transport the hcl2.Body + // through the rest of Terraform core so we can ultimately decode it + // once its schema is known. + // + // Once we transition to HCL2 as the primary representation, RawConfig + // should be removed altogether and the hcl2.Body should be passed + // around directly. + + Raw map[string]interface{} + Body hcl2.Body + Interpolations []ast.Node Variables map[string]InterpolatedVariable @@ -48,6 +70,26 @@ func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) { return result, nil } +// NewRawConfigHCL2 creates a new RawConfig that is serving as a capsule +// to transport a hcl2.Body. In this mode, the publicly-readable struct +// fields are not populated since all operations should instead be diverted +// to the HCL2 body. +// +// For a RawConfig object constructed with this function, the only valid use +// is to later retrieve the Body value and call its own methods. Callers +// may choose to set and then later handle the Key field, in a manner +// consistent with how it is handled by the Value method, but the Value +// method itself must not be used. +// +// This is an experimental codepath to be used only by the HCL2 config loader. +// Non-experimental parsing should _always_ use NewRawConfig to produce a +// fully-functional RawConfig object. +func NewRawConfigHCL2(body hcl2.Body) *RawConfig { + return &RawConfig{ + Body: body, + } +} + // RawMap returns a copy of the RawConfig.Raw map. 
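The newest helper above (and the newestVersion/newestRecord wrappers around it) reduces to picking the highest version that passes a go-version constraint check; for example (values invented):

```go
// Given versions {"1.0.0", "1.1.4", "1.2.0"} and constraint "~> 1.1":
//
//	cs, _ := version.NewConstraint("~> 1.1") // i.e. >= 1.1, < 2.0
//	v, _ := version.NewVersion("1.2.0")
//	cs.Check(v)                              // true
//
// The slice is sorted descending via GreaterThan, so "1.2.0" is the first
// candidate checked and wins. An empty constraint falls back to the
// anyVersion sentinel ">=0.0.0".
```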
 func (r *RawConfig) RawMap() map[string]interface{} {
 	r.lock.Lock()
@@ -69,6 +111,10 @@ func (r *RawConfig) Copy() *RawConfig {
 	r.lock.Lock()
 	defer r.lock.Unlock()
 
+	if r.Body != nil {
+		return NewRawConfigHCL2(r.Body)
+	}
+
 	newRaw := make(map[string]interface{})
 	for k, v := range r.Raw {
 		newRaw[k] = v
@@ -223,6 +269,13 @@ func (r *RawConfig) init() error {
 }
 
 func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
+	if r.Body != nil {
+		// For RawConfigs created for the HCL2 experiment, callers must
+		// use the HCL2 Body API directly rather than interpolating via
+		// the RawConfig.
+		return errors.New("this feature is not yet supported under the HCL2 experiment")
+	}
+
 	config, err := copystructure.Copy(r.Raw)
 	if err != nil {
 		return err
@@ -268,6 +321,74 @@ func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
 	return result
 }
 
+// couldBeInteger is a helper that determines if the represented value could
+// result in an integer.
+//
+// This function only works for RawConfigs that have "Key" set, meaning that
+// a single result can be produced. Calling this function will overwrite
+// the Config and Value results to be a test value.
+//
+// This function is conservative. If there is some doubt about whether the
+// result could be an integer -- for example, if it depends on a variable
+// whose type we don't know yet -- it will still return true.
+func (r *RawConfig) couldBeInteger() bool {
+	if r.Key == "" {
+		// un-keyed RawConfigs can never produce numbers
+		return false
+	}
+	if r.Body == nil {
+		// Normal path: using the interpolator in this package
+		// Interpolate with a fixed number to verify that it's a number.
+		r.interpolate(func(root ast.Node) (interface{}, error) {
+			// Execute the node but transform the AST so that it returns
+			// a fixed value of "5" for all interpolations.
+			result, err := hil.Eval(
+				hil.FixedValueTransform(
+					root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
+				nil)
+			if err != nil {
+				return "", err
+			}
+
+			return result.Value, nil
+		})
+		_, err := strconv.ParseInt(r.Value().(string), 0, 0)
+		return err == nil
+	} else {
+		// HCL2 experiment path: using the HCL2 API via shims
+		//
+		// This path catches fewer situations because we have to assume all
+		// variables are entirely unknown in HCL2, rather than the assumption
+		// above that all variables can be numbers because names like "var.foo"
+		// are considered a single variable rather than an attribute access.
+		// This is fine in practice, because we get a definitive answer
+		// during the graph walk when we have real values to work with.
+		attrs, diags := r.Body.JustAttributes()
+		if diags.HasErrors() {
+			// This body is not just a single attribute with a value, so
+			// this can't be a number.
+			return false
+		}
+		attr, hasAttr := attrs[r.Key]
+		if !hasAttr {
+			return false
+		}
+		result, diags := hcl2EvalWithUnknownVars(attr.Expr)
+		if diags.HasErrors() {
+			// We'll conservatively assume that this error is a result of
+			// us not being ready to fully-populate the scope, and catch
+			// any further problems during the main graph walk.
+			return true
+		}
+
+		// If the result is convertible to number then we'll allow it.
+		// We do this because an unknown string is optimistically convertible
+		// to number (might be "5") but a _known_ string "hello" is not.
+		_, err := convert.Convert(result, cty.Number)
+		return err == nil
+	}
+}
+
 // UnknownKeys returns the keys of the configuration that are unknown
 // because they had interpolated variables that must be computed.
func (r *RawConfig) UnknownKeys() []string { diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go index ea68b4f..8a55e06 100644 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go +++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go @@ -2,7 +2,7 @@ package config -import "fmt" +import "strconv" const _ResourceMode_name = "ManagedResourceModeDataResourceMode" @@ -10,7 +10,7 @@ var _ResourceMode_index = [...]uint8{0, 19, 35} func (i ResourceMode) String() string { if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { - return fmt.Sprintf("ResourceMode(%d)", i) + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" } return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go index f7bfadd..831fc77 100644 --- a/vendor/github.com/hashicorp/terraform/config/testing.go +++ b/vendor/github.com/hashicorp/terraform/config/testing.go @@ -6,6 +6,8 @@ import ( // TestRawConfig is used to create a RawConfig for testing. func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig { + t.Helper() + cfg, err := NewRawConfig(c) if err != nil { t.Fatalf("err: %s", err) diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go index f8776bc..b7eb10c 100644 --- a/vendor/github.com/hashicorp/terraform/dag/dag.go +++ b/vendor/github.com/hashicorp/terraform/dag/dag.go @@ -106,7 +106,7 @@ func (g *AcyclicGraph) TransitiveReduction() { uTargets := g.DownEdges(u) vs := AsVertexList(g.DownEdges(u)) - g.DepthFirstWalk(vs, func(v Vertex, d int) error { + g.depthFirstWalk(vs, false, func(v Vertex, d int) error { shared := uTargets.Intersection(g.DownEdges(v)) for _, vPrime := range AsVertexList(shared) { g.RemoveEdge(BasicEdge(u, vPrime)) @@ -187,9 +187,18 @@ type vertexAtDepth struct { } // depthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. This is not exported now but it would make sense -// to export this publicly at some point. +// the vertices in start. func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { + return g.depthFirstWalk(start, true, f) +} + +// This internal method provides the option of not sorting the vertices during +// the walk, which we use for the Transitive reduction. +// Some configurations can lead to fully-connected subgraphs, which makes our +// transitive reduction algorithm O(n^3). This is still passable for the size +// of our graphs, but the additional n^2 sort operations would make this +// uncomputable in a reasonable amount of time. +func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error { defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") seen := make(map[Vertex]struct{}) @@ -219,7 +228,11 @@ func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { // Visit targets of this in a consistent order. 
targets := AsVertexList(g.DownEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) + + if sorted { + sort.Sort(byVertexName(targets)) + } + for _, t := range targets { frontier = append(frontier, &vertexAtDepth{ Vertex: t, diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go index 16d5dd6..c567d27 100644 --- a/vendor/github.com/hashicorp/terraform/dag/marshal.go +++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go @@ -273,6 +273,9 @@ func (e *encoder) Encode(i interface{}) { } func (e *encoder) Add(v Vertex) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, AddVertex: newMarshalVertex(v), @@ -281,6 +284,9 @@ func (e *encoder) Add(v Vertex) { // Remove records the removal of Vertex v. func (e *encoder) Remove(v Vertex) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, RemoveVertex: newMarshalVertex(v), @@ -288,6 +294,9 @@ func (e *encoder) Remove(v Vertex) { } func (e *encoder) Connect(edge Edge) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, AddEdge: newMarshalEdge(edge), @@ -295,6 +304,9 @@ func (e *encoder) Connect(edge Edge) { } func (e *encoder) RemoveEdge(edge Edge) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, RemoveEdge: newMarshalEdge(edge), diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go index 23c87ad..f03b100 100644 --- a/vendor/github.com/hashicorp/terraform/dag/walk.go +++ b/vendor/github.com/hashicorp/terraform/dag/walk.go @@ -166,7 +166,7 @@ func (w *Walker) Update(g *AcyclicGraph) { w.wait.Add(1) // Add to our own set so we know about it already - log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) w.vertices.Add(raw) // Initialize the vertex info @@ -198,7 +198,7 @@ func (w *Walker) Update(g *AcyclicGraph) { // Delete it out of the map delete(w.vertexMap, v) - log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v)) w.vertices.Delete(raw) } @@ -229,7 +229,7 @@ func (w *Walker) Update(g *AcyclicGraph) { changedDeps.Add(waiter) log.Printf( - "[DEBUG] dag/walk: added edge: %q waiting on %q", + "[TRACE] dag/walk: added edge: %q waiting on %q", VertexName(waiter), VertexName(dep)) w.edges.Add(raw) } @@ -253,7 +253,7 @@ func (w *Walker) Update(g *AcyclicGraph) { changedDeps.Add(waiter) log.Printf( - "[DEBUG] dag/walk: removed edge: %q waiting on %q", + "[TRACE] dag/walk: removed edge: %q waiting on %q", VertexName(waiter), VertexName(dep)) w.edges.Delete(raw) } @@ -296,7 +296,7 @@ func (w *Walker) Update(g *AcyclicGraph) { info.depsCancelCh = cancelCh log.Printf( - "[DEBUG] dag/walk: dependencies changed for %q, sending new deps", + "[TRACE] dag/walk: dependencies changed for %q, sending new deps", VertexName(v)) // Start the waiter @@ -383,10 +383,10 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { // Run our callback or note that our upstream failed var err error if depsSuccess { - log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: walking %q", VertexName(v)) err = w.Callback(v) } else { - log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v)) err = errWalkUpstream } @@ -423,7 +423,7 @@ func (w 
*Walker) waitDeps( return case <-time.After(time.Second * 5): - log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q", + log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q", VertexName(v), VertexName(dep)) } } diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go deleted file mode 100644 index 18b8837..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go +++ /dev/null @@ -1,154 +0,0 @@ -// experiment package contains helper functions for tracking experimental -// features throughout Terraform. -// -// This package should be used for creating, enabling, querying, and deleting -// experimental features. By unifying all of that onto a single interface, -// we can have the Go compiler help us by enforcing every place we touch -// an experimental feature. -// -// To create a new experiment: -// -// 1. Add the experiment to the global vars list below, prefixed with X_ -// -// 2. Add the experiment variable to the All listin the init() function -// -// 3. Use it! -// -// To remove an experiment: -// -// 1. Delete the experiment global var. -// -// 2. Try to compile and fix all the places where the var was referenced. -// -// To use an experiment: -// -// 1. Use Flag() if you want the experiment to be available from the CLI. -// -// 2. Use Enabled() to check whether it is enabled. -// -// As a general user: -// -// 1. The `-Xexperiment-name` flag -// 2. The `TF_X_` env var. -// 3. The `TF_X_FORCE` env var can be set to force an experimental feature -// without human verifications. -// -package experiment - -import ( - "flag" - "fmt" - "os" - "strconv" - "strings" - "sync" -) - -// The experiments that are available are listed below. Any package in -// Terraform defining an experiment should define the experiments below. -// By keeping them all within the experiment package we force a single point -// of definition and use. This allows the compiler to enforce references -// so it becomes easy to remove the features. -var ( - // Shadow graph. This is already on by default. Disabling it will be - // allowed for awhile in order for it to not block operations. - X_shadow = newBasicID("shadow", "SHADOW", false) -) - -// Global variables this package uses because we are a package -// with global state. -var ( - // all is the list of all experiements. Do not modify this. - All []ID - - // enabled keeps track of what flags have been enabled - enabled map[string]bool - enabledLock sync.Mutex - - // Hidden "experiment" that forces all others to be on without verification - x_force = newBasicID("force", "FORCE", false) -) - -func init() { - // The list of all experiments, update this when an experiment is added. - All = []ID{ - X_shadow, - x_force, - } - - // Load - reload() -} - -// reload is used by tests to reload the global state. This is called by -// init publicly. -func reload() { - // Initialize - enabledLock.Lock() - enabled = make(map[string]bool) - enabledLock.Unlock() - - // Set defaults and check env vars - for _, id := range All { - // Get the default value - def := id.Default() - - // If we set it in the env var, default it to true - key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env())) - if v := os.Getenv(key); v != "" { - def = v != "0" - } - - // Set the default - SetEnabled(id, def) - } -} - -// Enabled returns whether an experiment has been enabled or not. 
-func Enabled(id ID) bool { - enabledLock.Lock() - defer enabledLock.Unlock() - return enabled[id.Flag()] -} - -// SetEnabled sets an experiment to enabled/disabled. Please check with -// the experiment docs for when calling this actually affects the experiment. -func SetEnabled(id ID, v bool) { - enabledLock.Lock() - defer enabledLock.Unlock() - enabled[id.Flag()] = v -} - -// Force returns true if the -Xforce of TF_X_FORCE flag is present, which -// advises users of this package to not verify with the user that they want -// experimental behavior and to just continue with it. -func Force() bool { - return Enabled(x_force) -} - -// Flag configures the given FlagSet with the flags to configure -// all active experiments. -func Flag(fs *flag.FlagSet) { - for _, id := range All { - desc := id.Flag() - key := fmt.Sprintf("X%s", id.Flag()) - fs.Var(&idValue{X: id}, key, desc) - } -} - -// idValue implements flag.Value for setting the enabled/disabled state -// of an experiment from the CLI. -type idValue struct { - X ID -} - -func (v *idValue) IsBoolFlag() bool { return true } -func (v *idValue) String() string { return strconv.FormatBool(Enabled(v.X)) } -func (v *idValue) Set(raw string) error { - b, err := strconv.ParseBool(raw) - if err == nil { - SetEnabled(v.X, b) - } - - return err -} diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go deleted file mode 100644 index 8e2f707..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go +++ /dev/null @@ -1,34 +0,0 @@ -package experiment - -// ID represents an experimental feature. -// -// The global vars defined on this package should be used as ID values. -// This interface is purposely not implement-able outside of this package -// so that we can rely on the Go compiler to enforce all experiment references. -type ID interface { - Env() string - Flag() string - Default() bool - - unexported() // So the ID can't be implemented externally. -} - -// basicID implements ID. -type basicID struct { - EnvValue string - FlagValue string - DefaultValue bool -} - -func newBasicID(flag, env string, def bool) ID { - return &basicID{ - EnvValue: env, - FlagValue: flag, - DefaultValue: def, - } -} - -func (id *basicID) Env() string { return id.EnvValue } -func (id *basicID) Flag() string { return id.FlagValue } -func (id *basicID) Default() bool { return id.DefaultValue } -func (id *basicID) unexported() {} diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go index 64d8263..6ccc523 100644 --- a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go +++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go @@ -1,6 +1,8 @@ package hashcode import ( + "bytes" + "fmt" "hash/crc32" ) @@ -20,3 +22,14 @@ func String(s string) int { // v == MinInt return 0 } + +// Strings hashes a list of strings to a unique hashcode. 
+func Strings(strings []string) string { + var buf bytes.Buffer + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return fmt.Sprintf("%d", String(buf.String())) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go index 433cd77..6bd92f7 100644 --- a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go +++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go @@ -18,7 +18,7 @@ const ( EnvLogFile = "TF_LOG_PATH" // Set to a file ) -var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} +var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} // LogOutput determines where we should send logs (if anywhere) and the log level. func LogOutput() (logOutput io.Writer, err error) { @@ -40,7 +40,7 @@ func LogOutput() (logOutput io.Writer, err error) { // This was the default since the beginning logOutput = &logutils.LevelFilter{ - Levels: validLevels, + Levels: ValidLevels, MinLevel: logutils.LogLevel(logLevel), Writer: logOutput, } @@ -77,7 +77,7 @@ func LogLevel() string { logLevel = strings.ToUpper(envLevel) } else { log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v", - envLevel, validLevels) + envLevel, ValidLevels) } return logLevel @@ -90,7 +90,7 @@ func IsDebugOrHigher() bool { } func isValidLogLevel(level string) bool { - for _, l := range validLevels { + for _, l := range ValidLevels { if strings.ToUpper(level) == string(l) { return true } diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go index 4477924..bddabe6 100644 --- a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go +++ b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go @@ -1,9 +1,12 @@ package logging import ( + "bytes" + "encoding/json" "log" "net/http" "net/http/httputil" + "strings" ) type transport struct { @@ -15,7 +18,7 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { if IsDebugOrHigher() { reqData, err := httputil.DumpRequestOut(req, true) if err == nil { - log.Printf("[DEBUG] "+logReqMsg, t.name, string(reqData)) + log.Printf("[DEBUG] "+logReqMsg, t.name, prettyPrintJsonLines(reqData)) } else { log.Printf("[ERROR] %s API Request error: %#v", t.name, err) } @@ -29,7 +32,7 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { if IsDebugOrHigher() { respData, err := httputil.DumpResponse(resp, true) if err == nil { - log.Printf("[DEBUG] "+logRespMsg, t.name, string(respData)) + log.Printf("[DEBUG] "+logRespMsg, t.name, prettyPrintJsonLines(respData)) } else { log.Printf("[ERROR] %s API Response error: %#v", t.name, err) } @@ -42,6 +45,20 @@ func NewTransport(name string, t http.RoundTripper) *transport { return &transport{name, t} } +// prettyPrintJsonLines iterates through a []byte line-by-line, +// transforming any lines that are complete json into pretty-printed json. 
+func prettyPrintJsonLines(b []byte) string { + parts := strings.Split(string(b), "\n") + for i, p := range parts { + if b := []byte(p); json.Valid(b) { + var out bytes.Buffer + json.Indent(&out, b, "", " ") + parts[i] = out.String() + } + } + return strings.Join(parts, "\n") +} + const logReqMsg = `%s API Request Details: ---[ REQUEST ]--------------------------------------- %s diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go index 1cde67c..4494955 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/id.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go @@ -18,6 +18,11 @@ func UniqueId() string { return PrefixedUniqueId(UniqueIdPrefix) } +// UniqueIDSuffixLength is the string length of the suffix generated by +// PrefixedUniqueId. This can be used by length validation functions to +// ensure prefixes are the correct length for the target field. +const UniqueIDSuffixLength = 26 + // Helper for a resource to generate a unique identifier w/ given prefix // // After the prefix, the ID consists of an incrementing 26 digit value (to match diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go index 37c586a..c34e21b 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go @@ -46,7 +46,7 @@ type StateChangeConf struct { // If the Timeout is exceeded before reaching the Target state, return an // error. // -// Otherwise, result the result of the first call to the Refresh function to +// Otherwise, the result is the result of the first call to the Refresh function to // reach the target state. func (conf *StateChangeConf) WaitForState() (interface{}, error) { log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go index d7de1a0..b97673f 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go @@ -11,11 +11,13 @@ import ( "reflect" "regexp" "strings" + "syscall" "testing" "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/go-getter" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/logutils" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/helper/logging" "github.com/hashicorp/terraform/terraform" @@ -186,6 +188,10 @@ type TestCheckFunc func(*terraform.State) error // ImportStateCheckFunc is the check function for ImportState tests type ImportStateCheckFunc func([]*terraform.InstanceState) error +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. +type ImportStateIdFunc func(*terraform.State) (string, error) + // TestCase is a single acceptance test case used to test the apply/destroy // lifecycle of a resource in a specific configuration. // @@ -260,6 +266,15 @@ type TestStep struct { // below. PreConfig func() + // Taint is a list of resource addresses to taint prior to the execution of + // the step. Be sure to only include this at a step where the referenced + // address will be present in state, as it will fail the test if the resource + // is missing. 
+ // + // This option is ignored on ImportState tests, and currently only works for + // resources in the root module path. + Taint []string + //--------------------------------------------------------------- // Test modes. One of the following groups of settings must be // set to determine what the test step will do. Ideally we would've @@ -304,10 +319,19 @@ type TestStep struct { // no-op plans PlanOnly bool + // PreventDiskCleanup can be set to true for testing terraform modules which + // require access to disk at runtime. Note that this will leave files in the + // temp folder + PreventDiskCleanup bool + // PreventPostDestroyRefresh can be set to true for cases where data sources // are tested alongside real resources PreventPostDestroyRefresh bool + // SkipFunc is called before applying config, but after PreConfig + // This is useful for defining test steps with platform-dependent checks + SkipFunc func() (bool, error) + //--------------------------------------------------------------- // ImportState testing //--------------------------------------------------------------- @@ -329,6 +353,12 @@ type TestStep struct { // the unset ImportStateId field. ImportStateIdPrefix string + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. It is sent the state, which can be + // checked to derive the attributes necessary and generate the string in the + // desired format. + ImportStateIdFunc ImportStateIdFunc + // ImportStateCheck checks the results of ImportState. It should be // used to verify that the resulting value of ImportState has the // proper resources, IDs, and attributes. @@ -345,6 +375,60 @@ type TestStep struct { ImportStateVerifyIgnore []string } +// Set to a file mask in sprintf format where %s is test name +const EnvLogPathMask = "TF_LOG_PATH_MASK" + +func LogOutput(t TestT) (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := logging.LogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + + if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + + logPath := fmt.Sprintf(logPathMask, testName) + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logutils.LevelFilter{ + Levels: logging.ValidLevels, + MinLevel: logutils.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// ParallelTest performs an acceptance test on a resource, allowing concurrency +// with other ParallelTest. +// +// Tests will fail if they do not properly handle conditions to allow multiple +// tests to occur against the same resource or service (e.g. random naming). +// All other requirements of the Test function also apply to this function. +func ParallelTest(t TestT, c TestCase) { + t.Parallel() + Test(t, c) +} + // Test performs an acceptance test on a resource. 
// // Tests are not run unless an environmental variable "TF_ACC" is @@ -366,7 +450,7 @@ func Test(t TestT, c TestCase) { return } - logWriter, err := logging.LogOutput() + logWriter, err := LogOutput(t) if err != nil { t.Error(fmt.Errorf("error setting up logging: %s", err)) } @@ -398,7 +482,18 @@ func Test(t TestT, c TestCase) { errored := false for i, step := range c.Steps { var err error - log.Printf("[WARN] Test: Executing step %d", i) + log.Printf("[DEBUG] Test: Executing step %d", i) + + if step.SkipFunc != nil { + skip, err := step.SkipFunc() + if err != nil { + t.Fatal(err) + } + if skip { + log.Printf("[WARN] Skipping step %d", i) + continue + } + } if step.Config == "" && !step.ImportState { err = fmt.Errorf( @@ -418,6 +513,15 @@ func Test(t TestT, c TestCase) { } } + // If we expected an error, but did not get one, fail + if err == nil && step.ExpectError != nil { + errored = true + t.Error(fmt.Sprintf( + "Step %d, no error received, but expected a match to:\n\n%s\n\n", + i, step.ExpectError)) + break + } + // If there was an error, exit if err != nil { // Perhaps we expected an error? Check if it matches @@ -485,6 +589,7 @@ func Test(t TestT, c TestCase) { Config: lastStep.Config, Check: c.CheckDestroy, Destroy: true, + PreventDiskCleanup: lastStep.PreventDiskCleanup, PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, } @@ -593,18 +698,12 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r if err != nil { return err } - if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { - if len(es) > 0 { - estrs := make([]string, len(es)) - for i, e := range es { - estrs[i] = e.Error() - } - return fmt.Errorf( - "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", - ws, estrs) + if diags := ctx.Validate(); len(diags) > 0 { + if diags.HasErrors() { + return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) } - log.Printf("[WARN] Config warnings: %#v", ws) + log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) } // Refresh! 
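// Illustrative sketch only (not part of the vendored patch): one way the
// TestStep fields introduced above (SkipFunc, Taint, ImportStateIdFunc) and
// ParallelTest might be combined in a provider acceptance test. The
// "example_thing" resource and its attributes are hypothetical.
//
//	package example
//
//	import (
//		"runtime"
//		"testing"
//
//		"github.com/hashicorp/terraform/helper/resource"
//		"github.com/hashicorp/terraform/terraform"
//	)
//
//	func TestAccExampleThing(t *testing.T) {
//		resource.ParallelTest(t, resource.TestCase{
//			// Providers, CheckDestroy, etc. omitted for brevity.
//			Steps: []resource.TestStep{
//				{
//					// SkipFunc runs after PreConfig; returning true skips the step.
//					SkipFunc: func() (bool, error) {
//						return runtime.GOOS == "windows", nil
//					},
//					Config: `resource "example_thing" "foo" { name = "foo" }`,
//				},
//				{
//					// Taint marks the named root-module resource before the step
//					// runs, forcing it to be destroyed and recreated on apply.
//					Taint:  []string{"example_thing.foo"},
//					Config: `resource "example_thing" "foo" { name = "foo" }`,
//				},
//				{
//					// ImportStateIdFunc derives a composite import ID from the
//					// state produced by the previous steps.
//					ResourceName: "example_thing.foo",
//					ImportState:  true,
//					ImportStateIdFunc: func(s *terraform.State) (string, error) {
//						rs := s.RootModule().Resources["example_thing.foo"]
//						return rs.Primary.Attributes["name"], nil
//					},
//				},
//			},
//		})
//	}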
@@ -657,9 +756,7 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r return nil } -func testModule( - opts terraform.ContextOpts, - step TestStep) (*module.Tree, error) { +func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error) { if step.PreConfig != nil { step.PreConfig() } @@ -669,7 +766,12 @@ func testModule( return nil, fmt.Errorf( "Error creating temporary directory for config: %s", err) } - defer os.RemoveAll(cfgPath) + + if step.PreventDiskCleanup { + log.Printf("[INFO] Skipping defer os.RemoveAll call") + } else { + defer os.RemoveAll(cfgPath) + } // Write the configuration cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf")) @@ -693,10 +795,11 @@ func testModule( } // Load the modules - modStorage := &getter.FolderStorage{ + modStorage := &module.Storage{ StorageDir: filepath.Join(cfgPath, ".tfmodules"), + Mode: module.GetModeGet, } - err = mod.Load(modStorage, module.GetModeGet) + err = mod.Load(modStorage) if err != nil { return nil, fmt.Errorf("Error downloading modules: %s", err) } @@ -771,12 +874,29 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc { return err } - if val, ok := is.Attributes[key]; ok && val != "" { - return nil + return testCheckResourceAttrSet(is, name, key) + } +} + +// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with +// support for non-root modules +func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mp, name) + if err != nil { + return err } + return testCheckResourceAttrSet(is, name, key) + } +} + +func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { + if val, ok := is.Attributes[key]; !ok || val == "" { return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) } + + return nil } // TestCheckResourceAttr is a TestCheckFunc which validates @@ -788,21 +908,37 @@ func TestCheckResourceAttr(name, key, value string) TestCheckFunc { return err } - if v, ok := is.Attributes[key]; !ok || v != value { - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) - } + return testCheckResourceAttr(is, name, key, value) + } +} - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - name, - key, - value, - v) +// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mp, name) + if err != nil { + return err } - return nil + return testCheckResourceAttr(is, name, key, value) + } +} + +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + if v, ok := is.Attributes[key]; !ok || v != value { + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) } + return nil } // TestCheckNoResourceAttr is a TestCheckFunc which ensures that @@ -814,14 +950,31 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc { return err } - if _, ok := is.Attributes[key]; ok { - return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + return testCheckNoResourceAttr(is, name, key) + } +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with 
+// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mp, name) + if err != nil { + return err } - return nil + return testCheckNoResourceAttr(is, name, key) } } +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + if _, ok := is.Attributes[key]; ok { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + } + + return nil +} + // TestMatchResourceAttr is a TestCheckFunc which checks that the value // in state for the given name/key combination matches the given regex. func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { @@ -831,17 +984,34 @@ func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { return err } - if !r.MatchString(is.Attributes[key]) { - return fmt.Errorf( - "%s: Attribute '%s' didn't match %q, got %#v", - name, - key, - r.String(), - is.Attributes[key]) + return testMatchResourceAttr(is, name, key, r) + } +} + +// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with +// support for non-root modules +func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mp, name) + if err != nil { + return err } - return nil + return testMatchResourceAttr(is, name, key, r) + } +} + +func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) } + + return nil } // TestCheckResourceAttrPtr is like TestCheckResourceAttr except the @@ -853,6 +1023,14 @@ func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckF } } +// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with +// support for non-root modules +func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckModuleResourceAttr(mp, name, key, *value)(s) + } +} + // TestCheckResourceAttrPair is a TestCheckFunc which validates that the values // in state for a pair of name/key combinations are equal. 
func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { @@ -861,33 +1039,57 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string if err != nil { return err } - vFirst, ok := isFirst.Attributes[keyFirst] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) - } isSecond, err := primaryInstanceState(s, nameSecond) if err != nil { return err } - vSecond, ok := isSecond.Attributes[keySecond] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mpFirst, nameFirst) + if err != nil { + return err } - if vFirst != vSecond { - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - nameFirst, - keyFirst, - vSecond, - vFirst) + isSecond, err := modulePathPrimaryInstanceState(s, mpSecond, nameSecond) + if err != nil { + return err } - return nil + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) } } +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + vFirst, ok := isFirst.Attributes[keyFirst] + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) + } + + vSecond, ok := isSecond.Attributes[keySecond] + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil +} + // TestCheckOutput checks an output in the Terraform configuration func TestCheckOutput(name, value string) TestCheckFunc { return func(s *terraform.State) error { @@ -936,23 +1138,43 @@ type TestT interface { Error(args ...interface{}) Fatal(args ...interface{}) Skip(args ...interface{}) + Name() string + Parallel() } // This is set to true by unit tests to alter some behavior var testTesting = false -// primaryInstanceState returns the primary instance state for the given resource name. -func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { rs, ok := ms.Resources[name] if !ok { - return nil, fmt.Errorf("Not found: %s", name) + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) } is := rs.Primary if is == nil { - return nil, fmt.Errorf("No primary instance: %s", name) + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) } return is, nil } + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. 
+func modulePathPrimaryInstanceState(s *terraform.State, mp []string, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + if ms == nil { + return nil, fmt.Errorf("No module found at: %s", mp) + } + + return modulePrimaryInstanceState(s, ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. +func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + return modulePrimaryInstanceState(s, ms, name) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go index 537a11c..033f126 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go @@ -1,10 +1,12 @@ package resource import ( + "errors" "fmt" "log" "strings" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/terraform" ) @@ -20,6 +22,14 @@ func testStep( opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { + // Pre-taint any resources that have been defined in Taint, as long as this + // is not a destroy step. + if !step.Destroy { + if err := testStepTaint(state, step); err != nil { + return state, err + } + } + mod, err := testModule(opts, step) if err != nil { return state, err @@ -33,17 +43,12 @@ func testStep( if err != nil { return state, fmt.Errorf("Error initializing context: %s", err) } - if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { - if len(es) > 0 { - estrs := make([]string, len(es)) - for i, e := range es { - estrs[i] = e.Error() - } - return state, fmt.Errorf( - "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", - ws, estrs) + if diags := ctx.Validate(); len(diags) > 0 { + if diags.HasErrors() { + return nil, errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) } - log.Printf("[WARN] Config warnings: %#v", ws) + + log.Printf("[WARN] Config warnings:\n%s", diags) } // Refresh! @@ -158,3 +163,19 @@ func testStep( // Made it here? Good job test step! 
return state, nil } + +func testStepTaint(state *terraform.State, step TestStep) error { + for _, p := range step.Taint { + m := state.RootModule() + if m == nil { + return errors.New("no state") + } + rs, ok := m.Resources[p] + if !ok { + return fmt.Errorf("resource %q not found in state", p) + } + log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + rs.Taint() + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go index 28ad105..94fef3c 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go @@ -16,15 +16,24 @@ func testStepImportState( state *terraform.State, step TestStep) (*terraform.State, error) { // Determine the ID to import - importId := step.ImportStateId - if importId == "" { + var importId string + switch { + case step.ImportStateIdFunc != nil: + var err error + importId, err = step.ImportStateIdFunc(state) + if err != nil { + return state, err + } + case step.ImportStateId != "": + importId = step.ImportStateId + default: resource, err := testResource(step, state) if err != nil { return state, err } - importId = resource.Primary.ID } + importPrefix := step.ImportStateIdPrefix if importPrefix != "" { importId = fmt.Sprintf("%s%s", importPrefix, importId) diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go index ca50e29..e56a515 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go @@ -74,7 +74,7 @@ func RetryableError(err error) *RetryError { return &RetryError{Err: err, Retryable: true} } -// NonRetryableError is a helper to create a RetryError that's _not)_ retryable +// NonRetryableError is a helper to create a RetryError that's _not_ retryable // from a given error. func NonRetryableError(err error) *RetryError { if err == nil { diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go index a0729c0..57fbba7 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go @@ -65,7 +65,7 @@ func (b *Backend) Configure(c *terraform.ResourceConfig) error { // Get a ResourceData for this configuration. To do this, we actually // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c) + diff, err := sm.Diff(nil, c, nil, nil) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go new file mode 100644 index 0000000..bf952f6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go @@ -0,0 +1,155 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform/config/configschema" + "github.com/zclconf/go-cty/cty" +) + +// The functions and methods in this file are concerned with the conversion +// of this package's schema model into the slightly-lower-level schema model +// used by Terraform core for configuration parsing. + +// CoreConfigSchema lowers the receiver to the schema model expected by +// Terraform core. 
+// +// This lower-level model has fewer features than the schema in this package, +// describing only the basic structure of configuration and state values we +// expect. The full schemaMap from this package is still required for full +// validation, handling of default values, etc. +// +// This method presumes a schema that passes InternalValidate, and so may +// panic or produce an invalid result if given an invalid schemaMap. +func (m schemaMap) CoreConfigSchema() *configschema.Block { + if len(m) == 0 { + // We return an actual (empty) object here, rather than a nil, + // because a nil result would mean that we don't have a schema at + // all, rather than that we have an empty one. + return &configschema.Block{} + } + + ret := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{}, + } + + for name, schema := range m { + if schema.Elem == nil { + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + switch schema.Elem.(type) { + case *Schema: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case *Resource: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) + } + } + + return ret +} + +// coreConfigSchemaAttribute prepares a configschema.Attribute representation +// of a schema. This is appropriate only for primitives or collections whose +// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections +// whose elem is a whole resource. +func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { + return &configschema.Attribute{ + Type: s.coreConfigSchemaType(), + Optional: s.Optional, + Required: s.Required, + Computed: s.Computed, + Sensitive: s.Sensitive, + } +} + +// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of +// a schema. This is appropriate only for collections whose Elem is an instance +// of Resource, and will panic otherwise. +func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { + ret := &configschema.NestedBlock{} + if nested := s.Elem.(*Resource).CoreConfigSchema(); nested != nil { + ret.Block = *nested + } + switch s.Type { + case TypeList: + ret.Nesting = configschema.NestingList + case TypeSet: + ret.Nesting = configschema.NestingSet + case TypeMap: + ret.Nesting = configschema.NestingMap + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type)) + } + + ret.MinItems = s.MinItems + ret.MaxItems = s.MaxItems + + if s.Required && s.MinItems == 0 { + // configschema doesn't have a "required" representation for nested + // blocks, but we can fake it by requiring at least one item. + ret.MinItems = 1 + } + + return ret +} + +// coreConfigSchemaType determines the core config schema type that corresponds +// to a particular schema's type. +func (s *Schema) coreConfigSchemaType() cty.Type { + switch s.Type { + case TypeString: + return cty.String + case TypeBool: + return cty.Bool + case TypeInt, TypeFloat: + // configschema doesn't distinguish int and float, so helper/schema + // will deal with this as an additional validation step after + // configuration has been parsed and decoded. 
+ return cty.Number + case TypeList, TypeSet, TypeMap: + var elemType cty.Type + switch set := s.Elem.(type) { + case *Schema: + elemType = set.coreConfigSchemaType() + case *Resource: + // In practice we don't actually use this for normal schema + // construction because we construct a NestedBlock in that + // case instead. See schemaMap.CoreConfigSchema. + elemType = set.CoreConfigSchema().ImpliedType() + default: + if set != nil { + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem)) + } + // Some pre-existing schemas assume string as default, so we need + // to be compatible with them. + elemType = cty.String + } + switch s.Type { + case TypeList: + return cty.List(elemType) + case TypeSet: + return cty.Set(elemType) + case TypeMap: + return cty.Map(elemType) + default: + // can never get here in practice, due to the case we're inside + panic("invalid collection type") + } + default: + // should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Type %s", s.Type)) + } +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema +// on the resource's schema. +func (r *Resource) CoreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go index 5a03d2d..8d93750 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go @@ -32,7 +32,7 @@ func DataSourceResourceShim(name string, dataSource *Resource) *Resource { // FIXME: Link to some further docs either on the website or in the // changelog, once such a thing exists. - dataSource.deprecationMessage = fmt.Sprintf( + dataSource.DeprecationMessage = fmt.Sprintf( "using %s as a resource is deprecated; consider using the data source instead", name, ) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go index 1660a67..b80b223 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go @@ -126,6 +126,8 @@ func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { switch v := current.Elem.(type) { case ValueType: current = &Schema{Type: v} + case *Schema: + current, _ = current.Elem.(*Schema) default: // maps default to string values. This is all we can have // if this is nested in another list or map. 
@@ -249,11 +251,10 @@ func readObjectField( } // convert map values to the proper primitive type based on schema.Elem -func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error { - - elemType := TypeString - if et, ok := schema.Elem.(ValueType); ok { - elemType = et +func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error { + elemType, err := getValueType(k, schema) + if err != nil { + return err } switch elemType { diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go index f958bbc..55a301d 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go @@ -206,7 +206,7 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, panic(fmt.Sprintf("unknown type: %#v", mraw)) } - err := mapValuesToPrimitive(result, schema) + err := mapValuesToPrimitive(k, result, schema) if err != nil { return FieldReadResult{}, nil } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go index 16bbae2..d558a5b 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go @@ -29,29 +29,59 @@ type DiffFieldReader struct { Diff *terraform.InstanceDiff Source FieldReader Schema map[string]*Schema + + // cache for memoizing ReadField calls. + cache map[string]cachedFieldReadResult +} + +type cachedFieldReadResult struct { + val FieldReadResult + err error } func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { + if r.cache == nil { + r.cache = make(map[string]cachedFieldReadResult) + } + + // Create the cache key by joining around a value that isn't a valid part + // of an address. This assumes that the Source and Schema are not changed + // for the life of this DiffFieldReader. 
+ cacheKey := strings.Join(address, "|") + if cached, ok := r.cache[cacheKey]; ok { + return cached.val, cached.err + } + schemaList := addrToSchema(address, r.Schema) if len(schemaList) == 0 { + r.cache[cacheKey] = cachedFieldReadResult{} return FieldReadResult{}, nil } + var res FieldReadResult + var err error + schema := schemaList[len(schemaList)-1] switch schema.Type { case TypeBool, TypeInt, TypeFloat, TypeString: - return r.readPrimitive(address, schema) + res, err = r.readPrimitive(address, schema) case TypeList: - return readListField(r, address, schema) + res, err = readListField(r, address, schema) case TypeMap: - return r.readMap(address, schema) + res, err = r.readMap(address, schema) case TypeSet: - return r.readSet(address, schema) + res, err = r.readSet(address, schema) case typeObject: - return readObjectField(r, address, schema.Elem.(map[string]*Schema)) + res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema)) default: panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) } + + r.cache[cacheKey] = cachedFieldReadResult{ + val: res, + err: err, + } + return res, err } func (r *DiffFieldReader) readMap( @@ -92,7 +122,8 @@ func (r *DiffFieldReader) readMap( result[k] = v.New } - err = mapValuesToPrimitive(result, schema) + key := address[len(address)-1] + err = mapValuesToPrimitive(key, result, schema) if err != nil { return FieldReadResult{}, nil } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go index 9533981..054efe0 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go @@ -61,7 +61,7 @@ func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err return true }) - err := mapValuesToPrimitive(result, schema) + err := mapValuesToPrimitive(k, result, schema) if err != nil { return FieldReadResult{}, nil } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go index 689ed8d..814c7ba 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go @@ -39,6 +39,19 @@ func (w *MapFieldWriter) unsafeWriteField(addr string, value string) { w.result[addr] = value } +// clearTree clears a field and any sub-fields of the given address out of the +// map. This should be used to reset some kind of complex structures (namely +// sets) before writing to make sure that any conflicting data is removed (for +// example, if the set was previously written to the writer's layer). +func (w *MapFieldWriter) clearTree(addr []string) { + prefix := strings.Join(addr, ".") + "." + for k := range w.result { + if strings.HasPrefix(k, prefix) { + delete(w.result, k) + } + } +} + func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error { w.lock.Lock() defer w.lock.Unlock() @@ -115,6 +128,14 @@ func (w *MapFieldWriter) setList( return fmt.Errorf("%s: %s", k, err) } + // Wipe the set from the current writer prior to writing if it exists. 
+ // Multiple writes to the same layer is a lot safer for lists than sets due + // to the fact that indexes are always deterministic and the length will + // always be updated with the current length on the last write, but making + // sure we have a clean namespace removes any chance for edge cases to pop up + // and ensures that the last write to the set is the correct value. + w.clearTree(addr) + // Set the entire list. var err error for i, elem := range vs { @@ -162,6 +183,10 @@ func (w *MapFieldWriter) setMap( vs[mk.String()] = mv.Interface() } + // Wipe this address tree. The contents of the map should always reflect the + // last write made to it. + w.clearTree(addr) + // Remove the pure key since we're setting the full map value delete(w.result, k) @@ -308,6 +333,13 @@ func (w *MapFieldWriter) setSet( value = s } + // Clear any keys that match the set address first. This is necessary because + // it's always possible and sometimes may be necessary to write to a certain + // writer layer more than once with different set data each time, which will + // lead to different keys being inserted, which can lead to determinism + // problems when the old data isn't wiped first. + w.clearTree(addr) + for code, elem := range value.(*Set).m { if err := w.set(append(addrCopy, code), elem); err != nil { return err diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go index 3a97629..38cd8c7 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go @@ -2,7 +2,7 @@ package schema -import "fmt" +import "strconv" const ( _getSource_name_0 = "getSourceStategetSourceConfig" @@ -13,8 +13,6 @@ const ( var ( _getSource_index_0 = [...]uint8{0, 14, 29} - _getSource_index_1 = [...]uint8{0, 13} - _getSource_index_2 = [...]uint8{0, 12} _getSource_index_3 = [...]uint8{0, 18, 32} ) @@ -31,6 +29,6 @@ func (i getSource) String() string { i -= 15 return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] default: - return fmt.Sprintf("getSource(%d)", i) + return "getSource(" + strconv.FormatInt(int64(i), 10) + ")" } } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go index fb28b41..6cd325d 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/configschema" "github.com/hashicorp/terraform/terraform" ) @@ -58,7 +59,7 @@ type Provider struct { meta interface{} - // a mutex is required because TestReset can directly repalce the stopCtx + // a mutex is required because TestReset can directly replace the stopCtx stopMu sync.Mutex stopCtx context.Context stopCtxCancel context.CancelFunc @@ -185,6 +186,29 @@ func (p *Provider) TestReset() error { return nil } +// GetSchema implementation of terraform.ResourceProvider interface +func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, 
name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + // Input implementation of terraform.ResourceProvider interface. func (p *Provider) Input( input terraform.UIInput, @@ -227,7 +251,7 @@ func (p *Provider) Configure(c *terraform.ResourceConfig) error { // Get a ResourceData for this configuration. To do this, we actually // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c) + diff, err := sm.Diff(nil, c, nil, p.meta) if err != nil { return err } @@ -269,7 +293,7 @@ func (p *Provider) Diff( return nil, fmt.Errorf("unknown resource type: %s", info.Type) } - return r.Diff(s, c) + return r.Diff(s, c, p.meta) } // Refresh implementation of terraform.ResourceProvider interface. @@ -305,6 +329,10 @@ func (p *Provider) Resources() []terraform.ResourceType { result = append(result, terraform.ResourceType{ Name: k, Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, }) } @@ -382,7 +410,7 @@ func (p *Provider) ReadDataDiff( return nil, fmt.Errorf("unknown data source: %s", info.Type) } - return r.Diff(nil, c) + return r.Diff(nil, c, p.meta) } // RefreshData implementation of terraform.ResourceProvider interface. @@ -410,6 +438,10 @@ func (p *Provider) DataSources() []terraform.DataSource { for _, k := range keys { result = append(result, terraform.DataSource{ Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, }) } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go index 476192e..a8d42db 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go @@ -146,7 +146,7 @@ func (p *Provisioner) Apply( } sm := schemaMap(p.ConnSchema) - diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) + diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil) if err != nil { return err } @@ -160,7 +160,7 @@ func (p *Provisioner) Apply( // Build the configuration data. Doing this requires making a "diff" // even though that's never used. We use that just to get the correct types. configMap := schemaMap(p.Schema) - diff, err := configMap.Diff(nil, c) + diff, err := configMap.Diff(nil, c, nil, nil) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go index ddba109..d3be2d6 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go @@ -85,6 +85,37 @@ type Resource struct { Delete DeleteFunc Exists ExistsFunc + // CustomizeDiff is a custom function for working with the diff that + // Terraform has created for this resource - it can be used to customize the + // diff that has been created, diff values not controlled by configuration, + // or even veto the diff altogether and abort the plan. 
It is passed a + // *ResourceDiff, a structure similar to ResourceData but lacking most write + // functions like Set, while introducing new functions that work with the + // diff such as SetNew, SetNewComputed, and ForceNew. + // + // The phases Terraform runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. + // + // If this function needs to access external API resources, remember to flag + // the RequiresRefresh attribute mentioned below to ensure that + // -refresh=false is blocked when running plan or apply, as this means that + // this resource requires refresh-like behaviour to work effectively. + // + // For the most part, only computed fields can be customized by this + // function. + // + // This function is only allowed on regular resources (not data sources). + CustomizeDiff CustomizeDiffFunc + // Importer is the ResourceImporter implementation for this resource. // If this is nil, then this resource does not support importing. If // this is non-nil, then it supports importing and ResourceImporter @@ -93,9 +124,7 @@ type Resource struct { Importer *ResourceImporter // If non-empty, this string is emitted as a warning during Validate. - // This is a private interface for now, for use by DataSourceResourceShim, - // and not for general use. (But maybe later...) - deprecationMessage string + DeprecationMessage string // Timeouts allow users to specify specific time durations in which an // operation should time out, to allow them to extend an action to suit their @@ -126,6 +155,9 @@ type ExistsFunc func(*ResourceData, interface{}) (bool, error) type StateMigrateFunc func( int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) +// See Resource documentation. +type CustomizeDiffFunc func(*ResourceDiff, interface{}) error + // Apply creates, updates, and/or deletes a resource. func (r *Resource) Apply( s *terraform.InstanceState, @@ -202,11 +234,11 @@ func (r *Resource) Apply( return r.recordCurrentSchemaVersion(data.State()), err } -// Diff returns a diff of this resource and is API compatible with the -// ResourceProvider interface. +// Diff returns a diff of this resource. 
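+// +// As a purely illustrative, hypothetical sketch (not part of the upstream +// source; "size" is an assumed TypeInt attribute), a CustomizeDiff hook that +// participates in this diff might look like: +// +// CustomizeDiff: func(d *schema.ResourceDiff, meta interface{}) error { +// old, new := d.GetChange("size") +// // Shrinking "size" requires replacement, which the plain schema +// // cannot express; force a new resource in that case. +// if new.(int) < old.(int) { +// return d.ForceNew("size") +// } +// return nil +// }, 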
func (r *Resource) Diff( s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { t := &ResourceTimeout{} err := t.ConfigDecode(r, c) @@ -215,7 +247,7 @@ func (r *Resource) Diff( return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) } - instanceDiff, err := schemaMap(r.Schema).Diff(s, c) + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta) if err != nil { return instanceDiff, err } @@ -235,8 +267,8 @@ func (r *Resource) Diff( func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { warns, errs := schemaMap(r.Schema).Validate(c) - if r.deprecationMessage != "" { - warns = append(warns, r.deprecationMessage) + if r.DeprecationMessage != "" { + warns = append(warns, r.DeprecationMessage) } return warns, errs @@ -248,7 +280,6 @@ func (r *Resource) ReadDataApply( d *terraform.InstanceDiff, meta interface{}, ) (*terraform.InstanceState, error) { - // Data sources are always built completely from scratch // on each read, so the source state is always nil. data, err := schemaMap(r.Schema).Data(nil, d) @@ -346,6 +377,11 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error if r.Create != nil || r.Update != nil || r.Delete != nil { return fmt.Errorf("must not implement Create, Update or Delete") } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } } tsm := topSchemaMap @@ -393,19 +429,43 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error return err } } + + for k, f := range tsm { + if isReservedResourceFieldName(k, f) { + return fmt.Errorf("%s is a reserved field name", k) + } + } } - // Resource-specific checks - for k, _ := range tsm { - if isReservedResourceFieldName(k) { - return fmt.Errorf("%s is a reserved field name for a resource", k) + // Data source + if r.isTopLevel() && !writable { + tsm = schemaMap(r.Schema) + for k, _ := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } } } return schemaMap(r.Schema).InternalValidate(tsm) } -func isReservedResourceFieldName(name string) bool { +func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range config.ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func isReservedResourceFieldName(name string, s *Schema) bool { + // Allow phasing out "id" + // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 + if name == "id" && (s.Deprecated != "" || s.Removed != "") { + return false + } + for _, reservedName := range config.ReservedResourceFields { if name == reservedName { return true @@ -430,6 +490,12 @@ func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { panic(err) } + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + // Set the schema version to latest by default result.meta = map[string]interface{}{ "schema_version": strconv.Itoa(r.SchemaVersion), @@ -450,7 +516,7 @@ func (r *Resource) TestResourceData() *ResourceData { // Returns true if the resource is "top level" i.e. not a sub-resource. func (r *Resource) isTopLevel() bool { // TODO: This is a heuristic; replace with a definitive attribute? 
- return r.Create != nil + return (r.Create != nil || r.Read != nil) } // Determines if a given InstanceState needs to be migrated by checking the diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go index b2bc8f6..6cc01ee 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go @@ -35,6 +35,8 @@ type ResourceData struct { partialMap map[string]struct{} once sync.Once isNew bool + + panicOnError bool } // getResult is the internal structure that is generated when a Get @@ -104,6 +106,22 @@ func (d *ResourceData) GetOk(key string) (interface{}, bool) { return r.Value, exists } +// GetOkExists returns the data for a given key and whether or not the key +// has been set. This is only useful for determining if boolean attributes +// have been set, if they are Optional but do not have a Default value. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows attributes +// without a default to be fully checked for a literal assignment, regardless +// of the zero-value for that type. +// This should only be used if absolutely required. +func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + return r.Value, exists +} + func (d *ResourceData) getRaw(key string, level getSource) getResult { var parts []string if key != "" { @@ -168,7 +186,11 @@ func (d *ResourceData) Set(key string, value interface{}) error { } } - return d.setWriter.WriteField(strings.Split(key, "."), value) + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil && d.panicOnError { + panic(err) + } + return err } // SetPartial adds the key to the final state output while @@ -293,6 +315,7 @@ func (d *ResourceData) State() *terraform.InstanceState { mapW := &MapFieldWriter{Schema: d.schema} if err := mapW.WriteField(nil, rawMap); err != nil { + log.Printf("[ERR] Error writing fields: %s", err) return nil } @@ -344,6 +367,13 @@ func (d *ResourceData) Timeout(key string) time.Duration { key = strings.ToLower(key) + // System default of 20 minutes + defaultTimeout := 20 * time.Minute + + if d.timeouts == nil { + return defaultTimeout + } + var timeout *time.Duration switch key { case TimeoutCreate: @@ -364,8 +394,7 @@ func (d *ResourceData) Timeout(key string) time.Duration { return *d.timeouts.Default } - // Return system default of 20 minutes - return 20 * time.Minute + return defaultTimeout } func (d *ResourceData) init() { @@ -423,7 +452,7 @@ func (d *ResourceData) init() { } func (d *ResourceData) diffChange( - k string) (interface{}, interface{}, bool, bool) { + k string) (interface{}, interface{}, bool, bool, bool) { // Get the change between the state and the config. 
o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) if !o.Exists { @@ -434,7 +463,7 @@ func (d *ResourceData) diffChange( } // Return the old, new, and whether there is a change - return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false } func (d *ResourceData) getChange( diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go new file mode 100644 index 0000000..7db3dec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go @@ -0,0 +1,559 @@ +package schema + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/hashicorp/terraform/terraform" +) + +// newValueWriter is a minor re-implementation of MapFieldWriter to include +// keys that should be marked as computed, to represent the new part of a +// pseudo-diff. +type newValueWriter struct { + *MapFieldWriter + + // A list of keys that should be marked as computed. + computedKeys map[string]bool + + // A lock to prevent races on writes. The underlying writer will have one as + // well - this is for computed keys. + lock sync.Mutex + + // To be used with init. + once sync.Once +} + +// init performs any initialization tasks for the newValueWriter. +func (w *newValueWriter) init() { + if w.computedKeys == nil { + w.computedKeys = make(map[string]bool) + } +} + +// WriteField overrides MapFieldWriter's WriteField, adding the ability to flag +// the address as computed. +func (w *newValueWriter) WriteField(address []string, value interface{}, computed bool) error { + // Fail the write if we have a non-nil value and computed is true. + // NewComputed values should not have a value when written. + if value != nil && computed { + return errors.New("Non-nil value with computed set") + } + + if err := w.MapFieldWriter.WriteField(address, value); err != nil { + return err + } + + w.once.Do(w.init) + + w.lock.Lock() + defer w.lock.Unlock() + if computed { + w.computedKeys[strings.Join(address, ".")] = true + } + return nil +} + +// ComputedKeysMap returns the underlying computed keys map. +func (w *newValueWriter) ComputedKeysMap() map[string]bool { + w.once.Do(w.init) + return w.computedKeys +} + +// newValueReader is a minor re-implementation of MapFieldReader and is the +// read counterpart to newValueWriter, allowing the read of keys flagged as +// computed to accommodate the diff override logic in ResourceDiff. +type newValueReader struct { + *MapFieldReader + + // The list of computed keys from a newValueWriter. + computedKeys map[string]bool +} + +// ReadField reads the values from the underlying writer, returning the +// computed value if it is found as well. +func (r *newValueReader) ReadField(address []string) (FieldReadResult, error) { + addrKey := strings.Join(address, ".") + v, err := r.MapFieldReader.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + for computedKey := range r.computedKeys { + if childAddrOf(addrKey, computedKey) { + if strings.HasSuffix(addrKey, ".#") { + // This is a count value for a list or set that has been marked as + // computed, or a sub-list/sub-set of a complex resource that has + // been marked as computed. We need to pass through to other readers + // so that an accurate previous count can be fetched for the diff. 
+ v.Exists = false + } + v.Computed = true + } + } + + return v, nil +} + +// ResourceDiff is used to query and make custom changes to an in-flight diff. +// It can be used to veto particular changes in the diff, customize the diff +// that has been created, or diff values not controlled by config. +// +// The object functions similarly to ResourceData, but most notably lacks +// Set, SetPartial, and Partial, as it should be used to change diff values +// only. Most other first-class ResourceData functions exist, namely Get, +// GetOk, HasChange, and GetChange. +// +// All functions in ResourceDiff, save for ForceNew, can only be used on +// computed fields. +type ResourceDiff struct { + // The schema for the resource being worked on. + schema map[string]*Schema + + // The current config for this resource. + config *terraform.ResourceConfig + + // The state for this resource as it exists post-refresh, after the initial + // diff. + state *terraform.InstanceState + + // The diff created by Terraform. This diff is used, along with state, + // config, and custom-set diff data, to provide a multi-level reader + // experience similar to ResourceData. + diff *terraform.InstanceDiff + + // The internal reader structure that contains the state, config, the default + // diff, and the new diff. + multiReader *MultiLevelFieldReader + + // A writer that writes overridden new fields. + newWriter *newValueWriter + + // Tracks which keys have been updated by ResourceDiff to ensure that the + // diff does not get re-run on keys that were not touched, or diffs that were + // just removed (re-running on the latter would just roll back the removal). + updatedKeys map[string]bool + + // Tracks which keys were flagged as forceNew. These keys are not saved in + // newWriter, but we need to track them so that they can be re-diffed later. + forcedNewKeys map[string]bool +} + +// newResourceDiff creates a new ResourceDiff instance. +func newResourceDiff(schema map[string]*Schema, config *terraform.ResourceConfig, state *terraform.InstanceState, diff *terraform.InstanceDiff) *ResourceDiff { + d := &ResourceDiff{ + config: config, + state: state, + diff: diff, + schema: schema, + } + + d.newWriter = &newValueWriter{ + MapFieldWriter: &MapFieldWriter{Schema: d.schema}, + } + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["newDiff"] = &newValueReader{ + MapFieldReader: &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.newWriter.Map()), + }, + computedKeys: d.newWriter.ComputedKeysMap(), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "newDiff", + }, + + Readers: readers, + } + + d.updatedKeys = make(map[string]bool) + d.forcedNewKeys = make(map[string]bool) + + return d +} + +// UpdatedKeys returns the keys that were updated by this ResourceDiff run. +// These are the only keys that a diff should be re-calculated for. 
+// +// This is the combined result of both keys for which diff values were updated +// or cleared, and also keys that were flagged to be re-diffed as a result +// of ForceNew. +func (d *ResourceDiff) UpdatedKeys() []string { + var s []string + for k := range d.updatedKeys { + s = append(s, k) + } + for k := range d.forcedNewKeys { + // Skip keys already collected above so the result stays free of + // duplicates. + found := false + for _, l := range s { + if k == l { + found = true + break + } + } + if found { + continue + } + s = append(s, k) + } + return s +} + +// Clear wipes the diff for a particular key. It is called by ResourceDiff's +// functionality to remove any possibility of conflicts, but can be called on +// its own to just remove a specific key from the diff completely. +// +// Note that this does not wipe an override. This function is only allowed on +// computed keys. +func (d *ResourceDiff) Clear(key string) error { + if err := d.checkKey(key, "Clear", true); err != nil { + return err + } + + return d.clear(key) +} + +func (d *ResourceDiff) clear(key string) error { + // Check the schema to make sure that this key exists first. + schemaL := addrToSchema(strings.Split(key, "."), d.schema) + if len(schemaL) == 0 { + return fmt.Errorf("%s is not a valid key", key) + } + + for k := range d.diff.Attributes { + if strings.HasPrefix(k, key) { + delete(d.diff.Attributes, k) + } + } + return nil +} + +// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff +// where we need to act on all nested fields +// without calling out each one separately. +func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { + keys := make([]string, 0) + for k := range d.diff.Attributes { + if strings.HasPrefix(k, prefix) { + keys = append(keys, k) + } + } + return keys +} + +// diffChange helps to implement resourceDiffer and derives its change values +// from ResourceDiff's own change data, in addition to existing diff, config, and state. +func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { + old, new, customized := d.getChange(key) + + if !old.Exists { + old.Value = nil + } + if !new.Exists || d.removed(key) { + new.Value = nil + } + + return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized +} + +// SetNew is used to set a new diff value for the mentioned key. The value must +// be correct for the attribute's schema (mostly relevant for maps, lists, and +// sets). The original value from the state is used as the old value. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNew(key string, value interface{}) error { + if err := d.checkKey(key, "SetNew", false); err != nil { + return err + } + + return d.setDiff(key, value, false) +} + +// SetNewComputed functions like SetNew, except that it blanks out a new value +// and marks it as computed. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNewComputed(key string) error { + if err := d.checkKey(key, "SetNewComputed", false); err != nil { + return err + } + + return d.setDiff(key, nil, true) +} + +// setDiff performs common diff setting behaviour. 
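+// +// As a hypothetical illustration (not upstream code; "package" is an assumed +// attribute and "version" an assumed Computed attribute), both SetNew and +// SetNewComputed funnel through here, and would be used from a CustomizeDiff +// function like so: +// +// if d.HasChange("package") { +// // The new "version" is unknown until the upgrade actually runs. +// if err := d.SetNewComputed("version"); err != nil { +// return err +// } +// } 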
+func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { + if err := d.clear(key); err != nil { + return err + } + + if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err) + } + + d.updatedKeys[key] = true + + return nil +} + +// ForceNew force-flags ForceNew in the schema for a specific key, and +// re-calculates its diff, effectively causing this attribute to force a new +// resource. +// +// Keep in mind that forcing a new resource will force a second run of the +// resource's CustomizeDiff function (with a new ResourceDiff) once the current +// one has completed. This second run is performed without state. This behavior +// will be the same as if a new resource is being created and is performed to +// ensure that the diff looks like the diff for a new resource as much as +// possible. CustomizeDiff should expect such a scenario and act correctly. +// +// This function returns an error if there is no diff for the given key. +// +// Note that the change to schema is permanent for the lifecycle of this +// specific ResourceDiff instance. +func (d *ResourceDiff) ForceNew(key string) error { + if !d.HasChange(key) { + return fmt.Errorf("ForceNew: No changes for %s", key) + } + + keyParts := strings.Split(key, ".") + var schema *Schema + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } else { + return fmt.Errorf("ForceNew: %s is not a valid key", key) + } + + schema.ForceNew = true + + // Flag this for a re-diff. Don't save any values to guarantee that existing + // diffs aren't messed with, as this gets messy when dealing with complex + // structures, zero values, etc. + d.forcedNewKeys[keyParts[0]] = true + + return nil +} + +// Get hands off to ResourceData.Get. +func (d *ResourceDiff) Get(key string) interface{} { + r, _ := d.GetOk(key) + return r +} + +// GetChange gets the change between the state and diff, checking first to see +// if an overridden diff exists. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { + old, new, _ := d.getChange(key) + return old.Value, new.Value +} + +// GetOk functions the same way as ResourceData.GetOk, but it also checks the +// new diff levels to provide data consistent with the current state of the +// customized diff. +func (d *ResourceDiff) GetOk(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists functions the same way as GetOkExists within ResourceData, but +// it also checks the new diff levels to provide data consistent with the +// current state of the customized diff. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows attributes +// without a default to be fully checked for a literal assignment, regardless +// of the zero-value for that type. 
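+// +// For example (a hedged sketch; "enabled" is an assumed Optional bool +// attribute with no Default), this distinguishes an explicit +// "enabled = false" in config from the attribute being omitted entirely: +// +// if v, ok := d.GetOkExists("enabled"); ok { +// // "enabled" was literally assigned, even if it was assigned false. +// enabled := v.(bool) +// _ = enabled +// } 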
+func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + return r.Value, exists +} + +// NewValueKnown returns true if the new value for the given key is available +// as its final value at diff time. If the return value is false, this means +// either the value is based on interpolation that was unavailable at diff +// time, or that the value was explicitly marked as computed by SetNewComputed. +func (d *ResourceDiff) NewValueKnown(key string) bool { + r := d.get(strings.Split(key, "."), "newDiff") + return !r.Computed +} + +// HasChange checks to see if there is a change between state and the diff, or +// in the overridden diff. +func (d *ResourceDiff) HasChange(key string) bool { + old, new := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set. + if eq, ok := old.(Equal); ok { + return !eq.Equal(new) + } + + return !reflect.DeepEqual(old, new) +} + +// Id returns the ID of this resource. +// +// Note that technically, ID does not change during diffs (it either has +// already changed in the refresh, or will change on update), hence we do not +// support updating the ID or fetching it from anything other than state. +func (d *ResourceDiff) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + } + return result +} + +// getChange gets values from two different levels, designed for use in +// diffChange, HasChange, and GetChange. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { + old := d.get(strings.Split(key, "."), "state") + var new getResult + for p := range d.updatedKeys { + if childAddrOf(key, p) { + new = d.getExact(strings.Split(key, "."), "newDiff") + return old, new, true + } + } + new = d.get(strings.Split(key, "."), "newDiff") + return old, new, false +} + +// removed checks to see if the key is present in the existing, pre-customized +// diff and if it was marked as NewRemoved. +func (d *ResourceDiff) removed(k string) bool { + diff, ok := d.diff.Attributes[k] + if !ok { + return false + } + return diff.NewRemoved +} + +// get performs the appropriate multi-level reader logic for ResourceDiff, +// starting at source. Refer to newResourceDiff for the level order. +func (d *ResourceDiff) get(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldMerge(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// getExact gets an attribute from the exact level referenced by source. +func (d *ResourceDiff) getExact(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldExact(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// finalizeResult does some post-processing of the result produced by get and getExact. 
+func (d *ResourceDiff) finalizeResult(addr []string, result FieldReadResult) getResult { + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +// childAddrOf does a comparison of two addresses to see if one is the child of +// the other. +func childAddrOf(child, parent string) bool { + cs := strings.Split(child, ".") + ps := strings.Split(parent, ".") + if len(ps) > len(cs) { + return false + } + return reflect.DeepEqual(ps, cs[:len(ps)]) +} + +// checkKey checks the key to make sure it exists and is computed. +func (d *ResourceDiff) checkKey(key, caller string, nested bool) error { + var schema *Schema + if nested { + keyParts := strings.Split(key, ".") + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + } else { + s, ok := d.schema[key] + if ok { + schema = s + } + } + if schema == nil { + return fmt.Errorf("%s: invalid key: %s", caller, key) + } + if !schema.Computed { + return fmt.Errorf("%s only operates on computed keys - %s is not one", caller, key) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go index acb5618..0ea5aad 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go @@ -21,9 +21,13 @@ import ( "strings" "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/copystructure" "github.com/mitchellh/mapstructure" ) +// Name of ENV variable which (if not empty) prefers panic over error +const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" + // type used for schema package context keys type contextKey string @@ -116,12 +120,16 @@ type Schema struct { ForceNew bool StateFunc SchemaStateFunc - // The following fields are only set for a TypeList or TypeSet Type. + // The following fields are only set for a TypeList, TypeSet, or TypeMap. // - // Elem must be either a *Schema or a *Resource only if the Type is - // TypeList, and represents what the element type is. If it is *Schema, - // the element type is just a simple value. If it is *Resource, the - // element type is a complex structure, potentially with its own lifecycle. + // Elem represents the element type. For a TypeMap, it must be a *Schema + // with a Type of TypeString, otherwise it may be either a *Schema or a + // *Resource. If it is *Schema, the element type is just a simple value. + // If it is *Resource, the element type is a complex structure, + // potentially with its own lifecycle. + Elem interface{} + + // The following fields are only set for a TypeList or TypeSet. // // MaxItems defines a maximum amount of items that can exist within a // TypeSet or TypeList. Specific use cases would be if a TypeSet is being @@ -138,7 +146,6 @@ type Schema struct { // ["foo"] automatically. This is primarily for legacy reasons and the // ambiguity is not recommended for new usage. Promotion is only allowed // for primitive element types. 
- Elem interface{} MaxItems int MinItems int PromoteSingle bool @@ -192,7 +199,7 @@ type Schema struct { Sensitive bool } -// SchemaDiffSuppresFunc is a function which can be used to determine +// SchemaDiffSuppressFunc is a function which can be used to determine // whether a detected diff on a schema element is "valid" or not, and // suppress it from the plan if necessary. // @@ -289,8 +296,7 @@ func (s *Schema) ZeroValue() interface{} { } } -func (s *Schema) finalizeDiff( - d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff { +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { if d == nil { return d } @@ -331,13 +337,20 @@ func (s *Schema) finalizeDiff( } if s.Computed { - if d.Old != "" && d.New == "" { - // This is a computed value with an old value set already, - // just let it go. - return nil + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } } - if d.New == "" { + if d.New == "" && !d.NewComputed { // Computed attribute without a new value set d.NewComputed = true } @@ -354,6 +367,13 @@ func (s *Schema) finalizeDiff( // schemaMap is a wrapper that adds nice functions on top of schemas. type schemaMap map[string]*Schema +func (m schemaMap) panicOnError() bool { + if os.Getenv(PanicOnErr) != "" { + return true + } + return false +} + // Data returns a ResourceData for the given schema, state, and diff. // // The diff is optional. @@ -361,17 +381,30 @@ func (m schemaMap) Data( s *terraform.InstanceState, d *terraform.InstanceDiff) (*ResourceData, error) { return &ResourceData{ - schema: m, - state: s, - diff: d, + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), }, nil } +// DeepCopy returns a copy of this schemaMap. The copy can be safely modified +// without affecting the original. +func (m *schemaMap) DeepCopy() schemaMap { + copy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copy.(*schemaMap) +} + // Diff returns the diff for a resource given the schema map, // state, and configuration. func (m schemaMap) Diff( s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + c *terraform.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}) (*terraform.InstanceDiff, error) { result := new(terraform.InstanceDiff) result.Attributes = make(map[string]*terraform.ResourceAttrDiff) @@ -381,9 +414,10 @@ func (m schemaMap) Diff( } d := &ResourceData{ - schema: m, - state: s, - config: c, + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), } for k, schema := range m { @@ -393,6 +427,29 @@ func (m schemaMap) Diff( } } + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. 
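+ // Note: the customization below runs against a deep copy of the schema + // map, so schema mutations made by the hook (notably ResourceDiff.ForceNew, + // which permanently flags ForceNew on its schema) cannot leak back into the + // shared schema. Only the keys reported by UpdatedKeys are re-diffed + // afterwards.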
+ if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result, rd, false) + if err != nil { + return nil, err + } + } + } + // If the diff requires a new resource, then we recompute the diff // so we have the complete new resource diff, and preserve the // RequiresNew fields where necessary so the user knows exactly what @@ -418,6 +475,21 @@ func (m schemaMap) Diff( } } + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } + } + // Force all the fields to not force a new since we know what we // want to force new. for k, attr := range result2.Attributes { @@ -456,13 +528,6 @@ func (m schemaMap) Diff( result = result2 } - // Remove any nil diffs just to keep things clean - for k, v := range result.Attributes { - if v == nil { - delete(result.Attributes, k) - } - } - // Go through and detect all of the ComputedWhens now that we've // finished the diff. // TODO @@ -681,11 +746,23 @@ func isValidFieldName(name string) bool { return re.MatchString(name) } +// resourceDiffer is an interface that is used by the private diff functions. +// This helps facilitate diff logic for both ResourceData and ResourceDiff with +// minimal divergence in code. +type resourceDiffer interface { + diffChange(string) (interface{}, interface{}, bool, bool, bool) + Get(string) interface{} + GetChange(string) (interface{}, interface{}) + GetOk(string) (interface{}, bool) + HasChange(string) bool + Id() string +} + func (m schemaMap) diff( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { unsupressedDiff := new(terraform.InstanceDiff) @@ -706,12 +783,14 @@ func (m schemaMap) diff( } for attrK, attrV := range unsupressedDiff.Attributes { - if schema.DiffSuppressFunc != nil && - attrV != nil && - schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) { - continue + switch rd := d.(type) { + case *ResourceData: + if schema.DiffSuppressFunc != nil && + attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { + continue + } } - diff.Attributes[attrK] = attrV } @@ -722,9 +801,9 @@ func (m schemaMap) diffList( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { - o, n, _, computedList := d.diffChange(k) + o, n, _, computedList, customized := d.diffChange(k) if computedList { n = nil } @@ -791,10 +870,13 @@ func (m schemaMap) diffList( oldStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) } // Figure out the maximum @@ -841,13 +923,13 @@ func (m schemaMap) diffMap( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { prefix := k + "." 
// First get all the values from the state var stateMap, configMap map[string]string - o, n, _, nComputed := d.diffChange(k) + o, n, _, nComputed, customized := d.diffChange(k) if err := mapstructure.WeakDecode(o, &stateMap); err != nil { return fmt.Errorf("%s: %s", k, err) } @@ -899,6 +981,7 @@ func (m schemaMap) diffMap( Old: oldStr, New: newStr, }, + customized, ) } @@ -916,16 +999,22 @@ func (m schemaMap) diffMap( continue } - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: old, - New: v, - }) + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) } for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: v, - NewRemoved: true, - }) + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) } return nil @@ -935,10 +1024,10 @@ func (m schemaMap) diffSet( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { - o, n, _, computedSet := d.diffChange(k) + o, n, _, computedSet, customized := d.diffChange(k) if computedSet { n = nil } @@ -997,20 +1086,26 @@ func (m schemaMap) diffSet( countStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: countStr, - NewComputed: true, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) return nil } // If the counts are not the same, then record that diff changed := oldLen != newLen if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) } // Build the list of codes that will make up our set. 
This is the @@ -1056,11 +1151,11 @@ func (m schemaMap) diffString( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { var originalN interface{} var os, ns string - o, n, _, computed := d.diffChange(k) + o, n, _, computed, customized := d.diffChange(k) if schema.StateFunc != nil && n != nil { originalN = n n = schema.StateFunc(n) @@ -1090,20 +1185,23 @@ func (m schemaMap) diffString( } removed := false - if o != nil && n == nil { + if o != nil && n == nil && !computed { removed = true } if removed && schema.Computed { return nil } - diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: os, - New: ns, - NewExtra: originalN, - NewRemoved: removed, - NewComputed: computed, - }) + diff.Attributes[k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) return nil } @@ -1172,9 +1270,9 @@ func (m schemaMap) validateConflictingAttributes( } for _, conflicting_key := range schema.ConflictsWith { - if value, ok := c.Get(conflicting_key); ok { + if _, ok := c.Get(conflicting_key); ok { return fmt.Errorf( - "%q: conflicts with %s (%#v)", k, conflicting_key, value) + "%q: conflicts with %s", k, conflicting_key) } } @@ -1363,13 +1461,10 @@ func getValueType(k string, schema *Schema) (ValueType, error) { return vt, nil } + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. if s, ok := schema.Elem.(*Schema); ok { - if s.Elem == nil { - return TypeString, nil - } - if vt, ok := s.Elem.(ValueType); ok { - return vt, nil - } + return s.Type, nil } if _, ok := schema.Elem.(*Resource); ok { @@ -1430,7 +1525,6 @@ func (m schemaMap) validatePrimitive( raw interface{}, schema *Schema, c *terraform.ResourceConfig) ([]string, []error) { - // Catch if the user gave a complex type where a primitive was // expected, so we can return a friendly error message that // doesn't contain Go type system terminology. diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go index de05f40..cba2890 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go @@ -17,6 +17,12 @@ func HashString(v interface{}) int { return hashcode.String(v.(string)) } +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. +func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + // HashResource hashes complex structures that are described using // a *Resource. This is the default set implementation used when a set's // element type is a full resource. @@ -153,6 +159,31 @@ func (s *Set) Equal(raw interface{}) bool { return reflect.DeepEqual(s.m, other.m) } +// HashEqual simply compares the keys of the top-level map to the keys in the +// other set's top-level map to see if they are equal. This obviously assumes +// you have a properly working hash function - use HashResource if in doubt. 
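+// +// A brief hypothetical usage sketch (not part of the upstream source): +// +// a := NewSet(HashString, []interface{}{"foo", "bar"}) +// b := NewSet(HashString, []interface{}{"bar", "foo"}) +// same := a.HashEqual(b) // true: both sets carry identical hash keys 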
+func (s *Set) HashEqual(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + ks1 := make([]string, 0) + ks2 := make([]string, 0) + + for k := range s.m { + ks1 = append(ks1, k) + } + for k := range other.m { + ks2 = append(ks2, k) + } + + sort.Strings(ks1) + sort.Strings(ks2) + + return reflect.DeepEqual(ks1, ks2) +} + func (s *Set) GoString() string { return fmt.Sprintf("*Set(%#v)", s.m) } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go index 9765bdb..da754ac 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go @@ -10,13 +10,15 @@ import ( // TestResourceDataRaw creates a ResourceData from a raw configuration map. func TestResourceDataRaw( t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + c, err := config.NewRawConfig(raw) if err != nil { t.Fatalf("err: %s", err) } sm := schemaMap(schema) - diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) + diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil) if err != nil { t.Fatalf("err: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go index 1610cec..3bc3ac4 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go @@ -2,7 +2,7 @@ package schema -import "fmt" +import "strconv" const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" @@ -10,7 +10,7 @@ var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} func (i ValueType) String() string { if i < 0 || i >= ValueType(len(_ValueType_index)-1) { - return fmt.Sprintf("ValueType(%d)", i) + return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" } return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go deleted file mode 100644 index edc1e2a..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go +++ /dev/null @@ -1,83 +0,0 @@ -package shadow - -import ( - "fmt" - "io" - "reflect" - - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/reflectwalk" -) - -// Close will close all shadow values within the given structure. -// -// This uses reflection to walk the structure, find all shadow elements, -// and close them. Currently this will only find struct fields that are -// shadow values, and not slice elements, etc. -func Close(v interface{}) error { - // We require a pointer so we can address the internal fields - val := reflect.ValueOf(v) - if val.Kind() != reflect.Ptr { - return fmt.Errorf("value must be a pointer") - } - - // Walk and close - var w closeWalker - if err := reflectwalk.Walk(v, &w); err != nil { - return err - } - - return w.Err -} - -type closeWalker struct { - Err error -} - -func (w *closeWalker) Struct(reflect.Value) error { - // Do nothing. 
We implement this for reflectwalk.StructWalker - return nil -} - -var closerType = reflect.TypeOf((*io.Closer)(nil)).Elem() - -func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error { - // Not sure why this would be but lets avoid some panics - if !v.IsValid() { - return nil - } - - // Empty for exported, so don't check unexported fields - if f.PkgPath != "" { - return nil - } - - // Verify the io.Closer is in this package - typ := v.Type() - if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" { - return nil - } - - var closer io.Closer - if v.Type().Implements(closerType) { - closer = v.Interface().(io.Closer) - } else if v.CanAddr() { - // The Close method may require a pointer receiver, but we only have a value. - v := v.Addr() - if v.Type().Implements(closerType) { - closer = v.Interface().(io.Closer) - } - } - - if closer == nil { - return reflectwalk.SkipEntry - } - - // Close it - if err := closer.Close(); err != nil { - w.Err = multierror.Append(w.Err, err) - } - - // Don't go into the struct field - return reflectwalk.SkipEntry -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go deleted file mode 100644 index 4223e92..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go +++ /dev/null @@ -1,128 +0,0 @@ -package shadow - -import ( - "sync" -) - -// ComparedValue is a struct that finds a value by comparing some key -// to the list of stored values. This is useful when there is no easy -// uniquely identifying key that works in a map (for that, use KeyedValue). -// -// ComparedValue is very expensive, relative to other Value types. Try to -// limit the number of values stored in a ComparedValue by potentially -// nesting it within a KeyedValue (a keyed value points to a compared value, -// for example). -type ComparedValue struct { - // Func is a function that is given the lookup key and a single - // stored value. If it matches, it returns true. - Func func(k, v interface{}) bool - - lock sync.Mutex - once sync.Once - closed bool - values []interface{} - waiters map[interface{}]*Value -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the ErrClosed docs. -func (w *ComparedValue) Close() error { - w.lock.Lock() - defer w.lock.Unlock() - - // Set closed to true always - w.closed = true - - // For all waiters, complete with ErrClosed - for k, val := range w.waiters { - val.SetValue(ErrClosed) - delete(w.waiters, k) - } - - return nil -} - -// Value returns the value that was set for the given key, or blocks -// until one is available. -func (w *ComparedValue) Value(k interface{}) interface{} { - v, val := w.valueWaiter(k) - if val == nil { - return v - } - - return val.Value() -} - -// ValueOk gets the value for the given key, returning immediately if the -// value doesn't exist. The second return argument is true if the value exists. -func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) { - v, val := w.valueWaiter(k) - return v, val == nil -} - -func (w *ComparedValue) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - w.once.Do(w.init) - - // Check if we already have this exact value (by simply comparing - // with == directly). If we do, then we don't insert it again. 
- found := false - for _, v2 := range w.values { - if v == v2 { - found = true - break - } - } - - if !found { - // Set the value, always - w.values = append(w.values, v) - } - - // Go through the waiters - for k, val := range w.waiters { - if w.Func(k, v) { - val.SetValue(v) - delete(w.waiters, k) - } - } -} - -func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) { - w.lock.Lock() - w.once.Do(w.init) - - // Look for a pre-existing value - for _, v := range w.values { - if w.Func(k, v) { - w.lock.Unlock() - return v, nil - } - } - - // If we're closed, return that - if w.closed { - w.lock.Unlock() - return ErrClosed, nil - } - - // Pre-existing value doesn't exist, create a waiter - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - w.lock.Unlock() - - // Return the waiter - return nil, val -} - -// Must be called with w.lock held. -func (w *ComparedValue) init() { - w.waiters = make(map[interface{}]*Value) - if w.Func == nil { - w.Func = func(k, v interface{}) bool { return k == v } - } -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go deleted file mode 100644 index 432b036..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go +++ /dev/null @@ -1,151 +0,0 @@ -package shadow - -import ( - "sync" -) - -// KeyedValue is a struct that coordinates a value by key. If a value is -// not available for a give key, it'll block until it is available. -type KeyedValue struct { - lock sync.Mutex - once sync.Once - values map[string]interface{} - waiters map[string]*Value - closed bool -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the ErrClosed docs. -func (w *KeyedValue) Close() error { - w.lock.Lock() - defer w.lock.Unlock() - - // Set closed to true always - w.closed = true - - // For all waiters, complete with ErrClosed - for k, val := range w.waiters { - val.SetValue(ErrClosed) - delete(w.waiters, k) - } - - return nil -} - -// Value returns the value that was set for the given key, or blocks -// until one is available. -func (w *KeyedValue) Value(k string) interface{} { - w.lock.Lock() - v, val := w.valueWaiter(k) - w.lock.Unlock() - - // If we have no waiter, then return the value - if val == nil { - return v - } - - // We have a waiter, so wait - return val.Value() -} - -// WaitForChange waits for the value with the given key to be set again. -// If the key isn't set, it'll wait for an initial value. Note that while -// it is called "WaitForChange", the value isn't guaranteed to _change_; -// this will return when a SetValue is called for the given k. -func (w *KeyedValue) WaitForChange(k string) interface{} { - w.lock.Lock() - w.once.Do(w.init) - - // If we're closed, we're closed - if w.closed { - w.lock.Unlock() - return ErrClosed - } - - // Check for an active waiter. If there isn't one, make it - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - w.lock.Unlock() - - // And wait - return val.Value() -} - -// ValueOk gets the value for the given key, returning immediately if the -// value doesn't exist. The second return argument is true if the value exists. 
-func (w *KeyedValue) ValueOk(k string) (interface{}, bool) { - w.lock.Lock() - defer w.lock.Unlock() - - v, val := w.valueWaiter(k) - return v, val == nil -} - -func (w *KeyedValue) SetValue(k string, v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - w.setValue(k, v) -} - -// Init will initialize the key to a given value only if the key has -// not been set before. This is safe to call multiple times and in parallel. -func (w *KeyedValue) Init(k string, v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // If we have a waiter, set the value. - _, val := w.valueWaiter(k) - if val != nil { - w.setValue(k, v) - } -} - -// Must be called with w.lock held. -func (w *KeyedValue) init() { - w.values = make(map[string]interface{}) - w.waiters = make(map[string]*Value) -} - -// setValue is like SetValue but assumes the lock is held. -func (w *KeyedValue) setValue(k string, v interface{}) { - w.once.Do(w.init) - - // Set the value, always - w.values[k] = v - - // If we have a waiter, set it - if val, ok := w.waiters[k]; ok { - val.SetValue(v) - delete(w.waiters, k) - } -} - -// valueWaiter gets the value or the Value waiter for a given key. -// -// This must be called with lock held. -func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) { - w.once.Do(w.init) - - // If we have this value already, return it - if v, ok := w.values[k]; ok { - return v, nil - } - - // If we're closed, return that - if w.closed { - return ErrClosed, nil - } - - // No pending value, check for a waiter - val := w.waiters[k] - if val == nil { - val = new(Value) - w.waiters[k] = val - } - - // Return the waiter - return nil, val -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go deleted file mode 100644 index 0a43d4d..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go +++ /dev/null @@ -1,66 +0,0 @@ -package shadow - -import ( - "container/list" - "sync" -) - -// OrderedValue is a struct that keeps track of a value in the order -// it is set. Each time Value() is called, it will return the most recent -// calls value then discard it. -// -// This is unlike Value that returns the same value once it is set. -type OrderedValue struct { - lock sync.Mutex - values *list.List - waiters *list.List -} - -// Value returns the last value that was set, or blocks until one -// is received. -func (w *OrderedValue) Value() interface{} { - w.lock.Lock() - - // If we have a pending value already, use it - if w.values != nil && w.values.Len() > 0 { - front := w.values.Front() - w.values.Remove(front) - w.lock.Unlock() - return front.Value - } - - // No pending value, create a waiter - if w.waiters == nil { - w.waiters = list.New() - } - - var val Value - w.waiters.PushBack(&val) - w.lock.Unlock() - - // Return the value once we have it - return val.Value() -} - -// SetValue sets the latest value. 
-func (w *OrderedValue) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // If we have a waiter, notify it - if w.waiters != nil && w.waiters.Len() > 0 { - front := w.waiters.Front() - w.waiters.Remove(front) - - val := front.Value.(*Value) - val.SetValue(v) - return - } - - // Add it to the list of values - if w.values == nil { - w.values = list.New() - } - - w.values.PushBack(v) -} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go deleted file mode 100644 index 178b7e7..0000000 --- a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go +++ /dev/null @@ -1,87 +0,0 @@ -package shadow - -import ( - "errors" - "sync" -) - -// ErrClosed is returned by any closed values. -// -// A "closed value" is when the shadow has been notified that the real -// side is complete and any blocking values will _never_ be satisfied -// in the future. In this case, this error is returned. If a value is already -// available, that is still returned. -var ErrClosed = errors.New("shadow closed") - -// Value is a struct that coordinates a value between two -// parallel routines. It is similar to atomic.Value except that when -// Value is called if it isn't set it will wait for it. -// -// The Value can be closed with Close, which will cause any future -// blocking operations to return immediately with ErrClosed. -type Value struct { - lock sync.Mutex - cond *sync.Cond - value interface{} - valueSet bool -} - -func (v *Value) Lock() { - v.lock.Lock() -} - -func (v *Value) Unlock() { - v.lock.Unlock() -} - -// Close closes the value. This can never fail. For a definition of -// "close" see the struct docs. -func (w *Value) Close() error { - w.lock.Lock() - set := w.valueSet - w.lock.Unlock() - - // If we haven't set the value, set it - if !set { - w.SetValue(ErrClosed) - } - - // Done - return nil -} - -// Value returns the value that was set. -func (w *Value) Value() interface{} { - w.lock.Lock() - defer w.lock.Unlock() - - // If we already have a value just return - for !w.valueSet { - // No value, setup the condition variable if we have to - if w.cond == nil { - w.cond = sync.NewCond(&w.lock) - } - - // Wait on it - w.cond.Wait() - } - - // Return the value - return w.value -} - -// SetValue sets the value. -func (w *Value) SetValue(v interface{}) { - w.lock.Lock() - defer w.lock.Unlock() - - // Set the value - w.valueSet = true - w.value = v - - // If we have a condition, clear it - if w.cond != nil { - w.cond.Broadcast() - w.cond = nil - } -} diff --git a/vendor/github.com/hashicorp/terraform/httpclient/client.go b/vendor/github.com/hashicorp/terraform/httpclient/client.go new file mode 100644 index 0000000..bb06beb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/httpclient/client.go @@ -0,0 +1,18 @@ +package httpclient + +import ( + "net/http" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +// New returns the DefaultPooledClient from the cleanhttp +// package that will also send a Terraform User-Agent string. 
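A usage sketch for the constructor that follows, assuming this vendored import path; the endpoint URL is illustrative only:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/httpclient"
)

func main() {
    // Every request from this client carries the Terraform User-Agent
    // unless the caller sets one explicitly; TF_APPEND_USER_AGENT (see
    // useragent.go below) appends an extra suffix when set.
    client := httpclient.New()

    resp, err := client.Get("https://checkpoint-api.hashicorp.com/v1/check/terraform")
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}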
+func New() *http.Client { + cli := cleanhttp.DefaultPooledClient() + cli.Transport = &userAgentRoundTripper{ + userAgent: UserAgentString(), + inner: cli.Transport, + } + return cli +} diff --git a/vendor/github.com/hashicorp/terraform/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go new file mode 100644 index 0000000..5e28017 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go @@ -0,0 +1,40 @@ +package httpclient + +import ( + "fmt" + "log" + "net/http" + "os" + "strings" + + "github.com/hashicorp/terraform/version" +) + +const userAgentFormat = "Terraform/%s" +const uaEnvVar = "TF_APPEND_USER_AGENT" + +func UserAgentString() string { + ua := fmt.Sprintf(userAgentFormat, version.Version) + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} + +type userAgentRoundTripper struct { + inner http.RoundTripper + userAgent string +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + return rt.inner.RoundTrip(req) +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go index 3a5cb7a..7e2f4fe 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/client.go +++ b/vendor/github.com/hashicorp/terraform/plugin/client.go @@ -1,8 +1,10 @@ package plugin import ( + "os" "os/exec" + hclog "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/plugin/discovery" ) @@ -10,11 +12,18 @@ import ( // ClientConfig returns a configuration object that can be used to instantiate // a client for the plugin described by the given metadata. 
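The plugin client change below attaches an hclog logger to every plugin client. A minimal sketch of constructing and using a logger with the same options (the plugin path in the log call is illustrative):

package main

import (
    "os"

    hclog "github.com/hashicorp/go-hclog"
)

func main() {
    // Same options the ClientConfig change uses: a named logger at Trace
    // level writing to stderr.
    logger := hclog.New(&hclog.LoggerOptions{
        Name:   "plugin",
        Level:  hclog.Trace,
        Output: os.Stderr,
    })

    // hclog takes a message plus alternating key/value pairs.
    logger.Trace("starting plugin", "path", "/usr/local/bin/terraform-provider-example")
    logger.Debug("plugin handshake complete")
}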
func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { + logger := hclog.New(&hclog.LoggerOptions{ + Name: "plugin", + Level: hclog.Trace, + Output: os.Stderr, + }) + return &plugin.ClientConfig{ Cmd: exec.Command(m.Path), HandshakeConfig: Handshake, Managed: true, Plugins: PluginMap, + Logger: logger, } } diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go index f5bc4c1..f053312 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go @@ -3,6 +3,7 @@ package discovery import ( "io/ioutil" "log" + "os" "path/filepath" "strings" ) @@ -59,7 +60,6 @@ func findPluginPaths(kind string, dirs []string) []string { fullName := item.Name() if !strings.HasPrefix(fullName, prefix) { - log.Printf("[DEBUG] skipping %q, not a %s", fullName, kind) continue } @@ -71,6 +71,12 @@ func findPluginPaths(kind string, dirs []string) []string { continue } + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + log.Printf("[DEBUG] found %s %q", kind, fullName) ret = append(ret, filepath.Clean(absPath)) continue @@ -83,7 +89,13 @@ func findPluginPaths(kind string, dirs []string) []string { continue } - log.Printf("[WARNING] found legacy %s %q", kind, fullName) + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[WARN] found legacy %s %q", kind, fullName) ret = append(ret, filepath.Clean(absPath)) } @@ -92,6 +104,17 @@ func findPluginPaths(kind string, dirs []string) []string { return ret } +// Returns true if and only if the given path refers to a file or a symlink +// to a file. +func pathIsFile(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + return !info.IsDir() +} + // ResolvePluginPaths takes a list of paths to plugin executables (as returned // by e.g. FindPluginPaths) and produces a PluginMetaSet describing the // referenced plugins. diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go index 241b5cb..815640f 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go @@ -3,19 +3,22 @@ package discovery import ( "errors" "fmt" + "io" "io/ioutil" "log" "net/http" "os" + "path/filepath" "runtime" "strconv" "strings" "golang.org/x/net/html" - cleanhttp "github.com/hashicorp/go-cleanhttp" getter "github.com/hashicorp/go-getter" multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/httpclient" + "github.com/mitchellh/cli" ) // Releases are located by parsing the html listing from releases.hashicorp.com. @@ -30,7 +33,19 @@ const protocolVersionHeader = "x-terraform-protocol-version" var releaseHost = "https://releases.hashicorp.com" -var httpClient = cleanhttp.DefaultClient() +var httpClient *http.Client + +func init() { + httpClient = httpclient.New() + + httpGetter := &getter.HttpGetter{ + Client: httpClient, + Netrc: true, + } + + getter.Getters["http"] = httpGetter + getter.Getters["https"] = httpGetter +} // An Installer maintains a local cache of plugins by downloading plugins // from an online repository. 
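Because the init function above replaces the http and https entries in getter.Getters, any later getter.Get call in this package flows through the shared Terraform client (User-Agent plus .netrc support) with no change at the call site. A sketch, with an illustrative destination directory and release URL:

package main

import (
    "log"

    getter "github.com/hashicorp/go-getter"
)

func main() {
    dst := "/tmp/terraform-plugins" // illustrative target directory
    src := "https://releases.hashicorp.com/terraform-provider-null/1.0.0/" +
        "terraform-provider-null_1.0.0_linux_amd64.zip" // illustrative URL

    // getter.Get selects a Getter from getter.Getters by URL scheme, so
    // this download now uses the Terraform-flavored HTTP client.
    if err := getter.Get(dst, src); err != nil {
        log.Fatalf("download failed: %s", err)
    }
}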
@@ -47,6 +62,10 @@ type Installer interface { type ProviderInstaller struct { Dir string + // Cache is used to access and update a local cache of plugins if non-nil. + // Can be nil to disable caching. + Cache PluginCache + PluginProtocolVersion uint // OS and Arch specify the OS and architecture that should be used when @@ -58,6 +77,8 @@ type ProviderInstaller struct { // Skip checksum and signature verification SkipVerify bool + + Ui cli.Ui // Ui for output } // Get is part of an implementation of type Installer, and attempts to download @@ -98,6 +119,12 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e // sort them newest to oldest Versions(versions).Sort() + // Ensure that our installation directory exists + err = os.MkdirAll(i.Dir, os.ModePerm) + if err != nil { + return PluginMeta{}, fmt.Errorf("failed to create plugin dir %s: %s", i.Dir, err) + } + // take the first matching plugin we find for _, v := range versions { url := i.providerURL(provider, v.String()) @@ -116,8 +143,9 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v) if checkPlugin(url, i.PluginProtocolVersion) { - log.Printf("[DEBUG] getting provider %q version %q at %s", provider, v, url) - err := getter.Get(i.Dir, url) + i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, v.String())) + log.Printf("[DEBUG] getting provider %q version %q", provider, v) + err := i.install(provider, v, url) if err != nil { return PluginMeta{}, err } @@ -164,6 +192,98 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e return PluginMeta{}, ErrorNoVersionCompatible } +func (i *ProviderInstaller) install(provider string, version Version, url string) error { + if i.Cache != nil { + log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider, version) + cached := i.Cache.CachedPluginPath("provider", provider, version) + if cached == "" { + log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider, version, url) + err := getter.Get(i.Cache.InstallDir(), url) + if err != nil { + return err + } + // should now be in cache + cached = i.Cache.CachedPluginPath("provider", provider, version) + if cached == "" { + // should never happen if the getter is behaving properly + // and the plugins are packaged properly. + return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir()) + } + } + + // Link or copy the cached binary into our install dir so the + // normal resolution machinery can find it. + filename := filepath.Base(cached) + targetPath := filepath.Join(i.Dir, filename) + + log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached) + + // Delete if we can. If there's nothing there already then no harm done. + // This is important because we can't create a link if there's + // already a file of the same name present. + // (any other error here we'll catch below when we try to write here) + os.Remove(targetPath) + + // We don't attempt linking on Windows because links are not + // comprehensively supported by all tools/apps in Windows and + // so we choose to be conservative to avoid creating any + // weird issues for Windows users. + linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned + if runtime.GOOS != "windows" { + // Try hard linking first. 
Hard links are preferable because this + // creates a self-contained directory that doesn't depend on the + // cache after install. + linkErr = os.Link(cached, targetPath) + + // If that failed, try a symlink. This _does_ depend on the cache + // after install, so the user must manage the cache more carefully + // in this case, but avoids creating redundant copies of the + // plugins on disk. + if linkErr != nil { + linkErr = os.Symlink(cached, targetPath) + } + } + + // If we still have an error then we'll try a copy as a fallback. + // In this case either the OS is Windows or the target filesystem + // can't support symlinks. + if linkErr != nil { + srcFile, err := os.Open(cached) + if err != nil { + return fmt.Errorf("failed to open cached plugin %s: %s", cached, err) + } + defer srcFile.Close() + + destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create %s: %s", targetPath, err) + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + destFile.Close() + return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err) + } + + err = destFile.Close() + if err != nil { + return fmt.Errorf("error creating %s: %s", targetPath, err) + } + } + + // One way or another, by the time we get here we should have either + // a link or a copy of the cached plugin within i.Dir, as expected. + } else { + log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider, version, url) + err := getter.Get(i.Dir, url) + if err != nil { + return err + } + } + + return nil +} + func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { purge := make(PluginMetaSet) @@ -261,7 +381,7 @@ func checkPlugin(url string, pluginProtocolVersion uint) bool { if proto == "" { // The header isn't present, but we don't make this error fatal since // the latest version will probably work. - log.Printf("[WARNING] missing %s from: %s", protocolVersionHeader, url) + log.Printf("[WARN] missing %s from: %s", protocolVersionHeader, url) return true } @@ -422,3 +542,7 @@ func getFile(url string) ([]byte, error) { } return data, nil } + +func GetReleaseHost() string { + return releaseHost +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go new file mode 100644 index 0000000..1a10042 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go @@ -0,0 +1,48 @@ +package discovery + +// PluginCache is an interface implemented by objects that are able to maintain +// a cache of plugins. +type PluginCache interface { + // CachedPluginPath returns a path where the requested plugin is already + // cached, or an empty string if the requested plugin is not yet cached. + CachedPluginPath(kind string, name string, version Version) string + + // InstallDir returns the directory that new plugins should be installed into + // in order to populate the cache. This directory should be used as the + // first argument to getter.Get when downloading plugins with go-getter. + // + // After installing into this directory, use CachedPluginPath to obtain the + // path where the plugin was installed. + InstallDir() string +} + +// NewLocalPluginCache returns a PluginCache that caches plugins in a +// given local directory. 
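A usage sketch for the cache constructor that follows, exercising InstallDir and CachedPluginPath together; the cache directory is illustrative, and version parsing via the discovery package's VersionStr helper is assumed from elsewhere in that package:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/plugin/discovery"
)

func main() {
    // Illustrative: a cache rooted in a user-level plugin-cache directory.
    cache := discovery.NewLocalPluginCache("/home/user/.terraform.d/plugin-cache")

    // New archives are unpacked into InstallDir() to populate the cache...
    fmt.Println("populate via:", cache.InstallDir())

    // ...and installers consult the cache before downloading anything.
    version := discovery.VersionStr("1.0.0").MustParse()
    if path := cache.CachedPluginPath("provider", "null", version); path != "" {
        fmt.Println("already cached at:", path)
    } else {
        fmt.Println("not cached yet; the installer would download and link")
    }
}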
+func NewLocalPluginCache(dir string) PluginCache { + return &pluginCache{ + Dir: dir, + } +} + +type pluginCache struct { + Dir string +} + +func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { + allPlugins := FindPlugins(kind, []string{c.Dir}) + plugins := allPlugins.WithName(name).WithVersion(version) + + if plugins.Count() == 0 { + // nothing cached + return "" + } + + // There should generally be only one plugin here; if there's more than + // one match for some reason then we'll just choose one arbitrarily. + plugin := plugins.Newest() + return plugin.Path +} + +func (c *pluginCache) InstallDir() string { + return c.Dir +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go index 473f786..d6a433c 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go +++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go @@ -41,6 +41,24 @@ func (p *ResourceProvider) Stop() error { return err } +func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + var result ResourceProviderGetSchemaResponse + args := &ResourceProviderGetSchemaArgs{ + Req: req, + } + + err := p.Client.Call("Plugin.GetSchema", args, &result) + if err != nil { + return nil, err + } + + if result.Error != nil { + err = result.Error + } + + return result.Schema, err +} + func (p *ResourceProvider) Input( input terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { @@ -312,6 +330,15 @@ type ResourceProviderStopResponse struct { Error *plugin.BasicError } +type ResourceProviderGetSchemaArgs struct { + Req *terraform.ProviderSchemaRequest +} + +type ResourceProviderGetSchemaResponse struct { + Schema *terraform.ProviderSchema + Error *plugin.BasicError +} + type ResourceProviderConfigureResponse struct { Error *plugin.BasicError } @@ -418,6 +445,18 @@ func (s *ResourceProviderServer) Stop( return nil } +func (s *ResourceProviderServer) GetSchema( + args *ResourceProviderGetSchemaArgs, + result *ResourceProviderGetSchemaResponse, +) error { + schema, err := s.Provider.GetSchema(args.Req) + result.Schema = schema + if err != nil { + result.Error = plugin.NewBasicError(err) + } + return nil +} + func (s *ResourceProviderServer) Input( args *ResourceProviderInputArgs, reply *ResourceProviderInputResponse) error { diff --git a/vendor/github.com/hashicorp/terraform/registry/client.go b/vendor/github.com/hashicorp/terraform/registry/client.go new file mode 100644 index 0000000..a18e6b8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/client.go @@ -0,0 +1,227 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "path" + "strings" + "time" + + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/registry/response" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/hashicorp/terraform/version" +) + +const ( + xTerraformGet = "X-Terraform-Get" + xTerraformVersion = "X-Terraform-Version" + requestTimeout = 10 * time.Second + serviceID = "modules.v1" +) + +var tfVersion = version.String() + +// Client provides methods to query Terraform Registries. +type Client struct { + // this is the client to be used for all requests. 
+ client *http.Client + + // services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + services *disco.Disco +} + +// NewClient returns a new initialized registry client. +func NewClient(services *disco.Disco, client *http.Client) *Client { + if services == nil { + services = disco.New() + } + + if client == nil { + client = httpclient.New() + client.Timeout = requestTimeout + } + + services.Transport = client.Transport + + return &Client{ + client: client, + services: services, + } +} + +// Discover queries the host, and returns the url for the registry. +func (c *Client) Discover(host svchost.Hostname) (*url.URL, error) { + service, err := c.services.DiscoverServiceURL(host, serviceID) + if err != nil { + return nil, err + } + if !strings.HasSuffix(service.Path, "/") { + service.Path += "/" + } + return service, nil +} + +// Versions queries the registry for a module, and returns the available versions. +func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) { + host, err := module.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join(module.Module(), "versions")) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching module versions from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // OK + case http.StatusNotFound: + return nil, &errModuleNotFound{addr: module} + default: + return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) + } + + var versions response.ModuleVersions + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&versions); err != nil { + return nil, err + } + + for _, mod := range versions.Modules { + for _, v := range mod.Versions { + log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source) + } + } + + return &versions, nil +} + +func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { + creds, err := c.services.CredentialsForHost(host) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) + return + } + + if creds != nil { + creds.PrepareRequest(req) + } +} + +// Location find the download location for a specific version module. +// This returns a string, because the final location may contain special go-getter syntax. 
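A usage sketch combining NewClient and Versions as defined above; the module source is illustrative, and passing nil/nil selects the default discovery object and Terraform HTTP client:

package main

import (
    "log"

    "github.com/hashicorp/terraform/registry"
    "github.com/hashicorp/terraform/registry/regsrc"
)

func main() {
    // nil/nil falls back to a fresh disco.Disco and the default Terraform
    // HTTP client, as NewClient above shows.
    client := registry.NewClient(nil, nil)

    module, err := regsrc.ParseModuleSource("hashicorp/consul/aws") // illustrative source
    if err != nil {
        log.Fatal(err)
    }

    resp, err := client.Versions(module)
    if err != nil {
        log.Fatal(err)
    }
    for _, mod := range resp.Modules {
        for _, v := range mod.Versions {
            log.Printf("%s version %s", mod.Source, v.Version)
        }
    }
}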
+func (c *Client) Location(module *regsrc.Module, version string) (string, error) { + host, err := module.SvcHost() + if err != nil { + return "", err + } + + service, err := c.Discover(host) + if err != nil { + return "", err + } + + var p *url.URL + if version == "" { + p, err = url.Parse(path.Join(module.Module(), "download")) + } else { + p, err = url.Parse(path.Join(module.Module(), version, "download")) + } + if err != nil { + return "", err + } + download := service.ResolveReference(p) + + log.Printf("[DEBUG] looking up module location from %q", download) + + req, err := http.NewRequest("GET", download.String(), nil) + if err != nil { + return "", err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + // there should be no body, but save it for logging + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("error reading response body from registry: %s", err) + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusNoContent: + // OK + case http.StatusNotFound: + return "", fmt.Errorf("module %q version %q not found", module, version) + default: + // anything else is an error: + return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body) + } + + // the download location is in the X-Terraform-Get header + location := resp.Header.Get(xTerraformGet) + if location == "" { + return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body) + } + + // If location looks like it's trying to be a relative URL, treat it as + // one. + // + // We don't do this for just _any_ location, since the X-Terraform-Get + // header is a go-getter location rather than a URL, and so not all + // possible values will parse reasonably as URLs.) + // + // When used in conjunction with go-getter we normally require this header + // to be an absolute URL, but we are more liberal here because third-party + // registry implementations may not "know" their own absolute URLs if + // e.g. they are running behind a reverse proxy frontend, or such. + if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") { + locationURL, err := url.Parse(location) + if err != nil { + return "", fmt.Errorf("invalid relative URL for %q: %s", module, err) + } + locationURL = download.ResolveReference(locationURL) + location = locationURL.String() + } + + return location, nil +} diff --git a/vendor/github.com/hashicorp/terraform/registry/errors.go b/vendor/github.com/hashicorp/terraform/registry/errors.go new file mode 100644 index 0000000..b8dcd31 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/errors.go @@ -0,0 +1,23 @@ +package registry + +import ( + "fmt" + + "github.com/hashicorp/terraform/registry/regsrc" +) + +type errModuleNotFound struct { + addr *regsrc.Module +} + +func (e *errModuleNotFound) Error() string { + return fmt.Sprintf("module %s not found", e.addr) +} + +// IsModuleNotFound returns true only if the given error is a "module not found" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. 
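A sketch of how a caller might use the errModuleNotFound value above, via the IsModuleNotFound predicate defined next, to separate a missing module from operational failures; the wrapper function and its message are illustrative:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/registry"
    "github.com/hashicorp/terraform/registry/regsrc"
)

// lookupVersions separates "module doesn't exist" from operational errors,
// which callers typically report differently. Illustrative only.
func lookupVersions(client *registry.Client, source string) error {
    module, err := regsrc.ParseModuleSource(source)
    if err != nil {
        return err
    }

    _, err = client.Versions(module)
    if registry.IsModuleNotFound(err) {
        return fmt.Errorf("module %q is not registered; check the source address", source)
    }
    return err // nil, or e.g. a network failure
}

func main() {
    client := registry.NewClient(nil, nil)
    if err := lookupVersions(client, "hashicorp/consul/aws"); err != nil {
        fmt.Println(err)
    }
}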
+func IsModuleNotFound(err error) bool { + _, ok := err.(*errModuleNotFound) + return ok +} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go new file mode 100644 index 0000000..14b4dce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go @@ -0,0 +1,140 @@ +package regsrc + +import ( + "regexp" + "strings" + + "github.com/hashicorp/terraform/svchost" +) + +var ( + // InvalidHostString is a placeholder returned when a raw host can't be + // converted by IDNA spec. It will never be returned for any host for which + // Valid() is true. + InvalidHostString = "" + + // urlLabelEndSubRe is a sub-expression that matches any character that's + // allowed at the start or end of a URL label according to RFC1123. + urlLabelEndSubRe = "[0-9A-Za-z]" + + // urlLabelEndSubRe is a sub-expression that matches any character that's + // allowed at in a non-start or end of a URL label according to RFC1123. + urlLabelMidSubRe = "[0-9A-Za-z-]" + + // urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char + // in an IDN (Unicode) display URL. It's not strict - there are only ~15k + // valid Unicode points in IDN RFC (some with conditions). We are just going + // with being liberal with matching and then erroring if we fail to convert + // to punycode later (which validates chars fully). This at least ensures + // ascii chars dissalowed by the RC1123 parts above don't become legal + // again. + urlLabelUnicodeSubRe = "[^[:ascii:]]" + + // hostLabelSubRe is the sub-expression that matches a valid hostname label. + // It does not anchor the start or end so it can be composed into more + // complex RegExps below. Note that for sanity we don't handle disallowing + // raw punycode in this regexp (esp. since re2 doesn't support negative + // lookbehind, but we can capture it's presence here to check later). + hostLabelSubRe = "" + + // Match valid initial char, or unicode char + "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + + // Optionally, match 0 to 61 valid URL or Unicode chars, + // followed by one valid end char or unicode char + "(?:" + + "(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" + + "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + + ")?" + + // hostSubRe is the sub-expression that matches a valid host prefix. + // Allows custom port. + hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?" + + // hostRe is a regexp that matches a valid host prefix. Additional + // validation of unicode strings is needed for matches. + hostRe = regexp.MustCompile("^" + hostSubRe + "$") +) + +// FriendlyHost describes a registry instance identified in source strings by a +// simple bare hostname like registry.terraform.io. +type FriendlyHost struct { + Raw string +} + +func NewFriendlyHost(host string) *FriendlyHost { + return &FriendlyHost{Raw: host} +} + +// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the +// given string. If no valid prefix is found, host will be nil and rest will +// contain the full source string. The host prefix must terminate at the end of +// the input or at the first / character. If one or more characters exist after +// the first /, they will be returned as rest (without the / delimiter). 
+// Hostnames containing punycode WILL be parsed successfully since they may have +// come from an internal normalized source string, however should be considered +// invalid if the string came from a user directly. This must be checked +// explicitly for user-input strings by calling Valid() on the +// returned host. +func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) { + parts := strings.SplitN(source, "/", 2) + + if hostRe.MatchString(parts[0]) { + host = &FriendlyHost{Raw: parts[0]} + if len(parts) == 2 { + rest = parts[1] + } + return + } + + // No match, return whole string as rest along with nil host + rest = source + return +} + +// Valid returns whether the host prefix is considered valid in any case. +// Example of invalid prefixes might include ones that don't conform to the host +// name specifications. Not that IDN prefixes containing punycode are not valid +// input which we expect to always be in user-input or normalised display form. +func (h *FriendlyHost) Valid() bool { + return svchost.IsValid(h.Raw) +} + +// Display returns the host formatted for display to the user in CLI or web +// output. +func (h *FriendlyHost) Display() string { + return svchost.ForDisplay(h.Raw) +} + +// Normalized returns the host formatted for internal reference or comparison. +func (h *FriendlyHost) Normalized() string { + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return InvalidHostString + } + return string(host) +} + +// String returns the host formatted as the user originally typed it assuming it +// was parsed from user input. +func (h *FriendlyHost) String() string { + return h.Raw +} + +// Equal compares the FriendlyHost against another instance taking normalization +// into account. Invalid hosts cannot be compared and will always return false. +func (h *FriendlyHost) Equal(other *FriendlyHost) bool { + if other == nil { + return false + } + + otherHost, err := svchost.ForComparison(other.Raw) + if err != nil { + return false + } + + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return false + } + + return otherHost == host +} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go new file mode 100644 index 0000000..325706e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go @@ -0,0 +1,205 @@ +package regsrc + +import ( + "errors" + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform/svchost" +) + +var ( + ErrInvalidModuleSource = errors.New("not a valid registry module source") + + // nameSubRe is the sub-expression that matches a valid module namespace or + // name. It's strictly a super-set of what GitHub allows for user/org and + // repo names respectively, but more restrictive than our original repo-name + // regex which allowed periods but could cause ambiguity with hostname + // prefixes. It does not anchor the start or end so it can be composed into + // more complex RegExps below. Alphanumeric with - and _ allowed in non + // leading or trailing positions. Max length 64 chars. (GitHub username is + // 38 max.) + nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" + + // providerSubRe is the sub-expression that matches a valid provider. It + // does not anchor the start or end so it can be composed into more complex + // RegExps below. Only lowercase chars and digits are supported in practice. + // Max length 64 chars. 
+ providerSubRe = "[0-9a-z]{1,64}" + + // moduleSourceRe is a regular expression that matches the basic + // namespace/name/provider[//...] format for registry sources. It assumes + // any FriendlyHost prefix has already been removed if present. + moduleSourceRe = regexp.MustCompile( + fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", + nameSubRe, nameSubRe, providerSubRe)) + + // NameRe is a regular expression defining the format allowed for namespace + // or name fields in module registry implementations. + NameRe = regexp.MustCompile("^" + nameSubRe + "$") + + // ProviderRe is a regular expression defining the format allowed for + // provider fields in module registry implementations. + ProviderRe = regexp.MustCompile("^" + providerSubRe + "$") + + // these hostnames are not allowed as registry sources, because they are + // already special case module sources in terraform. + disallowed = map[string]bool{ + "github.com": true, + "bitbucket.org": true, + } +) + +// Module describes a Terraform Registry Module source. +type Module struct { + // RawHost is the friendly host prefix if one was present. It might be nil + // if the original source had no host prefix which implies + // PublicRegistryHost but is distinct from having an actual pointer to + // PublicRegistryHost since it encodes the fact the original string didn't + // include a host prefix at all which is significant for recovering actual + // input not just normalized form. Most callers should access it with Host() + // which will return public registry host instance if it's nil. + RawHost *FriendlyHost + RawNamespace string + RawName string + RawProvider string + RawSubmodule string +} + +// NewModule construct a new module source from separate parts. Pass empty +// string if host or submodule are not needed. +func NewModule(host, namespace, name, provider, submodule string) (*Module, error) { + m := &Module{ + RawNamespace: namespace, + RawName: name, + RawProvider: provider, + RawSubmodule: submodule, + } + if host != "" { + h := NewFriendlyHost(host) + if h != nil { + fmt.Println("HOST:", h) + if !h.Valid() || disallowed[h.Display()] { + return nil, ErrInvalidModuleSource + } + } + m.RawHost = h + } + return m, nil +} + +// ParseModuleSource attempts to parse source as a Terraform registry module +// source. If the string is not found to be in a valid format, +// ErrInvalidModuleSource is returned. Note that this can only be used on +// "input" strings, e.g. either ones supplied by the user or potentially +// normalised but in Display form (unicode). It will fail to parse a source with +// a punycoded domain since this is not permitted input from a user. If you have +// an already normalized string internally, you can compare it without parsing +// by comparing with the normalized version of the subject with the normal +// string equality operator. +func ParseModuleSource(source string) (*Module, error) { + // See if there is a friendly host prefix. + host, rest := ParseFriendlyHost(source) + if host != nil { + if !host.Valid() || disallowed[host.Display()] { + return nil, ErrInvalidModuleSource + } + } + + matches := moduleSourceRe.FindStringSubmatch(rest) + if len(matches) < 4 { + return nil, ErrInvalidModuleSource + } + + m := &Module{ + RawHost: host, + RawNamespace: matches[1], + RawName: matches[2], + RawProvider: matches[3], + } + + if len(matches) == 5 { + m.RawSubmodule = matches[4] + } + + return m, nil +} + +// Display returns the source formatted for display to the user in CLI or web +// output. 
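A short sketch of the parsing rules above in action: a bare namespace/name/provider source implies the public registry, normalization lower-cases, and specially handled hosts are rejected. The sources are illustrative:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform/registry/regsrc"
)

func main() {
    // A trailing //path selects a submodule within the module.
    m, err := regsrc.ParseModuleSource("Hashicorp/Consul/aws//modules/consul-cluster")
    if err != nil {
        panic(err)
    }
    fmt.Println(m.Normalized()) // hashicorp/consul/aws//modules/consul-cluster
    fmt.Println(m.Module())     // Hashicorp/Consul/aws (registry ID, no host or submodule)

    // Hosts with special handling elsewhere in Terraform are rejected here.
    if _, err := regsrc.ParseModuleSource("github.com/foo/bar"); err == regsrc.ErrInvalidModuleSource {
        fmt.Println("github.com sources are not registry sources")
    }
}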
+func (m *Module) Display() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) +} + +// Normalized returns the source formatted for internal reference or comparison. +func (m *Module) Normalized() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) +} + +// String returns the source formatted as the user originally typed it assuming +// it was parsed from user input. +func (m *Module) String() string { + // Don't normalize public registry hostname - leave it exactly like the user + // input it. + hostPrefix := "" + if m.RawHost != nil { + hostPrefix = m.RawHost.String() + "/" + } + return m.formatWithPrefix(hostPrefix, true) +} + +// Equal compares the module source against another instance taking +// normalization into account. +func (m *Module) Equal(other *Module) bool { + return m.Normalized() == other.Normalized() +} + +// Host returns the FriendlyHost object describing which registry this module is +// in. If the original source string had not host component this will return the +// PublicRegistryHost. +func (m *Module) Host() *FriendlyHost { + if m.RawHost == nil { + return PublicRegistryHost + } + return m.RawHost +} + +func (m *Module) normalizedHostPrefix(host string) string { + if m.Host().Equal(PublicRegistryHost) { + return "" + } + return host + "/" +} + +func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { + suffix := "" + if m.RawSubmodule != "" { + suffix = "//" + m.RawSubmodule + } + str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, + m.RawProvider, suffix) + + // lower case by default + if !preserveCase { + return strings.ToLower(str) + } + return str +} + +// Module returns just the registry ID of the module, without a hostname or +// suffix. +func (m *Module) Module() string { + return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) +} + +// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may +// contain an invalid hostname, this also returns an error indicating if it +// could be converted to a svchost.Hostname. If no host is specified, the +// default PublicRegistryHost is returned. +func (m *Module) SvcHost() (svchost.Hostname, error) { + if m.RawHost == nil { + return svchost.ForComparison(PublicRegistryHost.Raw) + } + return svchost.ForComparison(m.RawHost.Raw) +} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go new file mode 100644 index 0000000..c430bf1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go @@ -0,0 +1,8 @@ +// Package regsrc provides helpers for working with source strings that identify +// resources within a Terraform registry. +package regsrc + +var ( + // PublicRegistryHost is a FriendlyHost that represents the public registry. + PublicRegistryHost = NewFriendlyHost("registry.terraform.io") +) diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module.go b/vendor/github.com/hashicorp/terraform/registry/response/module.go new file mode 100644 index 0000000..3bd2b3d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module.go @@ -0,0 +1,93 @@ +package response + +import ( + "time" +) + +// Module is the response structure with the data for a single module version. 
+type Module struct { + ID string `json:"id"` + + //--------------------------------------------------------------- + // Metadata about the overall module. + + Owner string `json:"owner"` + Namespace string `json:"namespace"` + Name string `json:"name"` + Version string `json:"version"` + Provider string `json:"provider"` + Description string `json:"description"` + Source string `json:"source"` + PublishedAt time.Time `json:"published_at"` + Downloads int `json:"downloads"` + Verified bool `json:"verified"` +} + +// ModuleDetail represents a module in full detail. +type ModuleDetail struct { + Module + + //--------------------------------------------------------------- + // Metadata about the overall module. This is only available when + // requesting the specific module (not in list responses). + + // Root is the root module. + Root *ModuleSubmodule `json:"root"` + + // Submodules are the other submodules that are available within + // this module. + Submodules []*ModuleSubmodule `json:"submodules"` + + //--------------------------------------------------------------- + // The fields below are only set when requesting this specific + // module. They are available to easily know all available versions + // and providers without multiple API calls. + + Providers []string `json:"providers"` // All available providers + Versions []string `json:"versions"` // All versions +} + +// ModuleSubmodule is the metadata about a specific submodule within +// a module. This includes the root module as a special case. +type ModuleSubmodule struct { + Path string `json:"path"` + Readme string `json:"readme"` + Empty bool `json:"empty"` + + Inputs []*ModuleInput `json:"inputs"` + Outputs []*ModuleOutput `json:"outputs"` + Dependencies []*ModuleDep `json:"dependencies"` + Resources []*ModuleResource `json:"resources"` +} + +// ModuleInput is an input for a module. +type ModuleInput struct { + Name string `json:"name"` + Description string `json:"description"` + Default string `json:"default"` +} + +// ModuleOutput is an output for a module. +type ModuleOutput struct { + Name string `json:"name"` + Description string `json:"description"` +} + +// ModuleDep is an output for a module. +type ModuleDep struct { + Name string `json:"name"` + Source string `json:"source"` + Version string `json:"version"` +} + +// ModuleProviderDep is the output for a provider dependency +type ModuleProviderDep struct { + Name string `json:"name"` + Version string `json:"version"` +} + +// ModuleResource is an output for a module. +type ModuleResource struct { + Name string `json:"name"` + Type string `json:"type"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_list.go b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go new file mode 100644 index 0000000..9783748 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go @@ -0,0 +1,7 @@ +package response + +// ModuleList is the response structure for a pageable list of modules. +type ModuleList struct { + Meta PaginationMeta `json:"meta"` + Modules []*Module `json:"modules"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go new file mode 100644 index 0000000..e48499d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go @@ -0,0 +1,14 @@ +package response + +// ModuleProvider represents a single provider for modules. 
+type ModuleProvider struct { + Name string `json:"name"` + Downloads int `json:"downloads"` + ModuleCount int `json:"module_count"` +} + +// ModuleProviderList is the response structure for a pageable list of ModuleProviders. +type ModuleProviderList struct { + Meta PaginationMeta `json:"meta"` + Providers []*ModuleProvider `json:"providers"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go new file mode 100644 index 0000000..f69e975 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go @@ -0,0 +1,32 @@ +package response + +// ModuleVersions is the response format that contains all metadata about module +// versions needed for terraform CLI to resolve version constraints. See RFC +// TF-042 for details on this format. +type ModuleVersions struct { + Modules []*ModuleProviderVersions `json:"modules"` +} + +// ModuleProviderVersions is the response format for a single module instance, +// containing metadata about all versions and their dependencies. +type ModuleProviderVersions struct { + Source string `json:"source"` + Versions []*ModuleVersion `json:"versions"` +} + +// ModuleVersion is the output metadata for a given version needed by CLI to +// resolve candidate versions to satisfy requirements. +type ModuleVersion struct { + Version string `json:"version"` + Root VersionSubmodule `json:"root"` + Submodules []*VersionSubmodule `json:"submodules"` +} + +// VersionSubmodule is the output metadata for a submodule within a given +// version needed by CLI to resolve candidate versions to satisfy requirements. +// When representing the Root in JSON the path is omitted. +type VersionSubmodule struct { + Path string `json:"path,omitempty"` + Providers []*ModuleProviderDep `json:"providers"` + Dependencies []*ModuleDep `json:"dependencies"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/pagination.go b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go new file mode 100644 index 0000000..75a9254 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go @@ -0,0 +1,65 @@ +package response + +import ( + "net/url" + "strconv" +) + +// PaginationMeta is a structure included in responses for pagination. +type PaginationMeta struct { + Limit int `json:"limit"` + CurrentOffset int `json:"current_offset"` + NextOffset *int `json:"next_offset,omitempty"` + PrevOffset *int `json:"prev_offset,omitempty"` + NextURL string `json:"next_url,omitempty"` + PrevURL string `json:"prev_url,omitempty"` +} + +// NewPaginationMeta populates pagination meta data from result parameters +func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { + pm := PaginationMeta{ + Limit: limit, + CurrentOffset: offset, + } + + // Calculate next/prev offsets, leave nil if not valid pages + nextOffset := offset + limit + if hasMore { + pm.NextOffset = &nextOffset + } + + prevOffset := offset - limit + if prevOffset < 0 { + prevOffset = 0 + } + if prevOffset < offset { + pm.PrevOffset = &prevOffset + } + + // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should + // catch missing URLs if we call with bad URL arg (and we care about them being present). 
+ if currentURL != "" && pm.NextOffset != nil { + pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) + } + if currentURL != "" && pm.PrevOffset != nil { + pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) + } + + return pm +} + +func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + q := u.Query() + if val == defaultVal { + // elide param if it's the default value + q.Del(key) + } else { + q.Set(key, strconv.Itoa(val)) + } + u.RawQuery = q.Encode() + return u.String(), nil +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/redirect.go b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go new file mode 100644 index 0000000..d5eb49b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go @@ -0,0 +1,6 @@ +package response + +// Redirect causes the frontend to perform a window redirect. +type Redirect struct { + URL string `json:"url"` +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go new file mode 100644 index 0000000..4f0d168 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go @@ -0,0 +1,45 @@ +package auth + +import ( + "github.com/hashicorp/terraform/svchost" +) + +// CachingCredentialsSource creates a new credentials source that wraps another +// and caches its results in memory, on a per-hostname basis. +// +// No means is provided for expiration of cached credentials, so a caching +// credentials source should have a limited lifetime (one Terraform operation, +// for example) to ensure that time-limited credentials don't expire before +// their cache entries do. +func CachingCredentialsSource(source CredentialsSource) CredentialsSource { + return &cachingCredentialsSource{ + source: source, + cache: map[svchost.Hostname]HostCredentials{}, + } +} + +type cachingCredentialsSource struct { + source CredentialsSource + cache map[svchost.Hostname]HostCredentials +} + +// ForHost passes the given hostname on to the wrapped credentials source and +// caches the result to return for future requests with the same hostname. +// +// Both credentials and non-credentials (nil) responses are cached. +// +// No cache entry is created if the wrapped source returns an error, to allow +// the caller to retry the failing operation. +func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if cache, cached := s.cache[host]; cached { + return cache, nil + } + + result, err := s.source.ForHost(host) + if err != nil { + return result, err + } + + s.cache[host] = result + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go new file mode 100644 index 0000000..0372c16 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go @@ -0,0 +1,63 @@ +// Package auth contains types and functions to manage authentication +// credentials for service hosts. +package auth + +import ( + "net/http" + + "github.com/hashicorp/terraform/svchost" +) + +// Credentials is a list of CredentialsSource objects that can be tried in +// turn until one returns credentials for a host, or one returns an error. +// +// A Credentials is itself a CredentialsSource, wrapping its members. 
+// In principle one CredentialsSource can be nested inside another, though +// there is no good reason to do so. +type Credentials []CredentialsSource + +// NoCredentials is an empty CredentialsSource that always returns nil +// when asked for credentials. +var NoCredentials CredentialsSource = Credentials{} + +// A CredentialsSource is an object that may be able to provide credentials +// for a given host. +// +// Credentials lookups are not guaranteed to be concurrency-safe. Callers +// using these facilities in concurrent code must use external concurrency +// primitives to prevent race conditions. +type CredentialsSource interface { + // ForHost returns a non-nil HostCredentials if the source has credentials + // available for the host, and a nil HostCredentials if it does not. + // + // If an error is returned, progress through a list of CredentialsSources + // is halted and the error is returned to the user. + ForHost(host svchost.Hostname) (HostCredentials, error) +} + +// HostCredentials represents a single set of credentials for a particular +// host. +type HostCredentials interface { + // PrepareRequest modifies the given request in-place to apply the + // receiving credentials. The usual behavior of this method is to + // add some sort of Authorization header to the request. + PrepareRequest(req *http.Request) + + // Token returns the authentication token. + Token() string +} + +// ForHost iterates over the contained CredentialsSource objects and +// tries to obtain credentials for the given host from each one in turn. +// +// If any source returns either a non-nil HostCredentials or a non-nil error +// then this result is returned. Otherwise, the result is nil, nil. +func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { + for _, source := range c { + creds, err := source.ForHost(host) + if creds != nil || err != nil { + return creds, err + } + } + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go new file mode 100644 index 0000000..f91006a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go @@ -0,0 +1,18 @@ +package auth + +// HostCredentialsFromMap converts a map of key-value pairs from a credentials +// definition provided by the user (e.g. in a config file, or via a credentials +// helper) into a HostCredentials object if possible, or returns nil if +// no credentials could be extracted from the map. +// +// This function ignores map keys it is unfamiliar with, to allow for future +// expansion of the credentials map format for new credential types. 
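A sketch of composing sources with the Credentials list type above; it uses the StaticCredentialsSource defined later in this change, the hostname is assumed to be already in normalized form, and the token is illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/terraform/svchost"
    "github.com/hashicorp/terraform/svchost/auth"
)

func main() {
    // Sources are consulted in order; the first non-nil result (or error)
    // wins. The static map stands in for credentials from a config file.
    creds := auth.Credentials{
        auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
            svchost.Hostname("app.terraform.io"): {"token": "example-token"}, // illustrative
        }),
    }

    hostCreds, err := creds.ForHost(svchost.Hostname("app.terraform.io"))
    if err != nil {
        log.Fatal(err)
    }
    if hostCreds != nil {
        fmt.Println("token:", hostCreds.Token())
    }
}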
+func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { + if m == nil { + return nil + } + if token, ok := m["token"].(string); ok { + return HostCredentialsToken(token) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go new file mode 100644 index 0000000..d72ffe3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go @@ -0,0 +1,80 @@ +package auth + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + + "github.com/hashicorp/terraform/svchost" +) + +type helperProgramCredentialsSource struct { + executable string + args []string +} + +// HelperProgramCredentialsSource returns a CredentialsSource that runs the +// given program with the given arguments in order to obtain credentials. +// +// The given executable path must be an absolute path; it is the caller's +// responsibility to validate and process a relative path or other input +// provided by an end-user. If the given path is not absolute, this +// function will panic. +// +// When credentials are requested, the program will be run in a child process +// with the given arguments along with two additional arguments added to the +// end of the list: the literal string "get", followed by the requested +// hostname in ASCII compatibility form (punycode form). +func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { + if !filepath.IsAbs(executable) { + panic("NewCredentialsSourceHelperProgram requires absolute path to executable") + } + + fullArgs := make([]string, len(args)+1) + fullArgs[0] = executable + copy(fullArgs[1:], args) + + return &helperProgramCredentialsSource{ + executable: executable, + args: fullArgs, + } +} + +func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + args := make([]string, len(s.args), len(s.args)+2) + copy(args, s.args) + args = append(args, "get") + args = append(args, string(host)) + + outBuf := bytes.Buffer{} + errBuf := bytes.Buffer{} + + cmd := exec.Cmd{ + Path: s.executable, + Args: args, + Stdin: nil, + Stdout: &outBuf, + Stderr: &errBuf, + } + err := cmd.Run() + if _, isExitErr := err.(*exec.ExitError); isExitErr { + errText := errBuf.String() + if errText == "" { + // Shouldn't happen for a well-behaved helper program + return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) + } + return nil, fmt.Errorf("error in %s: %s", s.executable, errText) + } else if err != nil { + return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) + } + + var m map[string]interface{} + err = json.Unmarshal(outBuf.Bytes(), &m) + if err != nil { + return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) + } + + return HostCredentialsFromMap(m), nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/static.go b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go new file mode 100644 index 0000000..5373fdd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go @@ -0,0 +1,28 @@ +package auth + +import ( + "github.com/hashicorp/terraform/svchost" +) + +// StaticCredentialsSource is a credentials source that retrieves credentials +// from the provided map. It returns nil if a requested hostname is not +// present in the map. +// +// The caller should not modify the given map after passing it to this function. 
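For reference, a minimal helper program compatible with the exec protocol above: it is invoked with the caller's arguments plus the literal "get" and the punycode hostname, and must print a JSON credentials object on stdout (errors go to stderr with a non-zero exit). The token scheme is illustrative:

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

func main() {
    args := os.Args[1:]
    if len(args) < 2 || args[len(args)-2] != "get" {
        fmt.Fprintln(os.Stderr, "only the \"get\" verb is supported")
        os.Exit(1)
    }
    host := args[len(args)-1] // punycode form, e.g. "app.terraform.io"

    // A real helper would consult a keychain or secrets store here.
    creds := map[string]interface{}{"token": "example-token-for-" + host}
    json.NewEncoder(os.Stdout).Encode(creds)
}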
+func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { + return staticCredentialsSource(creds) +} + +type staticCredentialsSource map[svchost.Hostname]map[string]interface{} + +func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if s == nil { + return nil, nil + } + + if m, exists := s[host]; exists { + return HostCredentialsFromMap(m), nil + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go new file mode 100644 index 0000000..9358bcb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go @@ -0,0 +1,25 @@ +package auth + +import ( + "net/http" +) + +// HostCredentialsToken is a HostCredentials implementation that represents a +// single "bearer token", to be sent to the server via an Authorization header +// with the auth type set to "Bearer" +type HostCredentialsToken string + +// PrepareRequest alters the given HTTP request by setting its Authorization +// header to the string "Bearer " followed by the encapsulated authentication +// token. +func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { + if req.Header == nil { + req.Header = http.Header{} + } + req.Header.Set("Authorization", "Bearer "+string(tc)) +} + +// Token returns the authentication token. +func (tc HostCredentialsToken) Token() string { + return string(tc) +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go new file mode 100644 index 0000000..1963cbd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go @@ -0,0 +1,259 @@ +// Package disco handles Terraform's remote service discovery protocol. +// +// This protocol allows mapping from a service hostname, as produced by the +// svchost package, to a set of services supported by that host and the +// endpoint information for each supported service. +package disco + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "net/http" + "net/url" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" +) + +const ( + // Fixed path to the discovery manifest. + discoPath = "/.well-known/terraform.json" + + // Arbitrary-but-small number to prevent runaway redirect loops. + maxRedirects = 3 + + // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. + discoTimeout = 11 * time.Second + + // 1MB - to prevent abusive services from using loads of our memory. + maxDiscoDocBytes = 1 * 1024 * 1024 +) + +// httpTransport is overridden during tests, to skip TLS verification. +var httpTransport = cleanhttp.DefaultPooledTransport() + +// Disco is the main type in this package, which allows discovery on given +// hostnames and caches the results by hostname to avoid repeated requests +// for the same information. +type Disco struct { + hostCache map[svchost.Hostname]*Host + credsSrc auth.CredentialsSource + + // Transport is a custom http.RoundTripper to use. + Transport http.RoundTripper +} + +// New returns a new initialized discovery object. +func New() *Disco { + return NewWithCredentialsSource(nil) +} + +// NewWithCredentialsSource returns a new discovery object initialized with +// the given credentials source. 
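A small sketch of the HostCredentialsToken type above applying itself to a request; the URL and token are illustrative:

package main

import (
    "fmt"
    "net/http"

    "github.com/hashicorp/terraform/svchost/auth"
)

func main() {
    req, err := http.NewRequest("GET", "https://app.terraform.io/api/v2/ping", nil) // illustrative URL
    if err != nil {
        panic(err)
    }

    // The bearer token lands in the Authorization header, exactly as
    // PrepareRequest above documents.
    auth.HostCredentialsToken("example-token").PrepareRequest(req)

    fmt.Println(req.Header.Get("Authorization")) // Bearer example-token
}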
+func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { + return &Disco{ + hostCache: make(map[svchost.Hostname]*Host), + credsSrc: credsSrc, + Transport: httpTransport, + } +} + +// SetCredentialsSource provides a credentials source that will be used to +// add credentials to outgoing discovery requests, where available. +// +// If this method is never called, no outgoing discovery requests will have +// credentials. +func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { + d.credsSrc = src +} + +// CredentialsForHost returns a non-nil HostCredentials if the embedded source has +// credentials available for the host, and a nil HostCredentials if it does not. +func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { + if d.credsSrc == nil { + return nil, nil + } + return d.credsSrc.ForHost(hostname) +} + +// ForceHostServices provides a pre-defined set of services for a given +// host, which prevents the receiver from attempting network-based discovery +// for the given host. Instead, the given services map will be returned +// verbatim. +// +// When providing "forced" services, any relative URLs are resolved against +// the initial discovery URL that would have been used for network-based +// discovery, yielding the same results as if the given map were published +// at the host's default discovery URL, though using absolute URLs is strongly +// recommended to make the configured behavior more explicit. +func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { + if services == nil { + services = map[string]interface{}{} + } + + d.hostCache[hostname] = &Host{ + discoURL: &url.URL{ + Scheme: "https", + Host: string(hostname), + Path: discoPath, + }, + hostname: hostname.ForDisplay(), + services: services, + transport: d.Transport, + } +} + +// Discover runs the discovery protocol against the given hostname (which must +// already have been validated and prepared with svchost.ForComparison) and +// returns an object describing the services available at that host. +// +// If a given hostname supports no Terraform services at all, a non-nil but +// empty Host object is returned. When giving feedback to the end user about +// such situations, we say "host does not provide a service", +// regardless of whether that is due to that service specifically being absent +// or due to the host not providing Terraform services at all, since we don't +// wish to expose the detail of whole-host discovery to an end-user. +func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { + if host, cached := d.hostCache[hostname]; cached { + return host, nil + } + + host, err := d.discover(hostname) + if err != nil { + return nil, err + } + d.hostCache[hostname] = host + + return host, nil +} + +// DiscoverServiceURL is a convenience wrapper for discovery on a given +// hostname and then looking up a particular service in the result. +func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { + host, err := d.Discover(hostname) + if err != nil { + return nil, err + } + return host.ServiceURL(serviceID) +} + +// discover implements the actual discovery process, with its result cached +// by the public-facing Discover method. 
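+//
+// Concretely, for a hostname such as "example.com" (hypothetical), this
+// issues a GET request to:
+//
+//	https://example.com/.well-known/terraform.json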
+func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { + discoURL := &url.URL{ + Scheme: "https", + Host: hostname.String(), + Path: discoPath, + } + + client := &http.Client{ + Transport: d.Transport, + Timeout: discoTimeout, + + CheckRedirect: func(req *http.Request, via []*http.Request) error { + log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) + if len(via) > maxRedirects { + return errors.New("too many redirects") // this error will never actually be seen + } + return nil + }, + } + + req := &http.Request{ + Header: make(http.Header), + Method: "GET", + URL: discoURL, + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", httpclient.UserAgentString()) + + creds, err := d.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) + } + if creds != nil { + // Update the request to include credentials. + creds.PrepareRequest(req) + } + + log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request discovery document: %v", err) + } + defer resp.Body.Close() + + host := &Host{ + // Use the discovery URL from resp.Request in + // case the client followed any redirects. + discoURL: resp.Request.URL, + hostname: hostname.ForDisplay(), + transport: d.Transport, + } + + // Return the host without any services. + if resp.StatusCode == 404 { + return host, nil + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) + } + + contentType := resp.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) + } + if mediaType != "application/json" { + return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) + } + + // This doesn't catch chunked encoding, because ContentLength is -1 in that case. + if resp.ContentLength > maxDiscoDocBytes { + // Size limit here is not a contractual requirement and so we may + // adjust it over time if we find a different limit is warranted. + return nil, fmt.Errorf( + "Discovery doc response is too large (got %d bytes; limit %d)", + resp.ContentLength, maxDiscoDocBytes, + ) + } + + // If the response is using chunked encoding then we can't predict its + // size, but we'll at least prevent reading the entire thing into memory. + lr := io.LimitReader(resp.Body, maxDiscoDocBytes) + + servicesBytes, err := ioutil.ReadAll(lr) + if err != nil { + return nil, fmt.Errorf("Error reading discovery document body: %v", err) + } + + var services map[string]interface{} + err = json.Unmarshal(servicesBytes, &services) + if err != nil { + return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) + } + host.services = services + + return host, nil +} + +// Forget invalidates any cached record of the given hostname. If the host +// has no cache entry then this is a no-op. +func (d *Disco) Forget(hostname svchost.Hostname) { + delete(d.hostCache, hostname) +} + +// ForgetAll is like Forget, but for all of the hostnames that have cache entries. 
+func (d *Disco) ForgetAll() { + d.hostCache = make(map[svchost.Hostname]*Host) +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/host.go b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go new file mode 100644 index 0000000..ab9514c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go @@ -0,0 +1,264 @@ +package disco + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/httpclient" +) + +const versionServiceID = "versions.v1" + +// Host represents a service discovered host. +type Host struct { + discoURL *url.URL + hostname string + services map[string]interface{} + transport http.RoundTripper +} + +// Constraints represents the version constraints of a service. +type Constraints struct { + Service string `json:"service"` + Product string `json:"product"` + Minimum string `json:"minimum"` + Maximum string `json:"maximum"` + Excluding []string `json:"excluding"` +} + +// ErrServiceNotProvided is returned when the service is not provided. +type ErrServiceNotProvided struct { + hostname string + service string +} + +// Error returns a customized error message. +func (e *ErrServiceNotProvided) Error() string { + if e.hostname == "" { + return fmt.Sprintf("host does not provide a %s service", e.service) + } + return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service) +} + +// ErrVersionNotSupported is returned when the version is not supported. +type ErrVersionNotSupported struct { + hostname string + service string + version string +} + +// Error returns a customized error message. +func (e *ErrVersionNotSupported) Error() string { + if e.hostname == "" { + return fmt.Sprintf("host does not support %s version %s", e.service, e.version) + } + return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version) +} + +// ErrNoVersionConstraints is returned when checkpoint was disabled +// or the endpoint to query for version constraints was unavailable. +type ErrNoVersionConstraints struct { + disabled bool +} + +// Error returns a customized error message. +func (e *ErrNoVersionConstraints) Error() string { + if e.disabled { + return "checkpoint disabled" + } + return "unable to contact versions service" +} + +// ServiceURL returns the URL associated with the given service identifier, +// which should be of the form "servicename.vN". +// +// A non-nil result is always an absolute URL with a scheme of either HTTPS +// or HTTP. +func (h *Host) ServiceURL(id string) (*url.URL, error) { + svc, ver, err := parseServiceID(id) + if err != nil { + return nil, err + } + + // No services supported for an empty Host. + if h == nil || h.services == nil { + return nil, &ErrServiceNotProvided{service: svc} + } + + urlStr, ok := h.services[id].(string) + if !ok { + // See if we have a matching service as that would indicate + // the service is supported, but not the requested version. + for serviceID := range h.services { + if strings.HasPrefix(serviceID, svc+".") { + return nil, &ErrVersionNotSupported{ + hostname: h.hostname, + service: svc, + version: ver.Original(), + } + } + } + + // No discovered services match the requested service. 
+		return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
+	}
+
+	u, err := url.Parse(urlStr)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to parse service URL: %v", err)
+	}
+
+	// Make relative URLs absolute using our discovery URL.
+	if !u.IsAbs() {
+		u = h.discoURL.ResolveReference(u)
+	}
+
+	if u.Scheme != "https" && u.Scheme != "http" {
+		return nil, fmt.Errorf("Service URL is using an unsupported scheme: %s", u.Scheme)
+	}
+	if u.User != nil {
+		return nil, fmt.Errorf("Embedded username/password information is not permitted")
+	}
+
+	// Fragment part is irrelevant, since we're not a browser.
+	u.Fragment = ""
+
+	return h.discoURL.ResolveReference(u), nil
+}
+
+// VersionConstraints returns the constraints for a given service identifier
+// (which should be of the form "servicename.vN") and product.
+//
+// When an exact (service and version) match is found, the constraints for
+// that service are returned.
+//
+// When the requested version is not provided but the service is, we will
+// search for all alternative versions. If multiple alternative versions
+// are found, the constraints of the latest available version are returned.
+//
+// When a service is not provided at all, an error will be returned instead.
+//
+// When checkpoint is disabled or when a 404 is returned after making the
+// HTTP call, an ErrNoVersionConstraints error will be returned.
+func (h *Host) VersionConstraints(id, product string) (*Constraints, error) {
+	svc, _, err := parseServiceID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return early if checkpoint is disabled.
+	if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
+		return nil, &ErrNoVersionConstraints{disabled: true}
+	}
+
+	// No services supported for an empty Host.
+	if h == nil || h.services == nil {
+		return nil, &ErrServiceNotProvided{service: svc}
+	}
+
+	// Try to get the service URL for the version service and
+	// return early if the service isn't provided by the host.
+	u, err := h.ServiceURL(versionServiceID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if we have an exact (service and version) match.
+	if _, ok := h.services[id].(string); !ok {
+		// If we don't have an exact match, we search for all matching
+		// services and then use the service ID of the latest version.
+		var services []string
+		for serviceID := range h.services {
+			if strings.HasPrefix(serviceID, svc+".") {
+				services = append(services, serviceID)
+			}
+		}
+
+		if len(services) == 0 {
+			// No discovered services match the requested service.
+			return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
+		}
+
+		// Set id to the latest service ID we found.
+		var latest *version.Version
+		for _, serviceID := range services {
+			if _, ver, err := parseServiceID(serviceID); err == nil {
+				if latest == nil || latest.LessThan(ver) {
+					id = serviceID
+					latest = ver
+				}
+			}
+		}
+	}
+
+	// Set a default timeout of 1000 milliseconds (1 second) for the versions
+	// request; CHECKPOINT_TIMEOUT overrides it, also in milliseconds.
+	timeout := 1000
+	if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
+		timeout = v
+	}
+
+	client := &http.Client{
+		Transport: h.transport,
+		Timeout:   time.Duration(timeout) * time.Millisecond,
+	}
+
+	// Prepare the service URL by setting the service and product.
+	v := u.Query()
+	v.Set("product", product)
+	u.Path += id
+	u.RawQuery = v.Encode()
+
+	// Create a new request.
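+	// Illustratively, if the versions service URL were
+	// https://example.com/v1/versions/ (a hypothetical host) and id were
+	// "servicename.vN", u would now be
+	// https://example.com/v1/versions/servicename.vN?product=<product>.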
+ req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, fmt.Errorf("Failed to create version constraints request: %v", err) + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", httpclient.UserAgentString()) + + log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request version constraints: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == 404 { + return nil, &ErrNoVersionConstraints{disabled: false} + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) + } + + // Parse the constraints from the response body. + result := &Constraints{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, fmt.Errorf("Error parsing version constraints: %v", err) + } + + return result, nil +} + +func parseServiceID(id string) (string, *version.Version, error) { + parts := strings.SplitN(id, ".", 2) + if len(parts) != 2 { + return "", nil, fmt.Errorf("Invalid service ID format (i.e. service.vN): %s", id) + } + + version, err := version.NewVersion(parts[1]) + if err != nil { + return "", nil, fmt.Errorf("Invalid service version: %v", err) + } + + return parts[0], version, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/label_iter.go b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go new file mode 100644 index 0000000..af8ccba --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go @@ -0,0 +1,69 @@ +package svchost + +import ( + "strings" +) + +// A labelIter allows iterating over domain name labels. +// +// This type is copied from golang.org/x/net/idna, where it is used +// to segment hostnames into their separate labels for analysis. We use +// it for the same purpose here, in ForComparison. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' 
{
+			l.curStart = len(l.orig)
+		}
+	}
+}
+
+func (l *labelIter) set(s string) {
+	if l.slice == nil {
+		l.slice = strings.Split(l.orig, ".")
+	}
+	l.slice[l.i] = s
+}
diff --git a/vendor/github.com/hashicorp/terraform/svchost/svchost.go b/vendor/github.com/hashicorp/terraform/svchost/svchost.go
new file mode 100644
index 0000000..4eded14
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/svchost/svchost.go
@@ -0,0 +1,207 @@
+// Package svchost deals with the representations of the so-called "friendly
+// hostnames" that we use to represent systems that provide Terraform-native
+// remote services, such as module registry, remote operations, etc.
+//
+// Friendly hostnames are specified such that, as much as possible, they
+// are consistent with how web browsers think of hostnames, so that users
+// can bring their intuitions about how hostnames behave when they access
+// a Terraform Enterprise instance's web UI (or indeed any other website)
+// and have this behave in a similar way.
+package svchost
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/idna"
+)
+
+// Hostname is a specialized name for string that indicates that the string
+// has been converted to (or was already in) the storage and comparison form.
+//
+// Hostname values are not suitable for display in the user interface. Use
+// the ForDisplay method to obtain a form suitable for display in the UI.
+//
+// Unlike user-supplied hostnames, strings of type Hostname (assuming they
+// were constructed by a function within this package) can be compared for
+// equality using the standard Go == operator.
+type Hostname string
+
+// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that
+// a domain name label is in "punycode" form.
+const acePrefix = "xn--"
+
+// displayProfile is a very liberal idna profile that we use to do
+// normalization for display without imposing validation rules.
+var displayProfile = idna.New(
+	idna.MapForLookup(),
+	idna.Transitional(true),
+)
+
+// ForDisplay takes a user-specified hostname and returns a normalized form of
+// it suitable for display in the UI.
+//
+// If the input is so invalid that no normalization can be performed then
+// this will return the input, assuming that the caller still wants to
+// display _something_. This function is, however, more tolerant than the
+// other functions in this package and will make a best effort to prepare
+// _any_ given hostname for display.
+//
+// For validation, use either IsValid (for explicit validation) or
+// ForComparison (which implicitly validates, returning an error if invalid).
+func ForDisplay(given string) string {
+	var portPortion string
+	if colonPos := strings.Index(given, ":"); colonPos != -1 {
+		given, portPortion = given[:colonPos], given[colonPos:]
+	}
+	portPortion, _ = normalizePortPortion(portPortion)
+
+	ascii, err := displayProfile.ToASCII(given)
+	if err != nil {
+		return given + portPortion
+	}
+	display, err := displayProfile.ToUnicode(ascii)
+	if err != nil {
+		return given + portPortion
+	}
+	return display + portPortion
+}
+
+// IsValid returns true if the given user-specified hostname is a valid
+// service hostname.
+//
+// Validity is determined by complying with the RFC 5891 requirements for
+// names that are valid for domain lookup (section 5), with the additional
+// requirement that user-supplied forms must not _already_ contain
+// Punycode segments.
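+//
+// For example, "Example.Com" is accepted (it normalizes to "example.com"),
+// while a hostname containing a label already in Punycode form, i.e. one
+// starting with "xn--", is rejected.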
+func IsValid(given string) bool { + _, err := ForComparison(given) + return err == nil +} + +// ForComparison takes a user-specified hostname and returns a normalized +// form of it suitable for storage and comparison. The result is not suitable +// for display to end-users because it uses Punycode to represent non-ASCII +// characters, and this form is unreadable for non-ASCII-speaking humans. +// +// The result is typed as Hostname -- a specialized name for string -- so that +// other APIs can make it clear within the type system whether they expect a +// user-specified or display-form hostname or a value already normalized for +// comparison. +// +// The returned Hostname is not valid if the returned error is non-nil. +func ForComparison(given string) (Hostname, error) { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + + var err error + portPortion, err = normalizePortPortion(portPortion) + if err != nil { + return Hostname(""), err + } + + if given == "" { + return Hostname(""), fmt.Errorf("empty string is not a valid hostname") + } + + // First we'll apply our additional constraint that Punycode must not + // be given directly by the user. This is not an IDN specification + // requirement, but we prohibit it to force users to use human-readable + // hostname forms within Terraform configuration. + labels := labelIter{orig: given} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + return Hostname(""), fmt.Errorf( + "hostname contains empty label (two consecutive periods)", + ) + } + if strings.HasPrefix(label, acePrefix) { + return Hostname(""), fmt.Errorf( + "hostname label %q specified in punycode format; service hostnames must be given in unicode", + label, + ) + } + } + + result, err := idna.Lookup.ToASCII(given) + if err != nil { + return Hostname(""), err + } + return Hostname(result + portPortion), nil +} + +// ForDisplay returns a version of the receiver that is appropriate for display +// in the UI. This includes converting any punycode labels to their +// corresponding Unicode characters. +// +// A round-trip through ForComparison and this ForDisplay method does not +// guarantee the same result as calling this package's top-level ForDisplay +// function, since a round-trip through the Hostname type implies stricter +// handling than we do when doing basic display-only processing. +func (h Hostname) ForDisplay() string { + given := string(h) + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + // We don't normalize the port portion here because we assume it's + // already been normalized on the way in. + + result, err := idna.Lookup.ToUnicode(given) + if err != nil { + // Should never happen, since type Hostname indicates that a string + // passed through our validation rules. + panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err)) + } + return result + portPortion +} + +func (h Hostname) String() string { + return string(h) +} + +func (h Hostname) GoString() string { + return fmt.Sprintf("svchost.Hostname(%q)", string(h)) +} + +// normalizePortPortion attempts to normalize the "port portion" of a hostname, +// which begins with the first colon in the hostname and should be followed +// by a string of decimal digits. +// +// If the port portion is valid, a normalized version of it is returned along +// with a nil error. 
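+//
+// For example, ":08080" normalizes to ":8080", and ":443" normalizes to the
+// empty string, since 443 is the implied default.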
+//
+// If the port portion is invalid, the input string is returned verbatim along
+// with a non-nil error.
+//
+// An empty string is a valid port portion representing the absence of a port.
+// If non-empty, the first character must be a colon.
+func normalizePortPortion(s string) (string, error) {
+	if s == "" {
+		return s, nil
+	}
+
+	if s[0] != ':' {
+		// should never happen, since the caller tends to guarantee the
+		// presence of a colon due to how it's extracted from the string.
+		return s, errors.New("port portion is missing its initial colon")
+	}
+
+	numStr := s[1:]
+	num, err := strconv.Atoi(numStr)
+	if err != nil {
+		return s, errors.New("port portion contains non-digit characters")
+	}
+	if num == 443 {
+		return "", nil // ":443" is the default
+	}
+	if num > 65535 {
+		return s, errors.New("port number is greater than 65535")
+	}
+	return fmt.Sprintf(":%d", num), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
index a814a85..f133cc2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -8,11 +8,13 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/hashicorp/terraform/tfdiags"
+
 	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/hcl"
 	"github.com/hashicorp/terraform/config"
 	"github.com/hashicorp/terraform/config/module"
-	"github.com/hashicorp/terraform/helper/experiment"
+	"github.com/hashicorp/terraform/version"
 )
 
 // InputMode defines what sort of input will be asked for when Input
@@ -123,7 +125,7 @@ type Context struct {
 func NewContext(opts *ContextOpts) (*Context, error) {
 	// Validate the version requirement if it is given
 	if opts.Module != nil {
-		if err := checkRequiredVersion(opts.Module); err != nil {
+		if err := CheckRequiredVersion(opts.Module); err != nil {
 			return nil, err
 		}
 	}
@@ -143,19 +145,14 @@ func NewContext(opts *ContextOpts) (*Context, error) {
 
 	// If our state is from the future, then error. Callers can avoid
 	// this error by explicitly setting `StateFutureAllowed`.
-	if !opts.StateFutureAllowed && state.FromFutureTerraform() {
-		return nil, fmt.Errorf(
-			"Terraform doesn't allow running any operations against a state\n"+
-				"that was written by a future Terraform version. The state is\n"+
-				"reporting it is written by Terraform '%s'.\n\n"+
-				"Please run at least that version of Terraform to continue.",
-			state.TFVersion)
+	if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed {
+		return nil, err
 	}
 
 	// Explicitly reset our state version to our current version so that
 	// any operations we do will write out that our latest version
 	// has run.
-	state.TFVersion = Version
+	state.TFVersion = version.Version
 
 	// Determine parallelism, default to 10.
We do this both to limit // CPU pressure but also to have an extra guard against rate throttling @@ -465,7 +462,7 @@ func (c *Context) Input(mode InputMode) error { } // Do the walk - if _, err := c.walk(graph, nil, walkInput); err != nil { + if _, err := c.walk(graph, walkInput); err != nil { return err } } @@ -490,6 +487,13 @@ func (c *Context) Input(mode InputMode) error { func (c *Context) Apply() (*State, error) { defer c.acquireRun("apply")() + // Check there are no empty target parameter values + for _, target := range c.targets { + if target == "" { + return nil, fmt.Errorf("Target parameter must not have empty value") + } + } + // Copy our own state c.state = c.state.DeepCopy() @@ -506,7 +510,7 @@ func (c *Context) Apply() (*State, error) { } // Walk the graph - walker, err := c.walk(graph, graph, operation) + walker, err := c.walk(graph, operation) if len(walker.ValidationErrors) > 0 { err = multierror.Append(err, walker.ValidationErrors...) } @@ -527,19 +531,27 @@ func (c *Context) Apply() (*State, error) { func (c *Context) Plan() (*Plan, error) { defer c.acquireRun("plan")() + // Check there are no empty target parameter values + for _, target := range c.targets { + if target == "" { + return nil, fmt.Errorf("Target parameter must not have empty value") + } + } + p := &Plan{ Module: c.module, Vars: c.variables, State: c.state, Targets: c.targets, - TerraformVersion: VersionString(), + TerraformVersion: version.String(), ProviderSHA256s: c.providerSHA256s, } var operation walkOperation if c.destroy { operation = walkPlanDestroy + p.Destroy = true } else { // Set our state to be something temporary. We do this so that // the plan can update a fake state so that variables work, then @@ -575,7 +587,7 @@ func (c *Context) Plan() (*Plan, error) { } // Do the walk - walker, err := c.walk(graph, graph, operation) + walker, err := c.walk(graph, operation) if err != nil { return nil, err } @@ -630,7 +642,7 @@ func (c *Context) Refresh() (*State, error) { } // Do the walk - if _, err := c.walk(graph, graph, walkRefresh); err != nil { + if _, err := c.walk(graph, walkRefresh); err != nil { return nil, err } @@ -670,29 +682,27 @@ func (c *Context) Stop() { } // Validate validates the configuration and returns any warnings or errors. -func (c *Context) Validate() ([]string, []error) { +func (c *Context) Validate() tfdiags.Diagnostics { defer c.acquireRun("validate")() - var errs error + var diags tfdiags.Diagnostics // Validate the configuration itself - if err := c.module.Validate(); err != nil { - errs = multierror.Append(errs, err) - } + diags = diags.Append(c.module.Validate()) // This only needs to be done for the root module, since inter-module // variables are validated in the module tree. if config := c.module.Config(); config != nil { // Validate the user variables - if err := smcUserVariables(config, c.variables); len(err) > 0 { - errs = multierror.Append(errs, err...) + for _, err := range smcUserVariables(config, c.variables) { + diags = diags.Append(err) } } // If we have errors at this point, the graphing has no chance, // so just bail early. - if errs != nil { - return nil, []error{errs} + if diags.HasErrors() { + return diags } // Build the graph so we can walk it and run Validate on nodes. @@ -701,24 +711,29 @@ func (c *Context) Validate() ([]string, []error) { // graph again later after Planning. 
graph, err := c.Graph(GraphTypeValidate, nil) if err != nil { - return nil, []error{err} + diags = diags.Append(err) + return diags } // Walk - walker, err := c.walk(graph, graph, walkValidate) + walker, err := c.walk(graph, walkValidate) if err != nil { - return nil, multierror.Append(errs, err).Errors + diags = diags.Append(err) } - // Return the result - rerrs := multierror.Append(errs, walker.ValidationErrors...) - sort.Strings(walker.ValidationWarnings) - sort.Slice(rerrs.Errors, func(i, j int) bool { - return rerrs.Errors[i].Error() < rerrs.Errors[j].Error() + sort.Slice(walker.ValidationErrors, func(i, j int) bool { + return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error() }) - return walker.ValidationWarnings, rerrs.Errors + for _, warn := range walker.ValidationWarnings { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range walker.ValidationErrors { + diags = diags.Append(err) + } + + return diags } // Module returns the module tree associated with this context. @@ -792,33 +807,11 @@ func (c *Context) releaseRun() { c.runContext = nil } -func (c *Context) walk( - graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) { +func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) { // Keep track of the "real" context which is the context that does // the real work: talking to real providers, modifying real state, etc. realCtx := c - // If we don't want shadowing, remove it - if !experiment.Enabled(experiment.X_shadow) { - shadow = nil - } - - // Just log this so we can see it in a debug log - if !c.shadow { - log.Printf("[WARN] terraform: shadow graph disabled") - shadow = nil - } - - // If we have a shadow graph, walk that as well - var shadowCtx *Context - var shadowCloser Shadow - if shadow != nil { - // Build the shadow context. In the process, override the real context - // with the one that is wrapped so that the shadow context can verify - // the results of the real. - realCtx, shadowCtx, shadowCloser = newShadowContext(c) - } - log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) walker := &ContextGraphWalker{ @@ -837,90 +830,6 @@ func (c *Context) walk( close(watchStop) <-watchWait - // If we have a shadow graph and we interrupted the real graph, then - // we just close the shadow and never verify it. It is non-trivial to - // recreate the exact execution state up until an interruption so this - // isn't supported with shadows at the moment. - if shadowCloser != nil && c.sh.Stopped() { - // Ignore the error result, there is nothing we could care about - shadowCloser.CloseShadow() - - // Set it to nil so we don't do anything - shadowCloser = nil - } - - // If we have a shadow graph, wait for that to complete. - if shadowCloser != nil { - // Build the graph walker for the shadow. We also wrap this in - // a panicwrap so that panics are captured. For the shadow graph, - // we just want panics to be normal errors rather than to crash - // Terraform. - shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{ - Context: shadowCtx, - Operation: operation, - }) - - // Kick off the shadow walk. This will block on any operations - // on the real walk so it is fine to start first. 
- log.Printf("[INFO] Starting shadow graph walk: %s", operation.String()) - shadowCh := make(chan error) - go func() { - shadowCh <- shadow.Walk(shadowWalker) - }() - - // Notify the shadow that we're done - if err := shadowCloser.CloseShadow(); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // Wait for the walk to end - log.Printf("[DEBUG] Waiting for shadow graph to complete...") - shadowWalkErr := <-shadowCh - - // Get any shadow errors - if err := shadowCloser.ShadowError(); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // Verify the contexts (compare) - if err := shadowContextVerify(realCtx, shadowCtx); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // At this point, if we're supposed to fail on error, then - // we PANIC. Some tests just verify that there is an error, - // so simply appending it to realErr and returning could hide - // shadow problems. - // - // This must be done BEFORE appending shadowWalkErr since the - // shadowWalkErr may include expected errors. - // - // We only do this if we don't have a real error. In the case of - // a real error, we can't guarantee what nodes were and weren't - // traversed in parallel scenarios so we can't guarantee no - // shadow errors. - if c.shadowErr != nil && contextFailOnShadowError && realErr == nil { - panic(multierror.Prefix(c.shadowErr, "shadow graph:")) - } - - // Now, if we have a walk error, we append that through - if shadowWalkErr != nil { - c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr) - } - - if c.shadowErr == nil { - log.Printf("[INFO] Shadow graph success!") - } else { - log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr) - - // If we're supposed to fail on shadow errors, then report it - if contextFailOnShadowError { - realErr = multierror.Append(realErr, multierror.Prefix( - c.shadowErr, "shadow graph:")) - } - } - } - return walker, realErr } diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go index f1d5776..e940143 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go @@ -66,7 +66,7 @@ func (c *Context) Import(opts *ImportOpts) (*State, error) { } // Walk it - if _, err := c.walk(graph, nil, walkImport); err != nil { + if _, err := c.walk(graph, walkImport); err != nil { return c.state, err } diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go index fd1687e..d6dc550 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -23,6 +23,12 @@ const ( DiffUpdate DiffDestroy DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. + // Managed resource reads never appear in plan, and when data source + // reads appear they are represented as DiffCreate in core before + // transforming to DiffRefresh in the UI layer. 
+	DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion
 )
 
 // multiVal matches the index key to a flatmapped set, list or map
@@ -831,7 +837,14 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
 		}
 	}
 
-	// TODO: check for the same value if not computed
+	// We don't compare the values because we can't currently actually
+	// guarantee to generate the same value for two diffs created from
+	// the same state+config: we have some pesky interpolation functions
+	// that do not behave as pure functions (uuid, timestamp) and so they
+	// can be different each time a diff is produced.
+	// FIXME: Re-organize our config handling so that we don't re-evaluate
+	// expressions when we produce a second comparison diff during
+	// apply (for EvalCompareDiff).
 }
 
 // Check for leftover attributes
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
index 3cb088a..10d9c22 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -49,11 +49,11 @@ func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
 		path = strings.Join(ctx.Path(), ".")
 	}
 
-	log.Printf("[DEBUG] %s: eval: %T", path, n)
+	log.Printf("[TRACE] %s: eval: %T", path, n)
 	output, err := n.Eval(ctx)
 	if err != nil {
 		if _, ok := err.(EvalEarlyExitError); ok {
-			log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
+			log.Printf("[TRACE] %s: eval: %T, err: %s", path, n, err)
 		} else {
 			log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
 		}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
index 2f6a497..b9b4806 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -112,7 +112,7 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
 	}
 	state.init()
 
-	{
+	if resourceHasUserVisibleApply(n.Info) {
 		// Call pre-apply hook
 		err := ctx.Hook(func(h Hook) (HookAction, error) {
 			return h.PreApply(n.Info, state, diff)
@@ -136,7 +136,7 @@ type EvalApplyPost struct {
 func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
 	state := *n.State
 
-	{
+	if resourceHasUserVisibleApply(n.Info) {
 		// Call post-apply hook
 		err := ctx.Hook(func(h Hook) (HookAction, error) {
 			return h.PostApply(n.Info, state, *n.Error)
@@ -149,6 +149,22 @@ func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
 	return nil, *n.Error
 }
 
+// resourceHasUserVisibleApply returns true if the given resource is one where
+// apply actions should be exposed to the user.
+//
+// Certain resources do apply actions only as an implementation detail, so
+// these should not be advertised to code outside of this package.
+func resourceHasUserVisibleApply(info *InstanceInfo) bool {
+	addr := info.ResourceAddress()
+
+	// Only managed resources have user-visible apply actions.
+	// In particular, this excludes data resources since we "apply" these
+	// only as an implementation detail of removing them from state when
+	// they are destroyed. (When reading, they don't get here at all because
+	// we present them as "Refresh" actions.)
+	return addr.Mode == config.ManagedResourceMode
+}
+
 // EvalApplyProvisioners is an EvalNode implementation that executes
 // the provisioners for a resource.
 //
@@ -211,11 +227,8 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
 			state.Tainted = true
 		}
 
-		if n.Error != nil {
-			*n.Error = multierror.Append(*n.Error, err)
-		} else {
-			return nil, err
-		}
+		*n.Error = multierror.Append(*n.Error, err)
+		return nil, err
 	}
 
 	{
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
index a1f815b..86481de 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -22,11 +22,11 @@ type EvalContext interface {
 	// Input is the UIInput object for interacting with the UI.
 	Input() UIInput
 
-	// InitProvider initializes the provider with the given name and
+	// InitProvider initializes the provider with the given type and name, and
 	// returns the implementation of the resource provider or an error.
 	//
 	// It is an error to initialize the same provider more than once.
-	InitProvider(string) (ResourceProvider, error)
+	InitProvider(typ string, name string) (ResourceProvider, error)
 
 	// Provider gets the provider instance with the given name (already
 	// initialized) or returns nil if the provider isn't initialized.
@@ -40,8 +40,6 @@ type EvalContext interface {
 	// is used to store the provider configuration for inheritance lookups
 	// with ParentProviderConfig().
 	ConfigureProvider(string, *ResourceConfig) error
-	SetProviderConfig(string, *ResourceConfig) error
-	ParentProviderConfig(string) *ResourceConfig
 
 	// ProviderInput and SetProviderInput are used to configure providers
 	// from user input.
@@ -69,6 +67,13 @@ type EvalContext interface {
 	// that is currently being acted upon.
 	Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
 
+	// InterpolateProvider takes a ProviderConfig and interpolates it with the
+	// stored interpolation scope. Since provider configurations can be
+	// inherited, the interpolation scope may be different from the current
+	// context path. Interpolation is otherwise executed the same as in the
+	// Interpolate method.
+	InterpolateProvider(*config.ProviderConfig, *Resource) (*ResourceConfig, error)
+
 	// SetVariables sets the variables for the module within
 	// this context with the name n. This function call is additive:
 	// the second parameter is merged with any previous call.
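The InitProvider change above separates the provider type from the name used
as its cache key; previously the type was derived by splitting the name on its
first period, as the removed strings.SplitN call in eval_context_builtin.go
below shows. A minimal sketch of the call-site difference, using "aws" and
"aws.west" as hypothetical type and name values:

	// Old: the type "aws" was inferred from the name.
	p, err := ctx.InitProvider("aws.west")

	// New: the type is passed explicitly; the name alone keys the cache.
	p, err := ctx.InitProvider("aws", "aws.west")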
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go index 3dcfb22..1b6ee5a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "log" - "strings" "sync" "github.com/hashicorp/terraform/config" @@ -34,7 +33,6 @@ type BuiltinEvalContext struct { Hooks []Hook InputValue UIInput ProviderCache map[string]ResourceProvider - ProviderConfigCache map[string]*ResourceConfig ProviderInputConfig map[string]map[string]interface{} ProviderLock *sync.Mutex ProvisionerCache map[string]ResourceProvisioner @@ -80,12 +78,12 @@ func (ctx *BuiltinEvalContext) Input() UIInput { return ctx.InputValue } -func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) { +func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProvider, error) { ctx.once.Do(ctx.init) // If we already initialized, it is an error - if p := ctx.Provider(n); p != nil { - return nil, fmt.Errorf("Provider '%s' already initialized", n) + if p := ctx.Provider(name); p != nil { + return nil, fmt.Errorf("Provider '%s' already initialized", name) } // Warning: make sure to acquire these locks AFTER the call to Provider @@ -93,18 +91,12 @@ func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - key := PathCacheKey(providerPath) - - typeName := strings.SplitN(n, ".", 2)[0] - p, err := ctx.Components.ResourceProvider(typeName, key) + p, err := ctx.Components.ResourceProvider(typeName, name) if err != nil { return nil, err } - ctx.ProviderCache[key] = p + ctx.ProviderCache[name] = p return p, nil } @@ -114,11 +106,7 @@ func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider { ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - - return ctx.ProviderCache[PathCacheKey(providerPath)] + return ctx.ProviderCache[n] } func (ctx *BuiltinEvalContext) CloseProvider(n string) error { @@ -127,15 +115,11 @@ func (ctx *BuiltinEvalContext) CloseProvider(n string) error { ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - var provider interface{} - provider = ctx.ProviderCache[PathCacheKey(providerPath)] + provider = ctx.ProviderCache[n] if provider != nil { if p, ok := provider.(ResourceProviderCloser); ok { - delete(ctx.ProviderCache, PathCacheKey(providerPath)) + delete(ctx.ProviderCache, n) return p.Close() } } @@ -149,28 +133,9 @@ func (ctx *BuiltinEvalContext) ConfigureProvider( if p == nil { return fmt.Errorf("Provider '%s' not initialized", n) } - - if err := ctx.SetProviderConfig(n, cfg); err != nil { - return nil - } - return p.Configure(cfg) } -func (ctx *BuiltinEvalContext) SetProviderConfig( - n string, cfg *ResourceConfig) error { - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg - 
ctx.ProviderLock.Unlock() - - return nil -} - func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() @@ -203,27 +168,6 @@ func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface ctx.ProviderLock.Unlock() } -func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - // Make a copy of the path so we can safely edit it - path := ctx.Path() - pathCopy := make([]string, len(path)+1) - copy(pathCopy, path) - - // Go up the tree. - for i := len(path) - 1; i >= 0; i-- { - pathCopy[i+1] = n - k := PathCacheKey(pathCopy[:i+2]) - if v, ok := ctx.ProviderConfigCache[k]; ok { - return v - } - } - - return nil -} - func (ctx *BuiltinEvalContext) InitProvisioner( n string) (ResourceProvisioner, error) { ctx.once.Do(ctx.init) @@ -289,6 +233,7 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { func (ctx *BuiltinEvalContext) Interpolate( cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { + if cfg != nil { scope := &InterpolationScope{ Path: ctx.Path(), @@ -311,6 +256,35 @@ func (ctx *BuiltinEvalContext) Interpolate( return result, nil } +func (ctx *BuiltinEvalContext) InterpolateProvider( + pc *config.ProviderConfig, r *Resource) (*ResourceConfig, error) { + + var cfg *config.RawConfig + + if pc != nil && pc.RawConfig != nil { + scope := &InterpolationScope{ + Path: ctx.Path(), + Resource: r, + } + + cfg = pc.RawConfig + + vs, err := ctx.Interpolater.Values(scope, cfg.Variables) + if err != nil { + return nil, err + } + + // Do the interpolation + if err := cfg.Interpolate(vs); err != nil { + return nil, err + } + } + + result := NewResourceConfig(cfg) + result.interpolateForce() + return result, nil +} + func (ctx *BuiltinEvalContext) Path() []string { return ctx.PathValue } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go index 4f90d5b..6464517 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go @@ -45,14 +45,6 @@ type MockEvalContext struct { ConfigureProviderConfig *ResourceConfig ConfigureProviderError error - SetProviderConfigCalled bool - SetProviderConfigName string - SetProviderConfigConfig *ResourceConfig - - ParentProviderConfigCalled bool - ParentProviderConfigName string - ParentProviderConfigConfig *ResourceConfig - InitProvisionerCalled bool InitProvisionerName string InitProvisionerProvisioner ResourceProvisioner @@ -72,6 +64,12 @@ type MockEvalContext struct { InterpolateConfigResult *ResourceConfig InterpolateError error + InterpolateProviderCalled bool + InterpolateProviderConfig *config.ProviderConfig + InterpolateProviderResource *Resource + InterpolateProviderConfigResult *ResourceConfig + InterpolateProviderError error + PathCalled bool PathPath []string @@ -109,7 +107,7 @@ func (c *MockEvalContext) Input() UIInput { return c.InputInput } -func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) { +func (c *MockEvalContext) InitProvider(t, n string) (ResourceProvider, error) { c.InitProviderCalled = true c.InitProviderName = n return c.InitProviderProvider, c.InitProviderError @@ -134,20 +132,6 @@ func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error return c.ConfigureProviderError } -func (c *MockEvalContext) 
SetProviderConfig(
-	n string, cfg *ResourceConfig) error {
-	c.SetProviderConfigCalled = true
-	c.SetProviderConfigName = n
-	c.SetProviderConfigConfig = cfg
-	return nil
-}
-
-func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
-	c.ParentProviderConfigCalled = true
-	c.ParentProviderConfigName = n
-	return c.ParentProviderConfigConfig
-}
-
 func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
 	c.ProviderInputCalled = true
 	c.ProviderInputName = n
@@ -186,6 +170,14 @@ func (c *MockEvalContext) Interpolate(
 	return c.InterpolateConfigResult, c.InterpolateError
 }
 
+func (c *MockEvalContext) InterpolateProvider(
+	config *config.ProviderConfig, resource *Resource) (*ResourceConfig, error) {
+	c.InterpolateProviderCalled = true
+	c.InterpolateProviderConfig = config
+	c.InterpolateProviderResource = resource
+	return c.InterpolateProviderConfigResult, c.InterpolateError
+}
+
 func (c *MockEvalContext) Path() []string {
 	c.PathCalled = true
 	return c.PathPath
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
index c35f908..26205ce 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -6,6 +6,7 @@ import (
 	"strings"
 
 	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/version"
 )
 
 // EvalCompareDiff is an EvalNode implementation that compares two diffs
@@ -60,7 +61,7 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
 			"\n"+
 			"Also include as much context as you can about your config, state, "+
 			"and the steps you performed to trigger this error.\n",
-			n.Info.Id, Version, n.Info.Id, reason, one, two)
+			n.Info.Id, version.Version, n.Info.Id, reason, one, two)
 	}
 
 	return nil, nil
@@ -255,11 +256,15 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
 	containers := groupContainers(diff)
 	keep := map[string]bool{}
 	for _, v := range containers {
-		if v.keepDiff() {
+		if v.keepDiff(ignorableAttrKeys) {
 			// At least one key has changes, so list all the sibling keys
-			// to keep in the diff.
+			// to keep in the diff
 			for k := range v {
 				keep[k] = true
+				// this key may have been added by the user to ignore, but
+				// if it's a subkey in a container, we need to un-ignore it
+				// to keep the complete container.
+ delete(ignorableAttrKeys, k) } } } @@ -291,10 +296,17 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { // a group of key-*ResourceAttrDiff pairs from the same flatmapped container type flatAttrDiff map[string]*ResourceAttrDiff -// we need to keep all keys if any of them have a diff -func (f flatAttrDiff) keepDiff() bool { - for _, v := range f { - if !v.Empty() && !v.NewComputed { +// we need to keep all keys if any of them have a diff that's not ignored +func (f flatAttrDiff) keepDiff(ignoreChanges map[string]bool) bool { + for k, v := range f { + ignore := false + for attr := range ignoreChanges { + if strings.HasPrefix(k, attr) { + ignore = true + } + } + + if !v.Empty() && !v.NewComputed && !ignore { return true } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go index 6825ff5..6a78a6b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go @@ -1,17 +1,49 @@ package terraform -import "github.com/hashicorp/terraform/config" +import ( + "log" + + "github.com/hashicorp/terraform/config" +) // EvalInterpolate is an EvalNode implementation that takes a raw // configuration and interpolates it. type EvalInterpolate struct { - Config *config.RawConfig - Resource *Resource - Output **ResourceConfig + Config *config.RawConfig + Resource *Resource + Output **ResourceConfig + ContinueOnErr bool } func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) { rc, err := ctx.Interpolate(n.Config, n.Resource) + if err != nil { + if n.ContinueOnErr { + log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err) + return nil, EvalEarlyExitError{} + } + return nil, err + } + + if n.Output != nil { + *n.Output = rc + } + + return nil, nil +} + +// EvalInterpolateProvider is an EvalNode implementation that takes a +// ProviderConfig and interpolates it. Provider configurations are the only +// "inherited" type of configuration we have, and the original raw config may +// have a different interpolation scope. +type EvalInterpolateProvider struct { + Config *config.ProviderConfig + Resource *Resource + Output **ResourceConfig +} + +func (n *EvalInterpolateProvider) Eval(ctx EvalContext) (interface{}, error) { + rc, err := ctx.InterpolateProvider(n.Config, n.Resource) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go new file mode 100644 index 0000000..a4b2a50 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go @@ -0,0 +1,86 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// EvalLocal is an EvalNode implementation that evaluates the +// expression for a local value and writes it into a transient part of +// the state. +type EvalLocal struct { + Name string + Value *config.RawConfig +} + +func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { + cfg, err := ctx.Interpolate(n.Value, nil) + if err != nil { + return nil, fmt.Errorf("local.%s: %s", n.Name, err) + } + + state, lock := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write local value to nil state") + } + + // Get a write lock so we can access the state + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. 
+ mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + mod = state.AddModule(ctx.Path()) + } + + // Get the value from the config + var valueRaw interface{} = config.UnknownVariableValue + if cfg != nil { + var ok bool + valueRaw, ok = cfg.Get("value") + if !ok { + valueRaw = "" + } + if cfg.IsComputed("value") { + valueRaw = config.UnknownVariableValue + } + } + + if mod.Locals == nil { + // initialize + mod.Locals = map[string]interface{}{} + } + mod.Locals[n.Name] = valueRaw + + return nil, nil +} + +// EvalDeleteLocal is an EvalNode implementation that deletes a Local value +// from the state. Locals aren't persisted, but we don't need to evaluate them +// during destroy. +type EvalDeleteLocal struct { + Name string +} + +func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + if state == nil { + return nil, nil + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + delete(mod.Locals, n.Name) + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go index cf61781..a834627 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go @@ -41,15 +41,16 @@ type EvalWriteOutput struct { Name string Sensitive bool Value *config.RawConfig + // ContinueOnErr allows interpolation to fail during Input + ContinueOnErr bool } // TODO: test func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { + // This has to run before we have a state lock, since interpolation also + // reads the state cfg, err := ctx.Interpolate(n.Value, nil) - if err != nil { - // Log error but continue anyway - log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err) - } + // handle the error after we have the module from the state state, lock := ctx.State() if state == nil { @@ -59,13 +60,27 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { // Get a write lock so we can access this instance lock.Lock() defer lock.Unlock() - // Look for the module state. If we don't have one, create it. mod := state.ModuleByPath(ctx.Path()) if mod == nil { mod = state.AddModule(ctx.Path()) } + // handling the interpolation error + if err != nil { + if n.ContinueOnErr || flagWarnOutputErrors { + log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err) + // if we're continuing, make sure the output is included, and + // marked as unknown + mod.Outputs[n.Name] = &OutputState{ + Type: "string", + Value: config.UnknownVariableValue, + } + return nil, EvalEarlyExitError{} + } + return nil, err + } + // Get the value from the config var valueRaw interface{} = config.UnknownVariableValue if cfg != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go index 092fd18..61f6ff9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go @@ -6,17 +6,6 @@ import ( "github.com/hashicorp/terraform/config" ) -// EvalSetProviderConfig sets the parent configuration for a provider -// without configuring that provider, validating it, etc. 
-type EvalSetProviderConfig struct { - Provider string - Config **ResourceConfig -} - -func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) { - return nil, ctx.SetProviderConfig(n.Provider, *n.Config) -} - // EvalBuildProviderConfig outputs a *ResourceConfig that is properly // merged with parents and inputs on top of what is configured in the file. type EvalBuildProviderConfig struct { @@ -28,7 +17,7 @@ type EvalBuildProviderConfig struct { func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { cfg := *n.Config - // If we have a configuration set, then merge that in + // If we have an Input configuration set, then merge that in if input := ctx.ProviderInput(n.Provider); input != nil { // "input" is a map of the subset of config values that were known // during the input walk, set by EvalInputProvider. Note that @@ -40,13 +29,7 @@ func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { return nil, err } - merged := cfg.raw.Merge(rc) - cfg = NewResourceConfig(merged) - } - - // Get the parent configuration if there is one - if parent := ctx.ParentProviderConfig(n.Provider); parent != nil { - merged := cfg.raw.Merge(parent.raw) + merged := rc.Merge(cfg.raw) cfg = NewResourceConfig(merged) } @@ -69,11 +52,12 @@ func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { // and returns nothing. The provider can be retrieved again with the // EvalGetProvider node. type EvalInitProvider struct { - Name string + TypeName string + Name string } func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvider(n.Name) + return ctx.InitProvider(n.TypeName, n.Name) } // EvalCloseProvider is an EvalNode implementation that closes provider @@ -116,12 +100,8 @@ type EvalInputProvider struct { } func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) { - // If we already configured this provider, then don't do this again - if v := ctx.ProviderInput(n.Name); v != nil { - return nil, nil - } - rc := *n.Config + orig := rc.DeepCopy() // Wrap the input into a namespace input := &PrefixUIInput{ @@ -138,27 +118,20 @@ func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) { "Error configuring %s: %s", n.Name, err) } - // Set the input that we received so that child modules don't attempt - // to ask for input again. + // We only store values that have changed through Input. + // The goal is to cache the input responses, not to provide a complete + // config for other providers. + confMap := make(map[string]interface{}) if config != nil && len(config.Config) > 0 { - // This repository of provider input results on the context doesn't - // retain config.ComputedKeys, so we need to filter those out here - // in order that later users of this data won't try to use the unknown - // value placeholder as if it were a literal value. This map is just - // of known values we've been able to complete so far; dynamic stuff - // will be merged in by EvalBuildProviderConfig on subsequent - // (post-input) walks.
- confMap := config.Config - if config.ComputedKeys != nil { - for _, key := range config.ComputedKeys { - delete(confMap, key) + // any values that weren't in the original ResourceConfig will be cached + for k, v := range config.Config { + if _, ok := orig.Config[k]; !ok { + confMap[k] = v } } - - ctx.SetProviderInput(n.Name, confMap) - } else { - ctx.SetProviderInput(n.Name, map[string]interface{}{}) } + ctx.SetProviderInput(n.Name, confMap) + return nil, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go index 126a0e6..1182690 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go @@ -1,6 +1,8 @@ package terraform -import "fmt" +import ( + "fmt" +) // EvalReadState is an EvalNode implementation that reads the // primary InstanceState for a specific resource out of the state. @@ -212,37 +214,6 @@ func writeInstanceToState( return nil, nil } -// EvalClearPrimaryState is an EvalNode implementation that clears the primary -// instance from a resource state. -type EvalClearPrimaryState struct { - Name string -} - -func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() - - // Get a read lock so we can access this instance - lock.RLock() - defer lock.RUnlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - // Look for the resource state. If we don't have one, then it is okay. - rs := mod.Resources[n.Name] - if rs == nil { - return nil, nil - } - - // Clear primary from the resource state - rs.Primary = nil - - return nil, nil -} - // EvalDeposeState is an EvalNode implementation that takes the primary // out of a state and makes it Deposed.
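// The caching rule in EvalInputProvider above, in isolation: only keys
// that were absent from the original ResourceConfig are remembered as
// input responses. Sample maps are illustrative.
package main

import "fmt"

func newlyProvided(orig, answered map[string]interface{}) map[string]interface{} {
	confMap := make(map[string]interface{})
	for k, v := range answered {
		if _, ok := orig[k]; !ok {
			confMap[k] = v
		}
	}
	return confMap
}

func main() {
	orig := map[string]interface{}{"region": "us-east-1"}
	answered := map[string]interface{}{
		"region":     "us-east-1", // already configured, so not cached
		"access_key": "from-prompt",
	}
	fmt.Println(newlyProvided(orig, answered)) // map[access_key:from-prompt]
}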
This is done at the beginning of // create-before-destroy calls so that the create can create while preserving diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go index 478aa64..3e5a84c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go @@ -144,16 +144,20 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) // For type=ssh only (enforced in ssh communicator) PrivateKey interface{} `mapstructure:"private_key"` + HostKey interface{} `mapstructure:"host_key"` Agent interface{} `mapstructure:"agent"` BastionHost interface{} `mapstructure:"bastion_host"` + BastionHostKey interface{} `mapstructure:"bastion_host_key"` BastionPort interface{} `mapstructure:"bastion_port"` BastionUser interface{} `mapstructure:"bastion_user"` BastionPassword interface{} `mapstructure:"bastion_password"` BastionPrivateKey interface{} `mapstructure:"bastion_private_key"` + AgentIdentity interface{} `mapstructure:"agent_identity"` // For type=winrm only (enforced in winrm communicator) HTTPS interface{} `mapstructure:"https"` Insecure interface{} `mapstructure:"insecure"` + NTLM interface{} `mapstructure:"use_ntlm"` CACert interface{} `mapstructure:"cacert"` } diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go index 00392ef..0c3da48 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go @@ -1,17 +1,24 @@ package terraform import ( + "strings" + "github.com/hashicorp/terraform/config" ) // ProviderEvalTree returns the evaluation tree for initializing and // configuring providers. 
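// The connConfig struct above is driven by mapstructure tags, which is
// also how validation can report unrecognised connection arguments. A
// sketch of that decode pattern with a trimmed field set; the sample
// map and the "oops" key are illustrative.
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type connConfig struct {
	Type    interface{} `mapstructure:"type"`
	HostKey interface{} `mapstructure:"host_key"`
	NTLM    interface{} `mapstructure:"use_ntlm"`
}

func main() {
	raw := map[string]interface{}{"type": "winrm", "use_ntlm": true, "oops": 1}

	var cc connConfig
	md := &mapstructure.Metadata{}
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata: md,
		Result:   &cc,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(raw); err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %+v, unknown keys: %v\n", cc, md.Unused)
}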
-func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { +func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) EvalNode { var provider ResourceProvider var resourceConfig *ResourceConfig + typeName := strings.SplitN(n.NameValue, ".", 2)[0] + seq := make([]EvalNode, 0, 5) - seq = append(seq, &EvalInitProvider{Name: n}) + seq = append(seq, &EvalInitProvider{ + TypeName: typeName, + Name: n.Name(), + }) // Input stuff seq = append(seq, &EvalOpFilter{ @@ -19,20 +26,20 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Name: n.Name(), Output: &provider, }, - &EvalInterpolate{ + &EvalInterpolateProvider{ Config: config, Output: &resourceConfig, }, &EvalBuildProviderConfig{ - Provider: n, + Provider: n.NameValue, Config: &resourceConfig, Output: &resourceConfig, }, &EvalInputProvider{ - Name: n, + Name: n.NameValue, Provider: &provider, Config: &resourceConfig, }, @@ -45,15 +52,15 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Name: n.Name(), Output: &provider, }, - &EvalInterpolate{ + &EvalInterpolateProvider{ Config: config, Output: &resourceConfig, }, &EvalBuildProviderConfig{ - Provider: n, + Provider: n.NameValue, Config: &resourceConfig, Output: &resourceConfig, }, @@ -61,10 +68,6 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Provider: &provider, Config: &resourceConfig, }, - &EvalSetProviderConfig{ - Provider: n, - Config: &resourceConfig, - }, }, }, }) @@ -75,22 +78,18 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Name: n.Name(), Output: &provider, }, - &EvalInterpolate{ + &EvalInterpolateProvider{ Config: config, Output: &resourceConfig, }, &EvalBuildProviderConfig{ - Provider: n, + Provider: n.NameValue, Config: &resourceConfig, Output: &resourceConfig, }, - &EvalSetProviderConfig{ - Provider: n, - Config: &resourceConfig, - }, }, }, }) @@ -102,7 +101,7 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalConfigProvider{ - Provider: n, + Provider: n.Name(), Config: &resourceConfig, }, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/features.go b/vendor/github.com/hashicorp/terraform/terraform/features.go new file mode 100644 index 0000000..97c77bd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/features.go @@ -0,0 +1,7 @@ +package terraform + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go index 48ce6a3..735ec4e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go @@ -70,7 +70,7 @@ func (g *Graph) walk(walker GraphWalker) error { // Walk the graph. 
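// The TypeName handed to EvalInitProvider above is the first
// dot-separated segment of the provider node name, so an aliased
// provider such as "aws.west" still resolves to the "aws" plugin:
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, name := range []string{"aws", "aws.west", "google.staging"} {
		typeName := strings.SplitN(name, ".", 2)[0]
		fmt.Printf("%-16s -> plugin type %q\n", name, typeName)
	}
}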
var walkFn dag.WalkFunc walkFn = func(v dag.Vertex) (rerr error) { - log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v)) + log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v)) g.DebugVisitInfo(v, g.debugName) // If we have a panic wrap GraphWalker and a panic occurs, recover @@ -118,7 +118,7 @@ func (g *Graph) walk(walker GraphWalker) error { // Allow the walker to change our tree if needed. Eval, // then callback with the output. - log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v)) + log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v)) g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) @@ -132,7 +132,7 @@ func (g *Graph) walk(walker GraphWalker) error { // If the node is dynamically expanded, then expand it if ev, ok := v.(GraphNodeDynamicExpandable); ok { log.Printf( - "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph", + "[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph", path, dag.VertexName(v)) @@ -154,7 +154,7 @@ func (g *Graph) walk(walker GraphWalker) error { // If the node has a subgraph, then walk the subgraph if sn, ok := v.(GraphNodeSubgraph); ok { log.Printf( - "[DEBUG] vertex '%s.%s': walking subgraph", + "[TRACE] vertex '%s.%s': walking subgraph", path, dag.VertexName(v)) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go index 38a90f2..0c2b233 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go @@ -87,12 +87,8 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { // Attach the state &AttachStateTransformer{State: b.State}, - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, + // add providers + TransformProviders(b.Providers, concreteProvider, b.Module), // Destruction ordering &DestroyEdgeTransformer{Module: b.Module, State: b.State}, @@ -108,15 +104,36 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { // Add root variables &RootVariableTransformer{Module: b.Module}, + // Add the local values + &LocalTransformer{Module: b.Module}, + // Add the outputs &OutputTransformer{Module: b.Module}, // Add module variables &ModuleVariableTransformer{Module: b.Module}, + // Remove modules no longer present in the config + &RemovedModuleTransformer{Module: b.Module, State: b.State}, + // Connect references so ordering is correct &ReferenceTransformer{}, + // Handle destroy time transformations for output and local values. + // Reverse the edges from outputs and locals, so that + // interpolations don't fail during destroy. + // Create a destroy node for outputs to remove them from the state. + // Prune unreferenced values, which may have interpolations that can't + // be resolved. 
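// The flagWarnOutputErrors feature flag defined in features.go above
// is presence-based: any non-empty value of TF_WARN_OUTPUT_ERRORS
// restores the warn-and-continue behaviour in EvalWriteOutput. A
// standalone equivalent of the check:
package main

import (
	"fmt"
	"os"
)

func warnOutputErrors() bool {
	return os.Getenv("TF_WARN_OUTPUT_ERRORS") != ""
}

func main() {
	fmt.Println(warnOutputErrors()) // false while unset
	os.Setenv("TF_WARN_OUTPUT_ERRORS", "1")
	fmt.Println(warnOutputErrors()) // true
}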
+ GraphTransformIf( + func() bool { return b.Destroy }, + GraphTransformMulti( + &DestroyValueReferenceTransformer{}, + &DestroyOutputTransformer{}, + &PruneUnusedValuesTransformer{}, + ), + ), + // Add the node to fix the state count boundaries &CountBoundaryTransformer{}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go index 7070c59..07a1eaf 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go @@ -52,12 +52,7 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer { // Add the import steps &ImportStateTransformer{Targets: b.ImportTargets}, - // Provider-related transformations - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: mod}, + TransformProviders(b.Providers, concreteProvider, mod), // This validates that the providers only depend on variables &ImportProviderValidateTransformer{}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go index 4b29bbb..f8dd0fc 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go @@ -71,6 +71,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { Module: b.Module, }, + // Add the local values + &LocalTransformer{Module: b.Module}, + // Add the outputs &OutputTransformer{Module: b.Module}, @@ -81,6 +84,12 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { Module: b.Module, }, + // Create orphan output nodes + &OrphanOutputTransformer{ + Module: b.Module, + State: b.State, + }, + // Attach the configuration to any resources &AttachResourceConfigTransformer{Module: b.Module}, @@ -90,12 +99,7 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { // Add root variables &RootVariableTransformer{Module: b.Module}, - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, + TransformProviders(b.Providers, b.ConcreteProvider, b.Module), // Provisioner-related transformations. Only add these if requested. GraphTransformIf( @@ -107,7 +111,12 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { ), // Add module variables - &ModuleVariableTransformer{Module: b.Module}, + &ModuleVariableTransformer{ + Module: b.Module, + }, + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Module: b.Module, State: b.State}, // Connect so that the references are ready for targeting. We'll // have to connect again later for providers and so on. 
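// GraphTransformIf/GraphTransformMulti above follow a small combinator
// pattern: one transformer defers a predicate, the other fans out to a
// list. A re-creation with stand-in types (the real transformers
// operate on *Graph):
package main

import "fmt"

type Transformer interface {
	Transform() error
}

type step string

func (s step) Transform() error {
	fmt.Println("run:", string(s))
	return nil
}

type transformIf struct {
	If   func() bool
	Then Transformer
}

func (t transformIf) Transform() error {
	if !t.If() {
		return nil
	}
	return t.Then.Transform()
}

type transformMulti []Transformer

func (m transformMulti) Transform() error {
	for _, t := range m {
		if err := t.Transform(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	destroy := true
	node := transformIf{
		If: func() bool { return destroy },
		Then: transformMulti{
			step("reverse value edges"),
			step("add output destroy nodes"),
			step("prune unused values"),
		},
	}
	node.Transform()
}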
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go index 3d3e968..9638d4c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go @@ -126,12 +126,10 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { // Add root variables &RootVariableTransformer{Module: b.Module}, - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, + TransformProviders(b.Providers, concreteProvider, b.Module), + + // Add the local values + &LocalTransformer{Module: b.Module}, // Add the outputs &OutputTransformer{Module: b.Module}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go index e63b460..89f376e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go @@ -32,7 +32,6 @@ type ContextGraphWalker struct { interpolaterVars map[string]map[string]interface{} interpolaterVarLock sync.Mutex providerCache map[string]ResourceProvider - providerConfigCache map[string]*ResourceConfig providerLock sync.Mutex provisionerCache map[string]ResourceProvisioner provisionerLock sync.Mutex @@ -73,7 +72,6 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { InputValue: w.Context.uiInput, Components: w.Context.components, ProviderCache: w.providerCache, - ProviderConfigCache: w.providerConfigCache, ProviderInputConfig: w.Context.providerInputConfig, ProviderLock: &w.providerLock, ProvisionerCache: w.provisionerCache, @@ -151,7 +149,6 @@ func (w *ContextGraphWalker) ExitEvalTree( func (w *ContextGraphWalker) init() { w.contexts = make(map[string]*BuiltinEvalContext, 5) w.providerCache = make(map[string]ResourceProvider, 5) - w.providerConfigCache = make(map[string]*ResourceConfig, 5) w.provisionerCache = make(map[string]ResourceProvisioner, 5) w.interpolaterVars = make(map[string]map[string]interface{}, 5) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go index e97b485..95ef4e9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" @@ -10,7 +10,7 @@ var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} func (i GraphType) String() string { if i >= GraphType(len(_GraphType_index)-1) { - return fmt.Sprintf("GraphType(%d)", i) + return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" } return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go index f69267c..b8e7d1f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go +++ 
b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" @@ -10,7 +10,7 @@ var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} func (i InstanceType) String() string { if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return fmt.Sprintf("InstanceType(%d)", i) + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" } return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go index 22ddce6..4f4e178 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go @@ -90,6 +90,8 @@ func (i *Interpolater) Values( err = i.valueSimpleVar(scope, n, v, result) case *config.TerraformVariable: err = i.valueTerraformVar(scope, n, v, result) + case *config.LocalVariable: + err = i.valueLocalVar(scope, n, v, result) case *config.UserVariable: err = i.valueUserVar(scope, n, v, result) default: @@ -140,7 +142,6 @@ func (i *Interpolater) valueModuleVar( n string, v *config.ModuleVariable, result map[string]ast.Variable) error { - // Build the path to the child module we want path := make([]string, len(scope.Path), len(scope.Path)+1) copy(path, scope.Path) @@ -317,7 +318,6 @@ func (i *Interpolater) valueTerraformVar( n string, v *config.TerraformVariable, result map[string]ast.Variable) error { - // "env" is supported for backward compatibility, but it's deprecated and // so we won't advertise it as being allowed in the error message. It will // be removed in a future version of Terraform. @@ -335,6 +335,59 @@ func (i *Interpolater) valueTerraformVar( return nil } +func (i *Interpolater) valueLocalVar( + scope *InterpolationScope, + n string, + v *config.LocalVariable, + result map[string]ast.Variable, +) error { + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + modTree := i.Module + if len(scope.Path) > 1 { + modTree = i.Module.Child(scope.Path[1:]) + } + + // Get the local value from the configuration so we can verify + // that it has actually been declared and so we can access its + // definition if we need to. + var cl *config.Local + for _, l := range modTree.Config().Locals { + if l.Name == v.Name { + cl = l + break + } + } + + if cl == nil { + return fmt.Errorf("%s: no local value of this name has been declared", n) + } + + // Get the relevant module + module := i.State.ModuleByPath(scope.Path) + if module == nil { + result[n] = unknownVariable() + return nil + } + + rawV, exists := module.Locals[v.Name] + if !exists { + result[n] = unknownVariable() + return nil + } + + varV, err := hil.InterfaceToVariable(rawV) + if err != nil { + // Should never happen, since interpolation should always produce + // something we can feed back into interpolation. + return fmt.Errorf("%s: %s", n, err) + } + + result[n] = varV + return nil +} + func (i *Interpolater) valueUserVar( scope *InterpolationScope, n string, @@ -465,6 +518,16 @@ func (i *Interpolater) computeResourceVariable( return &v, err } + // special case for the "id" field which is usually also an attribute + if v.Field == "id" && r.Primary.ID != "" { + // This is usually pulled from the attributes, but is sometimes missing + // during destroy. We can return the ID field in this case.
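// Resolution order in valueLocalVar above: the local must be declared
// in the module's configuration, its current value is then read from
// module state, and a missing state entry produces the unknown
// sentinel. Stand-in types; the real code also converts the value via
// hil.InterfaceToVariable.
package main

import (
	"errors"
	"fmt"
)

const unknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"

func resolveLocal(declared map[string]bool, locals map[string]interface{}, name string) (interface{}, error) {
	if !declared[name] {
		return nil, errors.New("local." + name + ": no local value of this name has been declared")
	}
	v, ok := locals[name]
	if !ok {
		return unknownVariableValue, nil // not evaluated yet on this walk
	}
	return v, nil
}

func main() {
	declared := map[string]bool{"region": true}
	locals := map[string]interface{}{}
	fmt.Println(resolveLocal(declared, locals, "region"))  // unknown sentinel
	fmt.Println(resolveLocal(declared, locals, "missing")) // declaration error
}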
+ // FIXME: there should only be one ID to rule them all. + log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId()) + v, err := hil.InterfaceToVariable(r.Primary.ID) + return &v, err + } + // computed list or map attribute _, isList = r.Primary.Attributes[v.Field+".#"] _, isMap = r.Primary.Attributes[v.Field+".%"] @@ -602,6 +665,11 @@ func (i *Interpolater) computeResourceMultiVariable( continue } + if v.Field == "id" && r.Primary.ID != "" { + log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId()) + values = append(values, r.Primary.ID) + } + // computed list or map attribute _, isList := r.Primary.Attributes[v.Field+".#"] _, isMap := r.Primary.Attributes[v.Field+".%"] @@ -646,7 +714,6 @@ func (i *Interpolater) computeResourceMultiVariable( func (i *Interpolater) interpolateComplexTypeAttribute( resourceID string, attributes map[string]string) (ast.Variable, error) { - // We can now distinguish between lists and maps in state by the count field: // - lists (and by extension, sets) use the traditional .# notation // - maps use the newer .% notation @@ -722,7 +789,8 @@ func (i *Interpolater) resourceCountMax( // If we're NOT applying, then we assume we can read the count // from the state. Plan and so on may not have any state yet so // we do a full interpolation. - if i.Operation != walkApply { + // Don't forget walkDestroy, which is a special case of walkApply + if !(i.Operation == walkApply || i.Operation == walkDestroy) { if cr == nil { return 0, nil } @@ -753,7 +821,13 @@ func (i *Interpolater) resourceCountMax( // use "cr.Count()" but that doesn't work if the count is interpolated // and we can't guarantee that so we instead depend on the state. max := -1 - for k, _ := range ms.Resources { + for k, s := range ms.Resources { + // This resource may have been just removed, in which case the Primary + // may be nil, or just empty. + if s == nil || s.Primary == nil || len(s.Primary.Attributes) == 0 { + continue + } + // Get the index number for this resource index := "" if k == id { diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go index b9f44a0..4594cb6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go +++ b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go @@ -17,7 +17,6 @@ import ( // present in the configuration. This is guaranteed not to happen for any // configuration that has passed a call to Config.Validate(). func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module { - // First we walk the configuration tree to build the overall structure // and capture the explicit/implicit/inherited provider dependencies. 
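// resourceCountMax above infers the count from state keys of the form
// "<id>" or "<id>.<index>", now skipping entries whose primary
// instance has been removed. The key scan, reduced to a function with
// illustrative keys:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func countFromState(keys []string, id string) int {
	max := -1
	for _, k := range keys {
		var index string
		switch {
		case k == id:
			index = "0" // no explicit index means index 0
		case strings.HasPrefix(k, id+"."):
			index = strings.TrimPrefix(k, id+".")
		default:
			continue
		}
		i, err := strconv.Atoi(index)
		if err != nil {
			continue
		}
		if i > max {
			max = i
		}
	}
	return max + 1
}

func main() {
	keys := []string{"aws_instance.web.0", "aws_instance.web.2", "aws_instance.db"}
	fmt.Println(countFromState(keys, "aws_instance.web")) // 3
}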
deps := moduleTreeConfigDependencies(root, nil) diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go index 45129b3..d5ca641 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go @@ -27,6 +27,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er concreteResource := func(a *NodeAbstractResource) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider return &NodeRefreshableDataResourceInstance{ NodeAbstractResource: a, @@ -107,7 +108,9 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { // Get the state if we have it, if not we build it rs := n.ResourceState if rs == nil { - rs = &ResourceState{} + rs = &ResourceState{ + Provider: n.ResolvedProvider, + } } // If the config isn't empty we update the state @@ -145,7 +148,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { &EvalWriteState{ Name: stateId, ResourceType: rs.Type, - Provider: rs.Provider, + Provider: n.ResolvedProvider, Dependencies: rs.Dependencies, State: &state, // state is nil here }, @@ -185,7 +188,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { // provider configurations that need this data during // refresh/plan. &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, @@ -207,7 +210,7 @@ func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { &EvalWriteState{ Name: stateId, ResourceType: rs.Type, - Provider: rs.Provider, + Provider: n.ResolvedProvider, Dependencies: rs.Dependencies, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go new file mode 100644 index 0000000..d387222 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_local.go @@ -0,0 +1,66 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/config" +) + +// NodeLocal represents a named local value in a particular module. +// +// Local value nodes only have one operation, common to all walk types: +// evaluate the result and place it in state. +type NodeLocal struct { + PathValue []string + Config *config.Local +} + +func (n *NodeLocal) Name() string { + result := fmt.Sprintf("local.%s", n.Config.Name) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeLocal) Path() []string { + return n.PathValue +} + +// RemovableIfNotTargeted +func (n *NodeLocal) RemoveIfNotTargeted() bool { + return true +} + +// GraphNodeReferenceable +func (n *NodeLocal) ReferenceableName() []string { + name := fmt.Sprintf("local.%s", n.Config.Name) + return []string{name} +} + +// GraphNodeReferencer +func (n *NodeLocal) References() []string { + var result []string + result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) 
+ for _, v := range result { + split := strings.Split(v, "/") + for i, s := range split { + split[i] = s + ".destroy" + } + + result = append(result, strings.Join(split, "/")) + } + + return result +} + +// GraphNodeEvalable +func (n *NodeLocal) EvalTree() EvalNode { + return &EvalLocal{ + Name: n.Config.Name, + Value: n.Config.RawConfig, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go deleted file mode 100644 index 319df1e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go +++ /dev/null @@ -1,29 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// NodeDestroyableModule represents a module destruction. -type NodeDestroyableModuleVariable struct { - PathValue []string -} - -func (n *NodeDestroyableModuleVariable) Name() string { - result := "plan-destroy" - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeDestroyableModuleVariable) Path() []string { - return n.PathValue -} - -// GraphNodeEvalable -func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode { - return &EvalDiffDestroyModule{Path: n.PathValue} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go new file mode 100644 index 0000000..bb3e5ee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "fmt" + "log" + "reflect" +) + +// NodeModuleRemoved represents a module that is no longer in the +// config. +type NodeModuleRemoved struct { + PathValue []string +} + +func (n *NodeModuleRemoved) Name() string { + return fmt.Sprintf("%s (removed)", modulePrefixStr(n.PathValue)) +} + +// GraphNodeSubPath +func (n *NodeModuleRemoved) Path() []string { + return n.PathValue +} + +// GraphNodeEvalable +func (n *NodeModuleRemoved) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalDeleteModule{ + PathValue: n.PathValue, + }, + } +} + +func (n *NodeModuleRemoved) ReferenceGlobal() bool { + return true +} + +func (n *NodeModuleRemoved) References() []string { + return []string{modulePrefixStr(n.PathValue)} +} + +// EvalDeleteModule is an EvalNode implementation that removes an empty module +// entry from the state. +type EvalDeleteModule struct { + PathValue []string +} + +func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + if state == nil { + return nil, nil + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Make sure we have a clean state + // Destroyed resources aren't deleted, they're written with an ID of "". + state.prune() + + // find the module and delete it + for i, m := range state.Modules { + if reflect.DeepEqual(m.Path, n.PathValue) { + if !m.Empty() { + // a targeted apply may leave module resources even without a config, + // so just log this and return. + log.Printf("[DEBUG] cannot remove module %s, not empty", modulePrefixStr(n.PathValue)) + break + } + state.Modules = append(state.Modules[:i], state.Modules[i+1:]...) 
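// The References implementation above also registers a ".destroy"
// variant of every reference so destroy-ordering edges can resolve.
// Ranging over the slice while appending is safe because range
// captures the original length; this standalone version copies first.
// Sample reference strings are illustrative.
package main

import (
	"fmt"
	"strings"
)

func withDestroyRefs(refs []string) []string {
	result := append([]string(nil), refs...)
	for _, v := range refs {
		split := strings.Split(v, "/")
		for i, s := range split {
			split[i] = s + ".destroy"
		}
		result = append(result, strings.Join(split, "/"))
	}
	return result
}

func main() {
	fmt.Println(withDestroyRefs([]string{"aws_instance.web", "module.net/output.cidr"}))
}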
+ break + } + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go index 13fe8fc..66ff7d5 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go @@ -92,11 +92,24 @@ func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { // within the variables mapping. var config *ResourceConfig variables := make(map[string]interface{}) + return &EvalSequence{ Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Value, - Output: &config, + &EvalOpFilter{ + Ops: []walkOperation{walkInput}, + Node: &EvalInterpolate{ + Config: n.Value, + Output: &config, + ContinueOnErr: true, + }, + }, + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, + walkDestroy, walkValidate}, + Node: &EvalInterpolate{ + Config: n.Value, + Output: &config, + }, }, &EvalVariableBlock{ diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go index 9017a63..83e9925 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go @@ -69,12 +69,22 @@ func (n *NodeApplyableOutput) References() []string { // GraphNodeEvalable func (n *NodeApplyableOutput) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, - walkDestroy, walkInput, walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteOutput{ + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + // Don't let interpolation errors stop Input, since it happens + // before Refresh. + Ops: []walkOperation{walkInput}, + Node: &EvalWriteOutput{ + Name: n.Config.Name, + Sensitive: n.Config.Sensitive, + Value: n.Config.RawConfig, + ContinueOnErr: true, + }, + }, + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, + Node: &EvalWriteOutput{ Name: n.Config.Name, Sensitive: n.Config.Sensitive, Value: n.Config.RawConfig, @@ -83,3 +93,61 @@ }, } } + +// NodeDestroyableOutput represents an output that is "destroyable": +// its application will remove the output from the state. +type NodeDestroyableOutput struct { + PathValue []string + Config *config.Output // Config is the output in the config +} + +func (n *NodeDestroyableOutput) Name() string { + result := fmt.Sprintf("output.%s (destroy)", n.Config.Name) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeDestroyableOutput) Path() []string { + return n.PathValue +} + +// RemovableIfNotTargeted +func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// This will keep the destroy node in the graph if its corresponding output +// node is also in the destroy graph. +func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { + return true +} + +// GraphNodeReferencer +func (n *NodeDestroyableOutput) References() []string { + var result []string + result = append(result, n.Config.DependsOn...) + result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
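// The EvalOpFilter split above evaluates the same raw config twice:
// leniently during the Input walk (ContinueOnErr) and strictly on all
// later walks. A toy dispatcher showing only that shape:
package main

import "fmt"

type walkOperation int

const (
	walkInput walkOperation = iota
	walkPlan
	walkApply
)

type opFilter struct {
	ops  []walkOperation
	eval func()
}

func run(op walkOperation, filters []opFilter) {
	for _, f := range filters {
		for _, o := range f.ops {
			if o == op {
				f.eval()
			}
		}
	}
}

func main() {
	filters := []opFilter{
		{[]walkOperation{walkInput}, func() { fmt.Println("interpolate, warn on error") }},
		{[]walkOperation{walkPlan, walkApply}, func() { fmt.Println("interpolate, fail on error") }},
	}
	run(walkInput, filters)
	run(walkApply, filters)
}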
+ for _, v := range result { + split := strings.Split(v, "/") + for i, s := range split { + split[i] = s + ".destroy" + } + + result = append(result, strings.Join(split, "/")) + } + + return result +} + +// GraphNodeEvalable +func (n *NodeDestroyableOutput) EvalTree() EvalNode { + return &EvalDeleteOutput{ + Name: n.Config.Name, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go index 636a15d..0fd1554 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go @@ -19,6 +19,11 @@ func (n *NodeOutputOrphan) Name() string { return result } +// GraphNodeReferenceable +func (n *NodeOutputOrphan) ReferenceableName() []string { + return []string{"output." + n.OutputName} +} + // GraphNodeSubPath func (n *NodeOutputOrphan) Path() []string { return n.PathValue diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go index 8e2c176..2071ab1 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go @@ -7,5 +7,5 @@ type NodeApplyableProvider struct { // GraphNodeEvalable func (n *NodeApplyableProvider) EvalTree() EvalNode { - return ProviderEvalTree(n.NameValue, n.ProviderConfig()) + return ProviderEvalTree(n, n.ProviderConfig()) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go index 6cc8365..9e490f7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go @@ -2,6 +2,7 @@ package terraform import ( "fmt" + "strings" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/dag" @@ -24,13 +25,22 @@ type NodeAbstractProvider struct { Config *config.ProviderConfig } -func (n *NodeAbstractProvider) Name() string { - result := fmt.Sprintf("provider.%s", n.NameValue) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) +func ResolveProviderName(name string, path []string) string { + if strings.Contains(name, "provider.") { + // already resolved + return name + } + + name = fmt.Sprintf("provider.%s", name) + if len(path) >= 1 { + name = fmt.Sprintf("%s.%s", modulePrefixStr(path), name) } - return result + return name +} + +func (n *NodeAbstractProvider) Name() string { + return ResolveProviderName(n.NameValue, n.PathValue) } // GraphNodeSubPath @@ -60,12 +70,12 @@ func (n *NodeAbstractProvider) ProviderName() string { } // GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig { +func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig { if n.Config == nil { return nil } - return n.Config.RawConfig + return n.Config } // GraphNodeAttachProvider diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go index 25e7e62..a00bc46 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go @@ -20,7 +20,7 @@ func (n *NodeDisabledProvider) EvalTree() EvalNode { var resourceConfig *ResourceConfig return 
&EvalSequence{ Nodes: []EvalNode{ - &EvalInterpolate{ + &EvalInterpolateProvider{ Config: n.ProviderConfig(), Output: &resourceConfig, }, @@ -29,10 +29,6 @@ func (n *NodeDisabledProvider) EvalTree() EvalNode { Config: &resourceConfig, Output: &resourceConfig, }, - &EvalSetProviderConfig{ - Provider: n.ProviderName(), - Config: &resourceConfig, - }, }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go index 50bb707..73509c8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go @@ -33,6 +33,9 @@ type NodeAbstractResource struct { ResourceState *ResourceState // ResourceState is the ResourceState for this Targets []ResourceAddress // Set from GraphNodeTargetable + + // The address of the provider this resource will use + ResolvedProvider string } func (n *NodeAbstractResource) Name() string { @@ -170,20 +173,24 @@ func (n *NodeAbstractResource) StateReferences() []string { return deps } +func (n *NodeAbstractResource) SetProvider(p string) { + n.ResolvedProvider = p +} + // GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() []string { +func (n *NodeAbstractResource) ProvidedBy() string { // If we have a config we prefer that above all else if n.Config != nil { - return []string{resourceProvider(n.Config.Type, n.Config.Provider)} + return resourceProvider(n.Config.Type, n.Config.Provider) } // If we have state, then we will use the provider from there if n.ResourceState != nil && n.ResourceState.Provider != "" { - return []string{n.ResourceState.Provider} + return n.ResourceState.Provider } // Use our type - return []string{resourceProvider(n.Addr.Type, "")} + return resourceProvider(n.Addr.Type, "") } // GraphNodeProvisionerConsumer diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go index 3599782..40ee1cf 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go @@ -124,6 +124,27 @@ func (n *NodeApplyableResource) evalTreeDataResource( Then: EvalNoop{}, }, + // Normally we interpolate count as a preparation step before + // a DynamicExpand, but an apply graph has pre-expanded nodes + // and so the count would otherwise never be interpolated. + // + // This is redundant when there are multiple instances created + // from the same config (count > 1) but harmless since the + // underlying structures have mutexes to make this concurrency-safe. + // + // In most cases this isn't actually needed because we dealt with + // all of the counts during the plan walk, but we do it here + // for completeness because other code assumes that the + // final count is always available during interpolation. + // + // Here we are just populating the interpolated value in-place + // inside this RawConfig object, like we would in + // NodeAbstractCountResource. 
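// Two helpers above pin down which provider a resource uses.
// ResolveProviderName normalises a name into a full provider address
// (modulePrefixStr is approximated here, assumed to skip the implicit
// "root" path element), and ProvidedBy picks one name with a fixed
// precedence: config, then state, then the resource type.
// resourceProvider is likewise approximated as "type prefix before the
// first underscore"; all sample values are illustrative.
package main

import (
	"fmt"
	"strings"
)

func resolveProviderName(name string, path []string) string {
	if strings.Contains(name, "provider.") {
		return name // already resolved
	}
	name = "provider." + name
	if len(path) > 1 {
		var parts []string
		for _, p := range path[1:] {
			parts = append(parts, "module", p)
		}
		name = strings.Join(parts, ".") + "." + name
	}
	return name
}

func resourceProvider(resourceType, explicit string) string {
	if explicit != "" {
		return explicit
	}
	return strings.SplitN(resourceType, "_", 2)[0]
}

func providedBy(hasConfig bool, cfgType, cfgProvider, stateProvider, addrType string) string {
	switch {
	case hasConfig:
		return resourceProvider(cfgType, cfgProvider)
	case stateProvider != "":
		return stateProvider
	default:
		return resourceProvider(addrType, "")
	}
}

func main() {
	fmt.Println(resolveProviderName("aws", []string{"root", "network"}))          // module.network.provider.aws
	fmt.Println(providedBy(true, "aws_instance", "aws.west", "", "aws_instance")) // aws.west
	fmt.Println(providedBy(false, "", "", "", "aws_instance"))                    // aws
}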
+ &EvalInterpolate{ + Config: n.Config.RawCount, + ContinueOnErr: true, + }, + // We need to re-interpolate the config here, rather than // just using the diff's values directly, because we've // potentially learned more variable values during the @@ -135,7 +156,7 @@ func (n *NodeApplyableResource) evalTreeDataResource( }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, @@ -158,7 +179,7 @@ func (n *NodeApplyableResource) evalTreeDataResource( &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, @@ -236,13 +257,35 @@ func (n *NodeApplyableResource) evalTreeManagedResource( }, }, + // Normally we interpolate count as a preparation step before + // a DynamicExpand, but an apply graph has pre-expanded nodes + // and so the count would otherwise never be interpolated. + // + // This is redundant when there are multiple instances created + // from the same config (count > 1) but harmless since the + // underlying structures have mutexes to make this concurrency-safe. + // + // In most cases this isn't actually needed because we dealt with + // all of the counts during the plan walk, but we need to do this + // in order to support interpolation of resource counts from + // apply-time-interpolated expressions, such as those in + // "provisioner" blocks. + // + // Here we are just populating the interpolated value in-place + // inside this RawConfig object, like we would in + // NodeAbstractCountResource. + &EvalInterpolate{ + Config: n.Config.RawCount, + ContinueOnErr: true, + }, + &EvalInterpolate{ Config: n.Config.RawConfig.Copy(), Resource: resource, Output: &resourceConfig, }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalReadState{ @@ -283,7 +326,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource( }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalReadState{ @@ -308,7 +351,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource( &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, @@ -332,7 +375,7 @@ func (n *NodeApplyableResource) evalTreeManagedResource( Else: &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go index c2efd2c..657bbee 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go @@ -102,8 +102,9 @@ func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) { // We want deposed resources in the state to be destroyed steps = append(steps, &DeposedTransformer{ - State: state, - View: n.Addr.stateId(), + State: state, + View: n.Addr.stateId(), + ResolvedProvider: n.ResolvedProvider, }) // Target @@ -148,7 +149,9 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { // Get our state rs := n.ResourceState if rs == nil { - rs = &ResourceState{} + rs = &ResourceState{ + Provider: n.ResolvedProvider, + } } var diffApply *InstanceDiff @@ -188,7 +191,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { 
&EvalInstanceInfo{Info: info}, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalReadState{ @@ -272,7 +275,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode { &EvalWriteState{ Name: stateId, ResourceType: n.Addr.Type, - Provider: rs.Provider, + Provider: n.ResolvedProvider, Dependencies: rs.Dependencies, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go index 52bbf88..1afae7a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go @@ -27,6 +27,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { concreteResource := func(a *NodeAbstractResource) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider return &NodePlannableResourceInstance{ NodeAbstractResource: a, @@ -37,6 +38,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider return &NodePlannableResourceOrphan{ NodeAbstractResource: a, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go index b529569..7d9fcdd 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go @@ -97,7 +97,7 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource( }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, @@ -112,7 +112,7 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource( &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, @@ -143,7 +143,7 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource( Output: &resourceConfig, }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, // Re-run validation to catch any errors we missed, e.g. 
type @@ -177,7 +177,7 @@ func (n *NodePlannableResourceInstance) evalTreeManagedResource( &EvalWriteState{ Name: stateId, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go index cd4fe92..697bd49 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go @@ -30,6 +30,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, concreteResource := func(a *NodeAbstractResource) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider return &NodeRefreshableManagedResourceInstance{ NodeAbstractResource: a, @@ -149,7 +150,7 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalReadState{ @@ -165,7 +166,7 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN &EvalWriteState{ Name: stateId, ResourceType: n.ResourceState.Type, - Provider: n.ResourceState.Provider, + Provider: n.ResolvedProvider, Dependencies: n.ResourceState.Dependencies, State: &state, }, @@ -212,15 +213,21 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState( // Determine the dependencies for the state. stateDeps := n.StateReferences() + // n.Config can be nil if the config and state don't match + var raw *config.RawConfig + if n.Config != nil { + raw = n.Config.RawConfig.Copy() + } + return &EvalSequence{ Nodes: []EvalNode{ &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), + Config: raw, Resource: resource, Output: &resourceConfig, }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, // Re-run validation to catch any errors we missed, e.g. 
type @@ -250,7 +257,7 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState( &EvalWriteState{ Name: stateID, ResourceType: n.Config.Type, - Provider: n.Config.Provider, + Provider: n.ResolvedProvider, Dependencies: stateDeps, State: &state, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go index f528f24..0df223d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go @@ -39,6 +39,7 @@ func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) concreteResource := func(a *NodeAbstractResource) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider return &NodeValidatableResourceInstance{ NodeAbstractResource: a, @@ -108,7 +109,7 @@ func (n *NodeValidatableResourceInstance) EvalTree() EvalNode { Config: &n.Config.RawConfig, }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalInterpolate{ diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go index ca99685..51dd412 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/path.go +++ b/vendor/github.com/hashicorp/terraform/terraform/path.go @@ -1,24 +1,10 @@ package terraform import ( - "crypto/md5" - "encoding/hex" + "strings" ) // PathCacheKey returns a cache key for a module path. -// -// TODO: test func PathCacheKey(path []string) string { - // There is probably a better way to do this, but this is working for now. - // We just create an MD5 hash of all the MD5 hashes of all the path - // elements. This gets us the property that it is unique per ordering. - hash := md5.New() - for _, p := range path { - single := md5.Sum([]byte(p)) - if _, err := hash.Write(single[:]); err != nil { - panic(err) - } - } - - return hex.EncodeToString(hash.Sum(nil)) + return strings.Join(path, "|") } diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go index 51d6652..30db195 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go @@ -10,6 +10,7 @@ import ( "sync" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/version" ) func init() { @@ -26,18 +27,54 @@ func init() { // necessary to make a change: the state, diff, config, backend config, etc. // This is so that it can run alone without any other data. type Plan struct { - Diff *Diff - Module *module.Tree - State *State - Vars map[string]interface{} + // Diff describes the resource actions that must be taken when this + // plan is applied. + Diff *Diff + + // Module represents the entire configuration that was present when this + // plan was created. + Module *module.Tree + + // State is the Terraform state that was current when this plan was + // created. + // + // It is not allowed to apply a plan that has a stale state, since its + // diff could be outdated. + State *State + + // Vars retains the variables that were set when creating the plan, so + // that the same variables can be applied during apply. 
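// PathCacheKey above drops the MD5 construction: joining with "|"
// stays unique per ordering as long as "|" never appears in a module
// path element, and keeps the keys readable in debug output:
package main

import (
	"fmt"
	"strings"
)

func pathCacheKey(path []string) string {
	return strings.Join(path, "|")
}

func main() {
	fmt.Println(pathCacheKey([]string{"root"}))                  // root
	fmt.Println(pathCacheKey([]string{"root", "network"}))       // root|network
	fmt.Println(pathCacheKey([]string{"root", "network", "db"})) // root|network|db
}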
+ Vars map[string]interface{} + + // Targets, if non-empty, contains a set of resource address strings that + // identify graph nodes that were selected as targets for plan. + // + // When targets are set, any graph node that is not directly targeted or + // indirectly targeted via dependencies is excluded from the graph. Targets []string + // TerraformVersion is the version of Terraform that was used to create + // this plan. + // + // It is not allowed to apply a plan created with a different version of + // Terraform, since the other fields of this structure may be interpreted + // in different ways between versions. TerraformVersion string - ProviderSHA256s map[string][]byte + + // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries + // used as plugins for each provider during plan. + // + // These must match between plan and apply to ensure that the diff is + // correctly interpreted, since different provider versions may have + // different attributes or attribute value constraints. + ProviderSHA256s map[string][]byte // Backend is the backend that this plan should use and store data with. Backend *BackendState + // Destroy indicates that this plan was created for a full destroy operation + Destroy bool + once sync.Once } @@ -67,6 +104,7 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) { opts.Module = p.Module opts.Targets = p.Targets opts.ProviderSHA256s = p.ProviderSHA256s + opts.Destroy = p.Destroy if opts.State == nil { opts.State = p.State @@ -79,10 +117,10 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) { // the state, there is little chance that these aren't actually equal. // Log the error condition for reference, but continue with the state // we have. - log.Println("[WARNING] Plan state and ContextOpts state are not equal") + log.Println("[WARN] Plan state and ContextOpts state are not equal") } - thisVersion := VersionString() + thisVersion := version.String() if p.TerraformVersion != "" && p.TerraformVersion != thisVersion { return nil, fmt.Errorf( "plan was created with a different version of Terraform (created with %s, but running %s)", diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go index 0acf0be..2f5ebb5 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go @@ -88,6 +88,46 @@ func (i *InstanceInfo) HumanId() string { i.Id) } +// ResourceAddress returns the address of the resource that the receiver is describing. +func (i *InstanceInfo) ResourceAddress() *ResourceAddress { + // GROSS: for tainted and deposed instances, their status gets appended + // to i.Id to create a unique id for the graph node. Historically these + // ids were displayed to the user, so it's designed to be human-readable: + // "aws_instance.bar.0 (deposed #0)" + // + // So here we detect such suffixes and try to interpret them back to + // their original meaning so we can then produce a ResourceAddress + // with a suitable InstanceType. 
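// The plan/apply version guard above is an exact string comparison;
// only plans that carry no version at all (legacy plans) skip it:
package main

import "fmt"

func checkPlanVersion(planVersion, thisVersion string) error {
	if planVersion != "" && planVersion != thisVersion {
		return fmt.Errorf(
			"plan was created with a different version of Terraform (created with %s, but running %s)",
			planVersion, thisVersion)
	}
	return nil
}

func main() {
	fmt.Println(checkPlanVersion("0.10.8", "0.11.0")) // error
	fmt.Println(checkPlanVersion("", "0.11.0"))       // <nil>
}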
+ id := i.Id + instanceType := TypeInvalid + if idx := strings.Index(id, " ("); idx != -1 { + remain := id[idx:] + id = id[:idx] + + switch { + case strings.Contains(remain, "tainted"): + instanceType = TypeTainted + case strings.Contains(remain, "deposed"): + instanceType = TypeDeposed + } + } + + addr, err := parseResourceAddressInternal(id) + if err != nil { + // should never happen, since that would indicate a bug in the + // code that constructed this InstanceInfo. + panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) + } + if len(i.ModulePath) > 1 { + addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied + } + if instanceType != TypeInvalid { + addr.InstanceTypeSet = true + addr.InstanceType = instanceType + } + return addr +} + func (i *InstanceInfo) uniqueId() string { prefix := i.HumanId() if v := i.uniqueExtra; v != "" { @@ -306,7 +346,7 @@ func (c *ResourceConfig) get( if err != nil { return nil, false } - if i >= int64(cv.Len()) { + if int(i) < 0 || int(i) >= cv.Len() { return nil, false } current = cv.Index(int(i)).Interface() diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go index 8badca8..a64f5d8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go @@ -42,9 +42,9 @@ func (r *ResourceAddress) Copy() *ResourceAddress { Type: r.Type, Mode: r.Mode, } - for _, p := range r.Path { - n.Path = append(n.Path, p) - } + + n.Path = append(n.Path, r.Path...) + return n } @@ -362,40 +362,41 @@ func (addr *ResourceAddress) Less(other *ResourceAddress) bool { switch { - case len(addr.Path) < len(other.Path): - return true + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) case !reflect.DeepEqual(addr.Path, other.Path): // If the two paths are the same length but don't match, we'll just // cheat and compare the string forms since it's easier than - // comparing all of the path segments in turn. + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. addrStr := addr.String() otherStr := other.String() return addrStr < otherStr - case addr.Mode == config.DataResourceMode && other.Mode != config.DataResourceMode: - return true + case addr.Mode != other.Mode: + return addr.Mode == config.DataResourceMode - case addr.Type < other.Type: - return true + case addr.Type != other.Type: + return addr.Type < other.Type - case addr.Name < other.Name: - return true + case addr.Name != other.Name: + return addr.Name < other.Name - case addr.Index < other.Index: + case addr.Index != other.Index: // Since "Index" is -1 for an un-indexed address, this also conveniently // sorts unindexed addresses before indexed ones, should they both // appear for some reason. - return true + return addr.Index < other.Index - case other.InstanceTypeSet && !addr.InstanceTypeSet: - return true + case addr.InstanceTypeSet != other.InstanceTypeSet: + return !addr.InstanceTypeSet - case addr.InstanceType < other.InstanceType: + case addr.InstanceType != other.InstanceType: // InstanceType is actually an enum, so this is just an arbitrary // sort based on the enum numeric values, and thus not particularly // meaningful. 
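+ // (Each case above guards on inequality and only then returns the "<"
+ // comparison, which yields a consistent total order for sorting; the
+ // old form could report a < b and b < a for the same pair.)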
- return true
+ return addr.InstanceType < other.InstanceType
 
 default:
 return false
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
index 7d78f67..93fd14f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -21,6 +21,15 @@ type ResourceProvider interface {
 * Functions related to the provider
 *********************************************************************/
 
+ // GetSchema returns the config schema for the main provider
+ // configuration, as would appear in a "provider" block in the
+ // configuration files.
+ //
+ // Currently not all providers support schema. Callers must therefore
+ // first call Resources and DataSources and ensure that at least one
+ // resource or data source has the SchemaAvailable flag set.
+ GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error)
+
 // Input is called to ask the provider to ask the user for input
 // for completing the configuration if necessary.
 //
@@ -183,11 +192,25 @@ type ResourceProviderCloser interface {
 type ResourceType struct {
 Name string // Name of the resource, example "instance" (no provider prefix)
 Importable bool // Whether this resource supports importing
+
+ // SchemaAvailable is set if the provider supports the ProviderSchema,
+ // ResourceTypeSchema and DataSourceSchema methods. Although it is
+ // included on each resource type, it's actually a provider-wide setting
+ // that's smuggled here only because that avoids a breaking change to
+ // the plugin protocol.
+ SchemaAvailable bool
 }
 
 // DataSource is a data source that a resource provider implements.
 type DataSource struct {
 Name string
+
+ // SchemaAvailable is set if the provider supports the ProviderSchema,
+ // ResourceTypeSchema and DataSourceSchema methods. Although it is
+ // included on each data source, it's actually a provider-wide setting
+ // that's smuggled here only because that avoids a breaking change to
+ // the plugin protocol.
+ SchemaAvailable bool
 }
 
 // ResourceProviderResolver is an interface implemented by objects that are
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
index f531533..4000e3d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -1,6 +1,8 @@
 package terraform
 
-import "sync"
+import (
+ "sync"
+)
 
 // MockResourceProvider implements ResourceProvider but mocks out all the
 // calls for testing purposes.
@@ -12,6 +14,10 @@ type MockResourceProvider struct {
 CloseCalled bool
 CloseError error
 
+ GetSchemaCalled bool
+ GetSchemaRequest *ProviderSchemaRequest
+ GetSchemaReturn *ProviderSchema
+ GetSchemaReturnError error
 InputCalled bool
 InputInput UIInput
 InputConfig *ResourceConfig
@@ -92,8 +98,19 @@ func (p *MockResourceProvider) Close() error {
 return p.CloseError
 }
 
+func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) {
+ p.Lock()
+ defer p.Unlock()
+
+ p.GetSchemaCalled = true
+ p.GetSchemaRequest = req
+ return p.GetSchemaReturn, p.GetSchemaReturnError
+}
+
 func (p *MockResourceProvider) Input(
 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+ p.Lock()
+ defer p.Unlock()
 p.InputCalled = true
 p.InputInput = input
 p.InputConfig = c
@@ -186,6 +203,7 @@ func (p *MockResourceProvider) Diff(
 p.DiffInfo = info
 p.DiffState = state
 p.DiffDesired = desired
+
 if p.DiffFn != nil {
 return p.DiffFn(info, state, desired)
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
new file mode 100644
index 0000000..ec46efc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
@@ -0,0 +1,34 @@
+package terraform
+
+import (
+ "github.com/hashicorp/terraform/config/configschema"
+)
+
+type Schemas struct {
+ Providers ProviderSchemas
+}
+
+// ProviderSchemas is a map from provider names to provider schemas.
+//
+// The names in this map are the direct plugin name (e.g. "aws") rather than
+// any alias name (e.g. "aws.foo"), since schemas belong to the plugin itself rather than to any one configuration.
+type ProviderSchemas map[string]*ProviderSchema
+
+// ProviderSchema represents the schema for a provider's own configuration
+// and the configuration for some or all of its resources and data sources.
+//
+// The completeness of this structure depends on how it was constructed.
+// When constructed for a configuration, it will generally include only
+// resource types and data sources used by that configuration.
+type ProviderSchema struct {
+ Provider *configschema.Block
+ ResourceTypes map[string]*configschema.Block
+ DataSources map[string]*configschema.Block
+}
+
+// ProviderSchemaRequest is used to describe to a ResourceProvider which
+// aspects of schema are required, when calling the GetSchema method.
+type ProviderSchemaRequest struct {
+ ResourceTypes []string
+ DataSources []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
deleted file mode 100644
index 4632559..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/shadow.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package terraform
-
-// Shadow is the interface that any "shadow" structures must implement.
-//
-// A shadow structure is an interface implementation (typically) that
-// shadows a real implementation and verifies that the same behavior occurs
-// on both. The semantics of this behavior are up to the interface itself.
-//
-// A shadow NEVER modifies real values or state. It must always be safe to use.
-//
-// For example, a ResourceProvider shadow ensures that the same operations
-// are done on the same resources with the same configurations.
-//
-// The typical usage of a shadow following this interface is to complete
-// the real operations, then call CloseShadow which tells the shadow that
-// the real side is done. Then, once the shadow is also complete, call
-// ShadowError to find any errors that may have been caught.
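// (Illustrative sketch, editor-added and not part of the deleted file: the
// intended call sequence for any Shadow value s was roughly
//
//	// ... drive the real implementation to completion ...
//	if err := s.CloseShadow(); err != nil { /* closing failed */ }
//	// ... wait for the shadow itself to finish ...
//	if err := s.ShadowError(); err != nil { /* behaviors diverged */ }
// )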
-type Shadow interface { - // CloseShadow tells the shadow that the REAL implementation is - // complete. Therefore, any calls that would block should now return - // immediately since no more changes will happen to the real side. - CloseShadow() error - - // ShadowError returns the errors that the shadow has found. - // This should be called AFTER CloseShadow and AFTER the shadow is - // known to be complete (no more calls to it). - ShadowError() error -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go deleted file mode 100644 index 116cf84..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go +++ /dev/null @@ -1,273 +0,0 @@ -package terraform - -import ( - "fmt" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/shadow" -) - -// newShadowComponentFactory creates a shadowed contextComponentFactory -// so that requests to create new components result in both a real and -// shadow side. -func newShadowComponentFactory( - f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) { - // Create the shared data - shared := &shadowComponentFactoryShared{contextComponentFactory: f} - - // Create the real side - real := &shadowComponentFactory{ - shadowComponentFactoryShared: shared, - } - - // Create the shadow - shadow := &shadowComponentFactory{ - shadowComponentFactoryShared: shared, - Shadow: true, - } - - return real, shadow -} - -// shadowComponentFactory is the shadow side. Any components created -// with this factory are fake and will not cause real work to happen. -// -// Unlike other shadowers, the shadow component factory will allow the -// shadow to create _any_ component even if it is never requested on the -// real side. This is because errors will happen later downstream as function -// calls are made to the shadows that are never matched on the real side. -type shadowComponentFactory struct { - *shadowComponentFactoryShared - - Shadow bool // True if this should return the shadow - lock sync.Mutex -} - -func (f *shadowComponentFactory) ResourceProvider( - n, uid string) (ResourceProvider, error) { - f.lock.Lock() - defer f.lock.Unlock() - - real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid) - var result ResourceProvider = real - if f.Shadow { - result = shadow - } - - return result, err -} - -func (f *shadowComponentFactory) ResourceProvisioner( - n, uid string) (ResourceProvisioner, error) { - f.lock.Lock() - defer f.lock.Unlock() - - real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid) - var result ResourceProvisioner = real - if f.Shadow { - result = shadow - } - - return result, err -} - -// CloseShadow is called when the _real_ side is complete. This will cause -// all future blocking operations to return immediately on the shadow to -// ensure the shadow also completes. 
-func (f *shadowComponentFactory) CloseShadow() error { - // If we aren't the shadow, just return - if !f.Shadow { - return nil - } - - // Lock ourselves so we don't modify state - f.lock.Lock() - defer f.lock.Unlock() - - // Grab our shared state - shared := f.shadowComponentFactoryShared - - // If we're already closed, its an error - if shared.closed { - return fmt.Errorf("component factory shadow already closed") - } - - // Close all the providers and provisioners and return the error - var result error - for _, n := range shared.providerKeys { - _, shadow, err := shared.ResourceProvider(n, n) - if err == nil && shadow != nil { - if err := shadow.CloseShadow(); err != nil { - result = multierror.Append(result, err) - } - } - } - - for _, n := range shared.provisionerKeys { - _, shadow, err := shared.ResourceProvisioner(n, n) - if err == nil && shadow != nil { - if err := shadow.CloseShadow(); err != nil { - result = multierror.Append(result, err) - } - } - } - - // Mark ourselves as closed - shared.closed = true - - return result -} - -func (f *shadowComponentFactory) ShadowError() error { - // If we aren't the shadow, just return - if !f.Shadow { - return nil - } - - // Lock ourselves so we don't modify state - f.lock.Lock() - defer f.lock.Unlock() - - // Grab our shared state - shared := f.shadowComponentFactoryShared - - // If we're not closed, its an error - if !shared.closed { - return fmt.Errorf("component factory must be closed to retrieve errors") - } - - // Close all the providers and provisioners and return the error - var result error - for _, n := range shared.providerKeys { - _, shadow, err := shared.ResourceProvider(n, n) - if err == nil && shadow != nil { - if err := shadow.ShadowError(); err != nil { - result = multierror.Append(result, err) - } - } - } - - for _, n := range shared.provisionerKeys { - _, shadow, err := shared.ResourceProvisioner(n, n) - if err == nil && shadow != nil { - if err := shadow.ShadowError(); err != nil { - result = multierror.Append(result, err) - } - } - } - - return result -} - -// shadowComponentFactoryShared is shared data between the two factories. -// -// It is NOT SAFE to run any function on this struct in parallel. Lock -// access to this struct. -type shadowComponentFactoryShared struct { - contextComponentFactory - - closed bool - providers shadow.KeyedValue - providerKeys []string - provisioners shadow.KeyedValue - provisionerKeys []string -} - -// shadowResourceProviderFactoryEntry is the entry that is stored in -// the Shadows key/value for a provider. -type shadowComponentFactoryProviderEntry struct { - Real ResourceProvider - Shadow shadowResourceProvider - Err error -} - -type shadowComponentFactoryProvisionerEntry struct { - Real ResourceProvisioner - Shadow shadowResourceProvisioner - Err error -} - -func (f *shadowComponentFactoryShared) ResourceProvider( - n, uid string) (ResourceProvider, shadowResourceProvider, error) { - // Determine if we already have a value - raw, ok := f.providers.ValueOk(uid) - if !ok { - // Build the entry - var entry shadowComponentFactoryProviderEntry - - // No value, initialize. 
Create the original - p, err := f.contextComponentFactory.ResourceProvider(n, uid) - if err != nil { - entry.Err = err - p = nil // Just to be sure - } - - if p != nil { - // Create the shadow - real, shadow := newShadowResourceProvider(p) - entry.Real = real - entry.Shadow = shadow - - if f.closed { - shadow.CloseShadow() - } - } - - // Store the value - f.providers.SetValue(uid, &entry) - f.providerKeys = append(f.providerKeys, uid) - raw = &entry - } - - // Read the entry - entry, ok := raw.(*shadowComponentFactoryProviderEntry) - if !ok { - return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw) - } - - // Return - return entry.Real, entry.Shadow, entry.Err -} - -func (f *shadowComponentFactoryShared) ResourceProvisioner( - n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) { - // Determine if we already have a value - raw, ok := f.provisioners.ValueOk(uid) - if !ok { - // Build the entry - var entry shadowComponentFactoryProvisionerEntry - - // No value, initialize. Create the original - p, err := f.contextComponentFactory.ResourceProvisioner(n, uid) - if err != nil { - entry.Err = err - p = nil // Just to be sure - } - - if p != nil { - // For now, just create a mock since we don't support provisioners yet - real, shadow := newShadowResourceProvisioner(p) - entry.Real = real - entry.Shadow = shadow - - if f.closed { - shadow.CloseShadow() - } - } - - // Store the value - f.provisioners.SetValue(uid, &entry) - f.provisionerKeys = append(f.provisionerKeys, uid) - raw = &entry - } - - // Read the entry - entry, ok := raw.(*shadowComponentFactoryProvisionerEntry) - if !ok { - return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw) - } - - // Return - return entry.Real, entry.Shadow, entry.Err -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go deleted file mode 100644 index 5588af2..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go +++ /dev/null @@ -1,158 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/copystructure" -) - -// newShadowContext creates a new context that will shadow the given context -// when walking the graph. The resulting context should be used _only once_ -// for a graph walk. -// -// The returned Shadow should be closed after the graph walk with the -// real context is complete. Errors from the shadow can be retrieved there. -// -// Most importantly, any operations done on the shadow context (the returned -// context) will NEVER affect the real context. All structures are deep -// copied, no real providers or resources are used, etc. 
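// (Illustrative sketch, editor-added, derived from the signature below;
// ctx names the original *Context being shadowed:
//
//	realCtx, _, closer := newShadowContext(ctx)
//	// ... walk the graph using realCtx only ...
//	closer.CloseShadow()
//	if err := closer.ShadowError(); err != nil { /* divergence found */ }
// )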
-func newShadowContext(c *Context) (*Context, *Context, Shadow) { - // Copy the targets - targetRaw, err := copystructure.Copy(c.targets) - if err != nil { - panic(err) - } - - // Copy the variables - varRaw, err := copystructure.Copy(c.variables) - if err != nil { - panic(err) - } - - // Copy the provider inputs - providerInputRaw, err := copystructure.Copy(c.providerInputConfig) - if err != nil { - panic(err) - } - - // The factories - componentsReal, componentsShadow := newShadowComponentFactory(c.components) - - // Create the shadow - shadow := &Context{ - components: componentsShadow, - destroy: c.destroy, - diff: c.diff.DeepCopy(), - hooks: nil, - meta: c.meta, - module: c.module, - state: c.state.DeepCopy(), - targets: targetRaw.([]string), - variables: varRaw.(map[string]interface{}), - - // NOTE(mitchellh): This is not going to work for shadows that are - // testing that input results in the proper end state. At the time - // of writing, input is not used in any state-changing graph - // walks anyways, so this checks nothing. We set it to this to avoid - // any panics but even a "nil" value worked here. - uiInput: new(MockUIInput), - - // Hardcoded to 4 since parallelism in the shadow doesn't matter - // a ton since we're doing far less compared to the real side - // and our operations are MUCH faster. - parallelSem: NewSemaphore(4), - providerInputConfig: providerInputRaw.(map[string]map[string]interface{}), - } - - // Create the real context. This is effectively just a copy of - // the context given except we need to modify some of the values - // to point to the real side of a shadow so the shadow can compare values. - real := &Context{ - // The fields below are changed. - components: componentsReal, - - // The fields below are direct copies - destroy: c.destroy, - diff: c.diff, - // diffLock - no copy - hooks: c.hooks, - meta: c.meta, - module: c.module, - sh: c.sh, - state: c.state, - // stateLock - no copy - targets: c.targets, - uiInput: c.uiInput, - variables: c.variables, - - // l - no copy - parallelSem: c.parallelSem, - providerInputConfig: c.providerInputConfig, - runContext: c.runContext, - runContextCancel: c.runContextCancel, - shadowErr: c.shadowErr, - } - - return real, shadow, &shadowContextCloser{ - Components: componentsShadow, - } -} - -// shadowContextVerify takes the real and shadow context and verifies they -// have equal diffs and states. -func shadowContextVerify(real, shadow *Context) error { - var result error - - // The states compared must be pruned so they're minimal/clean - real.state.prune() - shadow.state.prune() - - // Compare the states - if !real.state.Equal(shadow.state) { - result = multierror.Append(result, fmt.Errorf( - "Real and shadow states do not match! "+ - "Real state:\n\n%s\n\n"+ - "Shadow state:\n\n%s\n\n", - real.state, shadow.state)) - } - - // Compare the diffs - if !real.diff.Equal(shadow.diff) { - result = multierror.Append(result, fmt.Errorf( - "Real and shadow diffs do not match! "+ - "Real diff:\n\n%s\n\n"+ - "Shadow diff:\n\n%s\n\n", - real.diff, shadow.diff)) - } - - return result -} - -// shadowContextCloser is the io.Closer returned by newShadowContext that -// closes all the shadows and returns the results. -type shadowContextCloser struct { - Components *shadowComponentFactory -} - -// Close closes the shadow context. 
-func (c *shadowContextCloser) CloseShadow() error { - return c.Components.CloseShadow() -} - -func (c *shadowContextCloser) ShadowError() error { - err := c.Components.ShadowError() - if err == nil { - return nil - } - - // This is a sad edge case: if the configuration contains uuid() at - // any point, we cannot reason aboyt the shadow execution. Tested - // with Context2Plan_shadowUuid. - if strings.Contains(err.Error(), "uuid()") { - err = nil - } - - return err -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go deleted file mode 100644 index 9741d7e..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go +++ /dev/null @@ -1,815 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/shadow" -) - -// shadowResourceProvider implements ResourceProvider for the shadow -// eval context defined in eval_context_shadow.go. -// -// This is used to verify behavior with a real provider. This shouldn't -// be used directly. -type shadowResourceProvider interface { - ResourceProvider - Shadow -} - -// newShadowResourceProvider creates a new shadowed ResourceProvider. -// -// This will assume a well behaved real ResourceProvider. For example, -// it assumes that the `Resources` call underneath doesn't change values -// since once it is called on the real provider, it will be cached and -// returned in the shadow since number of calls to that shouldn't affect -// actual behavior. -// -// However, with calls like Apply, call order is taken into account, -// parameters are checked for equality, etc. -func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) { - // Create the shared data - shared := shadowResourceProviderShared{} - - // Create the real provider that does actual work - real := &shadowResourceProviderReal{ - ResourceProvider: p, - Shared: &shared, - } - - // Create the shadow that watches the real value - shadow := &shadowResourceProviderShadow{ - Shared: &shared, - - resources: p.Resources(), - dataSources: p.DataSources(), - } - - return real, shadow -} - -// shadowResourceProviderReal is the real resource provider. Function calls -// to this will perform real work. This records the parameters and return -// values and call order for the shadow to reproduce. 
-type shadowResourceProviderReal struct { - ResourceProvider - - Shared *shadowResourceProviderShared -} - -func (p *shadowResourceProviderReal) Close() error { - var result error - if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok { - result = c.Close() - } - - p.Shared.CloseErr.SetValue(result) - return result -} - -func (p *shadowResourceProviderReal) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - cCopy := c.DeepCopy() - - result, err := p.ResourceProvider.Input(input, c) - p.Shared.Input.SetValue(&shadowResourceProviderInput{ - Config: cCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) { - warns, errs := p.ResourceProvider.Validate(c) - p.Shared.Validate.SetValue(&shadowResourceProviderValidate{ - Config: c.DeepCopy(), - ResultWarn: warns, - ResultErr: errs, - }) - - return warns, errs -} - -func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error { - cCopy := c.DeepCopy() - - err := p.ResourceProvider.Configure(c) - p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{ - Config: cCopy, - Result: err, - }) - - return err -} - -func (p *shadowResourceProviderReal) Stop() error { - return p.ResourceProvider.Stop() -} - -func (p *shadowResourceProviderReal) ValidateResource( - t string, c *ResourceConfig) ([]string, []error) { - key := t - configCopy := c.DeepCopy() - - // Real operation - warns, errs := p.ResourceProvider.ValidateResource(t, c) - - // Initialize to ensure we always have a wrapper with a lock - p.Shared.ValidateResource.Init( - key, &shadowResourceProviderValidateResourceWrapper{}) - - // Get the result - raw := p.Shared.ValidateResource.Value(key) - wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper) - if !ok { - // If this fails then we just continue with our day... the shadow - // will fail to but there isn't much we can do. 
- log.Printf( - "[ERROR] unknown value in ValidateResource shadow value: %#v", raw) - return warns, errs - } - - // Lock the wrapper for writing and record our call - wrapper.Lock() - defer wrapper.Unlock() - - wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{ - Config: configCopy, - Warns: warns, - Errors: errs, - }) - - // With it locked, call SetValue again so that it triggers WaitForChange - p.Shared.ValidateResource.SetValue(key, wrapper) - - // Return the result - return warns, errs -} - -func (p *shadowResourceProviderReal) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // Thse have to be copied before the call since call can modify - stateCopy := state.DeepCopy() - diffCopy := diff.DeepCopy() - - result, err := p.ResourceProvider.Apply(info, state, diff) - p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{ - State: stateCopy, - Diff: diffCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - // Thse have to be copied before the call since call can modify - stateCopy := state.DeepCopy() - desiredCopy := desired.DeepCopy() - - result, err := p.ResourceProvider.Diff(info, state, desired) - p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{ - State: stateCopy, - Desired: desiredCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) Refresh( - info *InstanceInfo, - state *InstanceState) (*InstanceState, error) { - // Thse have to be copied before the call since call can modify - stateCopy := state.DeepCopy() - - result, err := p.ResourceProvider.Refresh(info, state) - p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{ - State: stateCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) ValidateDataSource( - t string, c *ResourceConfig) ([]string, []error) { - key := t - configCopy := c.DeepCopy() - - // Real operation - warns, errs := p.ResourceProvider.ValidateDataSource(t, c) - - // Initialize - p.Shared.ValidateDataSource.Init( - key, &shadowResourceProviderValidateDataSourceWrapper{}) - - // Get the result - raw := p.Shared.ValidateDataSource.Value(key) - wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper) - if !ok { - // If this fails then we just continue with our day... the shadow - // will fail to but there isn't much we can do. 
- log.Printf( - "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw) - return warns, errs - } - - // Lock the wrapper for writing and record our call - wrapper.Lock() - defer wrapper.Unlock() - - wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{ - Config: configCopy, - Warns: warns, - Errors: errs, - }) - - // Set it - p.Shared.ValidateDataSource.SetValue(key, wrapper) - - // Return the result - return warns, errs -} - -func (p *shadowResourceProviderReal) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - // These have to be copied before the call since call can modify - desiredCopy := desired.DeepCopy() - - result, err := p.ResourceProvider.ReadDataDiff(info, desired) - p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{ - Desired: desiredCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -func (p *shadowResourceProviderReal) ReadDataApply( - info *InstanceInfo, - diff *InstanceDiff) (*InstanceState, error) { - // Thse have to be copied before the call since call can modify - diffCopy := diff.DeepCopy() - - result, err := p.ResourceProvider.ReadDataApply(info, diff) - p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{ - Diff: diffCopy, - Result: result.DeepCopy(), - ResultErr: err, - }) - - return result, err -} - -// shadowResourceProviderShadow is the shadow resource provider. Function -// calls never affect real resources. This is paired with the "real" side -// which must be called properly to enable recording. -type shadowResourceProviderShadow struct { - Shared *shadowResourceProviderShared - - // Cached values that are expected to not change - resources []ResourceType - dataSources []DataSource - - Error error // Error is the list of errors from the shadow - ErrorLock sync.Mutex -} - -type shadowResourceProviderShared struct { - // NOTE: Anytime a value is added here, be sure to add it to - // the Close() method so that it is closed. 
- - CloseErr shadow.Value - Input shadow.Value - Validate shadow.Value - Configure shadow.Value - ValidateResource shadow.KeyedValue - Apply shadow.KeyedValue - Diff shadow.KeyedValue - Refresh shadow.KeyedValue - ValidateDataSource shadow.KeyedValue - ReadDataDiff shadow.KeyedValue - ReadDataApply shadow.KeyedValue -} - -func (p *shadowResourceProviderShared) Close() error { - return shadow.Close(p) -} - -func (p *shadowResourceProviderShadow) CloseShadow() error { - err := p.Shared.Close() - if err != nil { - err = fmt.Errorf("close error: %s", err) - } - - return err -} - -func (p *shadowResourceProviderShadow) ShadowError() error { - return p.Error -} - -func (p *shadowResourceProviderShadow) Resources() []ResourceType { - return p.resources -} - -func (p *shadowResourceProviderShadow) DataSources() []DataSource { - return p.dataSources -} - -func (p *shadowResourceProviderShadow) Close() error { - v := p.Shared.CloseErr.Value() - if v == nil { - return nil - } - - return v.(error) -} - -func (p *shadowResourceProviderShadow) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - // Get the result of the input call - raw := p.Shared.Input.Value() - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderInput) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'input' shadow value: %#v", raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) { - // Get the result of the validate call - raw := p.Shared.Validate.Value() - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderValidate) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'validate' shadow value: %#v", raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.ResultWarn, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error { - // Get the result of the call - raw := p.Shared.Configure.Value() - if raw == nil { - return nil - } - - result, ok := raw.(*shadowResourceProviderConfigure) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'configure' shadow value: %#v", raw)) - return nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.Result -} - -// Stop returns immediately. 
-func (p *shadowResourceProviderShadow) Stop() error { - return nil -} - -func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { - // Unique key - key := t - - // Get the initial value - raw := p.Shared.ValidateResource.Value(key) - - // Find a validation with our configuration - var result *shadowResourceProviderValidateResource - for { - // Get the value - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateResource' call for %q:\n\n%#v", - key, c)) - return nil, nil - } - - wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Look for the matching call with our configuration - wrapper.RLock() - for _, call := range wrapper.Calls { - if call.Config.Equal(c) { - result = call - break - } - } - wrapper.RUnlock() - - // If we found a result, exit - if result != nil { - break - } - - // Wait for a change so we can get the wrapper again - raw = p.Shared.ValidateResource.WaitForChange(key) - } - - return result.Warns, result.Errors -} - -func (p *shadowResourceProviderShadow) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.Apply.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' call for %q:\n\n%#v\n\n%#v", - key, state, diff)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !state.Equal(result.State) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.State, state)) - p.ErrorLock.Unlock() - } - - if !diff.Equal(result.Diff) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v", - key, result.Diff, diff)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.Diff.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'diff' call for %q:\n\n%#v\n\n%#v", - key, state, desired)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderDiff) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'diff' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !state.Equal(result.State) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.State, state)) - p.ErrorLock.Unlock() - } - if !desired.Equal(result.Desired) { - 
p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.Desired, desired)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Refresh( - info *InstanceInfo, - state *InstanceState) (*InstanceState, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.Refresh.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'refresh' call for %q:\n\n%#v", - key, state)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderRefresh) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'refresh' shadow value: %#v", raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !state.Equal(result.State) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.State, state)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) ValidateDataSource( - t string, c *ResourceConfig) ([]string, []error) { - // Unique key - key := t - - // Get the initial value - raw := p.Shared.ValidateDataSource.Value(key) - - // Find a validation with our configuration - var result *shadowResourceProviderValidateDataSource - for { - // Get the value - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateDataSource' call for %q:\n\n%#v", - key, c)) - return nil, nil - } - - wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateDataSource' shadow value: %#v", raw)) - return nil, nil - } - - // Look for the matching call with our configuration - wrapper.RLock() - for _, call := range wrapper.Calls { - if call.Config.Equal(c) { - result = call - break - } - } - wrapper.RUnlock() - - // If we found a result, exit - if result != nil { - break - } - - // Wait for a change so we can get the wrapper again - raw = p.Shared.ValidateDataSource.WaitForChange(key) - } - - return result.Warns, result.Errors -} - -func (p *shadowResourceProviderShadow) ReadDataDiff( - info *InstanceInfo, - desired *ResourceConfig) (*InstanceDiff, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.ReadDataDiff.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataDiff' call for %q:\n\n%#v", - key, desired)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderReadDataDiff) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !desired.Equal(result.Desired) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v", - key, result.Desired, desired)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) ReadDataApply( - info 
*InstanceInfo, - d *InstanceDiff) (*InstanceState, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.ReadDataApply.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataApply' call for %q:\n\n%#v", - key, d)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderReadDataApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !d.Equal(result.Diff) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v", - result.Diff, d)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { - panic("import not supported by shadow graph") -} - -// The structs for the various function calls are put below. These structs -// are used to carry call information across the real/shadow boundaries. - -type shadowResourceProviderInput struct { - Config *ResourceConfig - Result *ResourceConfig - ResultErr error -} - -type shadowResourceProviderValidate struct { - Config *ResourceConfig - ResultWarn []string - ResultErr []error -} - -type shadowResourceProviderConfigure struct { - Config *ResourceConfig - Result error -} - -type shadowResourceProviderValidateResourceWrapper struct { - sync.RWMutex - - Calls []*shadowResourceProviderValidateResource -} - -type shadowResourceProviderValidateResource struct { - Config *ResourceConfig - Warns []string - Errors []error -} - -type shadowResourceProviderApply struct { - State *InstanceState - Diff *InstanceDiff - Result *InstanceState - ResultErr error -} - -type shadowResourceProviderDiff struct { - State *InstanceState - Desired *ResourceConfig - Result *InstanceDiff - ResultErr error -} - -type shadowResourceProviderRefresh struct { - State *InstanceState - Result *InstanceState - ResultErr error -} - -type shadowResourceProviderValidateDataSourceWrapper struct { - sync.RWMutex - - Calls []*shadowResourceProviderValidateDataSource -} - -type shadowResourceProviderValidateDataSource struct { - Config *ResourceConfig - Warns []string - Errors []error -} - -type shadowResourceProviderReadDataDiff struct { - Desired *ResourceConfig - Result *InstanceDiff - ResultErr error -} - -type shadowResourceProviderReadDataApply struct { - Diff *InstanceDiff - Result *InstanceState - ResultErr error -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go deleted file mode 100644 index 60a4908..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go +++ /dev/null @@ -1,282 +0,0 @@ -package terraform - -import ( - "fmt" - "io" - "log" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/shadow" -) - -// shadowResourceProvisioner implements ResourceProvisioner for the shadow -// eval context defined in eval_context_shadow.go. -// -// This is used to verify behavior with a real provisioner. This shouldn't -// be used directly. 
-type shadowResourceProvisioner interface { - ResourceProvisioner - Shadow -} - -// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner. -func newShadowResourceProvisioner( - p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) { - // Create the shared data - shared := shadowResourceProvisionerShared{ - Validate: shadow.ComparedValue{ - Func: shadowResourceProvisionerValidateCompare, - }, - } - - // Create the real provisioner that does actual work - real := &shadowResourceProvisionerReal{ - ResourceProvisioner: p, - Shared: &shared, - } - - // Create the shadow that watches the real value - shadow := &shadowResourceProvisionerShadow{ - Shared: &shared, - } - - return real, shadow -} - -// shadowResourceProvisionerReal is the real resource provisioner. Function calls -// to this will perform real work. This records the parameters and return -// values and call order for the shadow to reproduce. -type shadowResourceProvisionerReal struct { - ResourceProvisioner - - Shared *shadowResourceProvisionerShared -} - -func (p *shadowResourceProvisionerReal) Close() error { - var result error - if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok { - result = c.Close() - } - - p.Shared.CloseErr.SetValue(result) - return result -} - -func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) { - warns, errs := p.ResourceProvisioner.Validate(c) - p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{ - Config: c, - ResultWarn: warns, - ResultErr: errs, - }) - - return warns, errs -} - -func (p *shadowResourceProvisionerReal) Apply( - output UIOutput, s *InstanceState, c *ResourceConfig) error { - err := p.ResourceProvisioner.Apply(output, s, c) - - // Write the result, grab a lock for writing. This should nver - // block long since the operations below don't block. - p.Shared.ApplyLock.Lock() - defer p.Shared.ApplyLock.Unlock() - - key := s.ID - raw, ok := p.Shared.Apply.ValueOk(key) - if !ok { - // Setup a new value - raw = &shadow.ComparedValue{ - Func: shadowResourceProvisionerApplyCompare, - } - - // Set it - p.Shared.Apply.SetValue(key, raw) - } - - compareVal, ok := raw.(*shadow.ComparedValue) - if !ok { - // Just log and return so that we don't cause the real side - // any side effects. - log.Printf("[ERROR] unknown value in 'apply': %#v", raw) - return err - } - - // Write the resulting value - compareVal.SetValue(&shadowResourceProvisionerApply{ - Config: c, - ResultErr: err, - }) - - return err -} - -func (p *shadowResourceProvisionerReal) Stop() error { - return p.ResourceProvisioner.Stop() -} - -// shadowResourceProvisionerShadow is the shadow resource provisioner. Function -// calls never affect real resources. This is paired with the "real" side -// which must be called properly to enable recording. -type shadowResourceProvisionerShadow struct { - Shared *shadowResourceProvisionerShared - - Error error // Error is the list of errors from the shadow - ErrorLock sync.Mutex -} - -type shadowResourceProvisionerShared struct { - // NOTE: Anytime a value is added here, be sure to add it to - // the Close() method so that it is closed. 
- - CloseErr shadow.Value - Validate shadow.ComparedValue - Apply shadow.KeyedValue - ApplyLock sync.Mutex // For writing only -} - -func (p *shadowResourceProvisionerShared) Close() error { - closers := []io.Closer{ - &p.CloseErr, - } - - for _, c := range closers { - // This should never happen, but we don't panic because a panic - // could affect the real behavior of Terraform and a shadow should - // never be able to do that. - if err := c.Close(); err != nil { - return err - } - } - - return nil -} - -func (p *shadowResourceProvisionerShadow) CloseShadow() error { - err := p.Shared.Close() - if err != nil { - err = fmt.Errorf("close error: %s", err) - } - - return err -} - -func (p *shadowResourceProvisionerShadow) ShadowError() error { - return p.Error -} - -func (p *shadowResourceProvisionerShadow) Close() error { - v := p.Shared.CloseErr.Value() - if v == nil { - return nil - } - - return v.(error) -} - -func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) { - // Get the result of the validate call - raw := p.Shared.Validate.Value(c) - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProvisionerValidate) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'validate' shadow value: %#v", raw)) - return nil, nil - } - - // We don't need to compare configurations because we key on the - // configuration so just return right away. - return result.ResultWarn, result.ResultErr -} - -func (p *shadowResourceProvisionerShadow) Apply( - output UIOutput, s *InstanceState, c *ResourceConfig) error { - // Get the value based on the key - key := s.ID - raw := p.Shared.Apply.Value(key) - if raw == nil { - return nil - } - - compareVal, ok := raw.(*shadow.ComparedValue) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value: %#v", raw)) - return nil - } - - // With the compared value, we compare against our config - raw = compareVal.Value(c) - if raw == nil { - return nil - } - - result, ok := raw.(*shadowResourceProvisionerApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value: %#v", raw)) - return nil - } - - return result.ResultErr -} - -func (p *shadowResourceProvisionerShadow) Stop() error { - // For the shadow, we always just return nil since a Stop indicates - // that we were interrupted and shadows are disabled during interrupts - // anyways. - return nil -} - -// The structs for the various function calls are put below. These structs -// are used to carry call information across the real/shadow boundaries. 
- -type shadowResourceProvisionerValidate struct { - Config *ResourceConfig - ResultWarn []string - ResultErr []error -} - -type shadowResourceProvisionerApply struct { - Config *ResourceConfig - ResultErr error -} - -func shadowResourceProvisionerValidateCompare(k, v interface{}) bool { - c, ok := k.(*ResourceConfig) - if !ok { - return false - } - - result, ok := v.(*shadowResourceProvisionerValidate) - if !ok { - return false - } - - return c.Equal(result.Config) -} - -func shadowResourceProvisionerApplyCompare(k, v interface{}) bool { - c, ok := k.(*ResourceConfig) - if !ok { - return false - } - - result, ok := v.(*shadowResourceProvisionerApply) - if !ok { - return false - } - - return c.Equal(result.Config) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go index 0c46194..04b14a6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state.go @@ -9,6 +9,7 @@ import ( "io" "io/ioutil" "log" + "os" "reflect" "sort" "strconv" @@ -16,10 +17,12 @@ import ( "sync" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/config" "github.com/mitchellh/copystructure" - "github.com/satori/go.uuid" + + tfversion "github.com/hashicorp/terraform/version" ) const ( @@ -664,7 +667,7 @@ func (s *State) FromFutureTerraform() bool { } v := version.Must(version.NewVersion(s.TFVersion)) - return SemVersion.LessThan(v) + return tfversion.SemVer.LessThan(v) } func (s *State) Init() { @@ -704,7 +707,11 @@ func (s *State) EnsureHasLineage() { func (s *State) ensureHasLineage() { if s.Lineage == "" { - s.Lineage = uuid.NewV4().String() + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %v", err)) + } + s.Lineage = lineage log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) } else { log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) @@ -977,6 +984,10 @@ type ModuleState struct { // always disjoint, so the path represents amodule tree Path []string `json:"path"` + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + // Outputs declared by the module and maintained for each module // even though only the root module technically needs to be kept. // This allows operators to inspect values at the boundaries. @@ -1083,7 +1094,7 @@ func (m *ModuleState) Orphans(c *config.Config) []string { defer m.Unlock() keys := make(map[string]struct{}) - for k, _ := range m.Resources { + for k := range m.Resources { keys[k] = struct{}{} } @@ -1091,7 +1102,7 @@ func (m *ModuleState) Orphans(c *config.Config) []string { for _, r := range c.Resources { delete(keys, r.Id()) - for k, _ := range keys { + for k := range keys { if strings.HasPrefix(k, r.Id()+".") { delete(keys, k) } @@ -1100,7 +1111,32 @@ func (m *ModuleState) Orphans(c *config.Config) []string { } result := make([]string, 0, len(keys)) - for k, _ := range keys { + for k := range keys { + result = append(result, k) + } + + return result +} + +// RemovedOutputs returns a list of outputs that are in the State but aren't +// present in the configuration itself. 
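+//
+// For example, if the state still tracks outputs "ip" and "dns" while the
+// current configuration declares only "ip", this returns ["dns"]; the names
+// here are illustrative.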
+func (m *ModuleState) RemovedOutputs(c *config.Config) []string {
+ m.Lock()
+ defer m.Unlock()
+
+ keys := make(map[string]struct{})
+ for k := range m.Outputs {
+ keys[k] = struct{}{}
+ }
+
+ if c != nil {
+ for _, o := range c.Outputs {
+ delete(keys, o.Name)
+ }
+ }
+
+ result := make([]string, 0, len(keys))
+ for k := range keys {
 result = append(result, k)
 }
 
@@ -1308,6 +1344,10 @@ func (m *ModuleState) String() string {
 return buf.String()
 }
 
+func (m *ModuleState) Empty() bool {
+ return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0
+}
+
 // ResourceStateKey is a structured representation of the key used for the
 // ModuleState.Resources mapping
 type ResourceStateKey struct {
@@ -1681,7 +1721,20 @@ func (s *InstanceState) Equal(other *InstanceState) bool {
 // We only do the deep check if both are non-nil. If one is nil
 // we treat it as equal since their lengths are both zero (check
 // above).
- if !reflect.DeepEqual(s.Meta, other.Meta) {
+ //
+ // Since this can contain numeric values that may change types during
+ // serialization, let's compare the serialized values.
+ sMeta, err := json.Marshal(s.Meta)
+ if err != nil {
+ // marshaling primitives shouldn't ever error out
+ panic(err)
+ }
+ otherMeta, err := json.Marshal(other.Meta)
+ if err != nil {
+ panic(err)
+ }
+
+ if !bytes.Equal(sMeta, otherMeta) {
 return false
 }
 }
@@ -1824,11 +1877,19 @@ var ErrNoState = errors.New("no state")
 // ReadState reads a state structure out of a reader in the format that
 // was written by WriteState.
 func ReadState(src io.Reader) (*State, error) {
+ // check for a nil file specifically, since that produces a platform
+ // specific error if we try to use it in a bufio.Reader.
+ if f, ok := src.(*os.File); ok && f == nil {
+ return nil, ErrNoState
+ }
+
 buf := bufio.NewReader(src)
+
 if _, err := buf.Peek(1); err != nil {
- // the error is either io.EOF or "invalid argument", and both are from
- // an empty state.
- return nil, ErrNoState
+ if err == io.EOF {
+ return nil, ErrNoState
+ }
+ return nil, err
 }
 
 if err := testForV0State(buf); err != nil {
@@ -1891,7 +1952,7 @@ func ReadState(src io.Reader) (*State, error) {
 result = v3State
 default:
 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
- SemVersion.String(), versionIdentifier.Version)
+ tfversion.SemVer.String(), versionIdentifier.Version)
 }
 
 // If we reached this place we must have a result set
@@ -1935,7 +1996,7 @@ func ReadStateV2(jsonBytes []byte) (*State, error) {
 // version that we don't understand
 if state.Version > StateVersion {
 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
- SemVersion.String(), state.Version)
+ tfversion.SemVer.String(), state.Version)
 }
 
 // Make sure the version is semantic
@@ -1970,7 +2031,7 @@ func ReadStateV3(jsonBytes []byte) (*State, error) {
 // version that we don't understand
 if state.Version > StateVersion {
 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
- SemVersion.String(), state.Version)
+ tfversion.SemVer.String(), state.Version)
 }
 
 // Make sure the version is semantic
@@ -2126,6 +2187,19 @@ func (s moduleStateSort) Swap(i, j int) {
 s[i], s[j] = s[j], s[i]
 }
 
+// CheckStateVersion returns an error if the state is not compatible with the
+// current version of Terraform.
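+//
+// A nil state is trivially compatible; otherwise the state is rejected when
+// its recorded TFVersion is newer than the running release (see
+// FromFutureTerraform).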
+func CheckStateVersion(state *State) error { + if state == nil { + return nil + } + + if state.FromFutureTerraform() { + return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion) + } + return nil +} + const stateValidateErrMultiModule = ` Multiple modules with the same path: %s @@ -2134,3 +2208,11 @@ in your state file that point to the same module. This will cause Terraform to behave in unexpected and error prone ways and is invalid. Please back up and modify your state file manually to resolve this. ` + +const stateInvalidTerraformVersionErr = ` +Terraform doesn't allow running any operations against a state +that was written by a future Terraform version. The state is +reporting it is written by Terraform '%s' + +Please run at least that version of Terraform to continue. +` diff --git a/vendor/github.com/hashicorp/terraform/terraform/test_failure b/vendor/github.com/hashicorp/terraform/terraform/test_failure deleted file mode 100644 index 5d3ad1a..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/test_failure +++ /dev/null @@ -1,9 +0,0 @@ ---- FAIL: TestContext2Plan_moduleProviderInherit (0.01s) - context_plan_test.go:552: bad: []string{"child"} -map[string]dag.Vertex{} -"module.middle.null" -map[string]dag.Vertex{} -"module.middle.module.inner.null" -map[string]dag.Vertex{} -"aws" -FAIL diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go index f4a431a..0e47f20 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go @@ -1,6 +1,8 @@ package terraform import ( + "log" + "github.com/hashicorp/terraform/dag" ) @@ -40,6 +42,9 @@ func (t *graphTransformerMulti) Transform(g *Graph) error { if err := t.Transform(g); err != nil { return err } + log.Printf( + "[TRACE] Graph after step %T:\n\n%s", + t, g.StringWithNodeTypes()) } return nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go index 10506ea..39cf097 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go @@ -1,10 +1,7 @@ package terraform import ( - "log" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" ) // GraphNodeAttachProvider is an interface that must be implemented by nodes @@ -19,62 +16,3 @@ type GraphNodeAttachProvider interface { // Sets the configuration AttachProvider(*config.ProviderConfig) } - -// AttachProviderConfigTransformer goes through the graph and attaches -// provider configuration structures to nodes that implement the interfaces -// above. -// -// The attached configuration structures are directly from the configuration. -// If they're going to be modified, a copy should be made. 
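For context, here is roughly how a caller would be expected to use the newly exported check together with ReadState. This is a hedged sketch of a hypothetical caller (the function and variable names are invented; only ReadState and CheckStateVersion come from the diff):

    // loadAndCheckState refuses to operate on a state written by a newer
    // Terraform, surfacing the stateInvalidTerraformVersionErr message above.
    func loadAndCheckState(stateFile *os.File) (*terraform.State, error) {
    	state, err := terraform.ReadState(stateFile)
    	if err != nil {
    		return nil, err
    	}
    	if err := terraform.CheckStateVersion(state); err != nil {
    		return nil, err
    	}
    	return state, nil
    }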
-type AttachProviderConfigTransformer struct { - Module *module.Tree // Module is the root module for the config -} - -func (t *AttachProviderConfigTransformer) Transform(g *Graph) error { - if err := t.attachProviders(g); err != nil { - return err - } - - return nil -} - -func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error { - // Go through and find GraphNodeAttachProvider - for _, v := range g.Vertices() { - // Only care about GraphNodeAttachProvider implementations - apn, ok := v.(GraphNodeAttachProvider) - if !ok { - continue - } - - // Determine what we're looking for - path := normalizeModulePath(apn.Path()) - path = path[1:] - name := apn.ProviderName() - log.Printf("[TRACE] Attach provider request: %#v %s", path, name) - - // Get the configuration. - tree := t.Module.Child(path) - if tree == nil { - continue - } - - // Go through the provider configs to find the matching config - for _, p := range tree.Config().ProviderConfigs { - // Build the name, which is "name.alias" if an alias exists - current := p.Name - if p.Alias != "" { - current += "." + p.Alias - } - - // If the configs match then attach! - if current == name { - log.Printf("[TRACE] Attaching provider config: %#v", p) - apn.AttachProvider(p) - break - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go index 2148cef..87a1f9c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go @@ -12,6 +12,9 @@ type DeposedTransformer struct { // View, if non-empty, is the ModuleState.View used around the state // to find deposed resources. View string + + // The provider used by the resources which were deposed + ResolvedProvider string } func (t *DeposedTransformer) Transform(g *Graph) error { @@ -33,14 +36,16 @@ func (t *DeposedTransformer) Transform(g *Graph) error { if len(rs.Deposed) == 0 { continue } + deposed := rs.Deposed for i, _ := range deposed { g.Add(&graphNodeDeposedResource{ - Index: i, - ResourceName: k, - ResourceType: rs.Type, - Provider: rs.Provider, + Index: i, + ResourceName: k, + ResourceType: rs.Type, + ProviderName: rs.Provider, + ResolvedProvider: t.ResolvedProvider, }) } } @@ -50,18 +55,23 @@ func (t *DeposedTransformer) Transform(g *Graph) error { // graphNodeDeposedResource is the graph vertex representing a deposed resource. type graphNodeDeposedResource struct { - Index int - ResourceName string - ResourceType string - Provider string + Index int + ResourceName string + ResourceType string + ProviderName string + ResolvedProvider string } func (n *graphNodeDeposedResource) Name() string { return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index) } -func (n *graphNodeDeposedResource) ProvidedBy() []string { - return []string{resourceProvider(n.ResourceName, n.Provider)} +func (n *graphNodeDeposedResource) ProvidedBy() string { + return resourceProvider(n.ResourceName, n.ProviderName) +} + +func (n *graphNodeDeposedResource) SetProvider(p string) { + n.ResolvedProvider = p } // GraphNodeEvalable impl.
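As a reminder of the state shape these nodes walk (illustrative values only, not from the patch): a resource ends up with deposed instances when create_before_destroy replaces its primary instance, and DeposedTransformer adds one graphNodeDeposedResource per entry. The newInstance and oldInstance variables below are hypothetical:

    // Sketch of the state consumed by DeposedTransformer.
    rs := &ResourceState{
    	Type:     "aws_instance",
    	Primary:  newInstance,                  // the freshly created instance
    	Deposed:  []*InstanceState{oldInstance}, // awaiting destroy, one node per index
    	Provider: "provider.aws",
    }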
@@ -81,7 +91,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalReadStateDeposed{ @@ -98,7 +108,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { &EvalWriteStateDeposed{ Name: n.ResourceName, ResourceType: n.ResourceType, - Provider: n.Provider, + Provider: n.ResolvedProvider, State: &state, Index: n.Index, }, @@ -114,7 +124,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalReadStateDeposed{ @@ -147,7 +157,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { &EvalWriteStateDeposed{ Name: n.ResourceName, ResourceType: n.ResourceType, - Provider: n.Provider, + Provider: n.ResolvedProvider, State: &state, Index: n.Index, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go index 22be1ab..a06ff29 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go @@ -119,17 +119,15 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { return &NodeApplyableProvider{NodeAbstractProvider: a} } steps := []GraphTransformer{ + // Add the local values + &LocalTransformer{Module: t.Module}, + // Add outputs and metadata &OutputTransformer{Module: t.Module}, &AttachResourceConfigTransformer{Module: t.Module}, &AttachStateTransformer{State: t.State}, - // Add providers since they can affect destroy order as well - &MissingProviderTransformer{AllowAny: true, Concrete: providerFn}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: t.Module}, + TransformProviders(nil, providerFn, t.Module), // Add all the variables. 
We can depend on resources through // variables due to module parameters, and we need to properly diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go index 081df2f..fcbff65 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go @@ -21,9 +21,9 @@ func (t *ImportStateTransformer) Transform(g *Graph) error { } nodes = append(nodes, &graphNodeImportState{ - Addr: addr, - ID: target.ID, - Provider: target.Provider, + Addr: addr, + ID: target.ID, + ProviderName: target.Provider, }) } @@ -36,9 +36,10 @@ func (t *ImportStateTransformer) Transform(g *Graph) error { } type graphNodeImportState struct { - Addr *ResourceAddress // Addr is the resource address to import to - ID string // ID is the ID to import as - Provider string // Provider string + Addr *ResourceAddress // Addr is the resource address to import to + ID string // ID is the ID to import as + ProviderName string // Provider string + ResolvedProvider string // provider node address states []*InstanceState } @@ -47,8 +48,12 @@ func (n *graphNodeImportState) Name() string { return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) } -func (n *graphNodeImportState) ProvidedBy() []string { - return []string{resourceProvider(n.Addr.Type, n.Provider)} +func (n *graphNodeImportState) ProvidedBy() string { + return resourceProvider(n.Addr.Type, n.ProviderName) +} + +func (n *graphNodeImportState) SetProvider(p string) { + n.ResolvedProvider = p } // GraphNodeSubPath @@ -72,7 +77,7 @@ func (n *graphNodeImportState) EvalTree() EvalNode { return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalImportState{ @@ -149,10 +154,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { // is safe. for i, state := range n.states { g.Add(&graphNodeImportStateSub{ - Target: addrs[i], - Path_: n.Path(), - State: state, - Provider: n.Provider, + Target: addrs[i], + Path_: n.Path(), + State: state, + ProviderName: n.ProviderName, + ResolvedProvider: n.ResolvedProvider, }) } @@ -170,10 +176,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { // and is part of the subgraph. This node is responsible for refreshing // and adding a resource to the state once it is imported. 
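The shift from ProvidedBy() []string to a single string plus SetProvider recurs across these node types (deposed resources above, import nodes here). A minimal, hypothetical consumer shows the shape of the new contract; this type is not in the patch:

    // exampleConsumer illustrates GraphNodeProviderConsumer after this change.
    type exampleConsumer struct {
    	providerName     string // what the config asked for, e.g. "aws" or "aws.west"
    	resolvedProvider string // full address chosen by ProviderTransformer
    }

    func (c *exampleConsumer) ProvidedBy() string   { return c.providerName }
    func (c *exampleConsumer) SetProvider(p string) { c.resolvedProvider = p }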
type graphNodeImportStateSub struct { - Target *ResourceAddress - State *InstanceState - Path_ []string - Provider string + Target *ResourceAddress + State *InstanceState + Path_ []string + ProviderName string + ResolvedProvider string } func (n *graphNodeImportStateSub) Name() string { @@ -216,7 +223,7 @@ func (n *graphNodeImportStateSub) EvalTree() EvalNode { return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: resourceProvider(info.Type, n.Provider), + Name: n.ResolvedProvider, Output: &provider, }, &EvalRefresh{ @@ -233,7 +240,7 @@ func (n *graphNodeImportStateSub) EvalTree() EvalNode { &EvalWriteState{ Name: key.String(), ResourceType: info.Type, - Provider: resourceProvider(info.Type, n.Provider), + Provider: n.ResolvedProvider, State: &state, }, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go new file mode 100644 index 0000000..95ecfc0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go @@ -0,0 +1,40 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" +) + +// LocalTransformer is a GraphTransformer that adds all the local values +// from the configuration to the graph. +type LocalTransformer struct { + Module *module.Tree +} + +func (t *LocalTransformer) Transform(g *Graph) error { + return t.transformModule(g, t.Module) +} + +func (t *LocalTransformer) transformModule(g *Graph, m *module.Tree) error { + if m == nil { + // Can't have any locals if there's no config + return nil + } + + for _, local := range m.Config().Locals { + node := &NodeLocal{ + PathValue: normalizeModulePath(m.Path()), + Config: local, + } + + g.Add(node) + } + + // Also populate locals for child modules + for _, c := range m.Children() { + if err := t.transformModule(g, c); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go index 49568d5..aea2bd0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go @@ -21,43 +21,32 @@ func (t *OrphanOutputTransformer) Transform(g *Graph) error { return nil } - return t.transform(g, t.Module) -} - -func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error { - // Get our configuration, and recurse into children - var c *config.Config - if m != nil { - c = m.Config() - for _, child := range m.Children() { - if err := t.transform(g, child); err != nil { - return err - } + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err } } + return nil +} - // Get the state. If there is no state, then we have no orphans! - path := normalizeModulePath(m.Path()) - state := t.State.ModuleByPath(path) - if state == nil { +func (t *OrphanOutputTransformer) transform(g *Graph, ms *ModuleState) error { + if ms == nil { return nil } - // Make a map of the valid outputs - valid := make(map[string]struct{}) - for _, o := range c.Outputs { - valid[o.Name] = struct{}{} - } + path := normalizeModulePath(ms.Path) - // Go through the outputs and find the ones that aren't in our config. 
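The orphan detection being removed below (and its replacement, RemovedOutputs, earlier in this diff) is a plain set difference: start from everything in state, delete what the config still declares, and whatever remains is orphaned. Distilled into a standalone helper with simplified types (a sketch, not the vendored implementation):

    // removedNames: everything present in state but no longer declared in config.
    func removedNames(inState map[string]string, declared []string) []string {
    	keys := make(map[string]struct{}, len(inState))
    	for k := range inState {
    		keys[k] = struct{}{}
    	}
    	for _, name := range declared {
    		delete(keys, name)
    	}
    	result := make([]string, 0, len(keys))
    	for k := range keys {
    		result = append(result, k)
    	}
    	return result
    }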
- for n, _ := range state.Outputs { - // If it is in the valid map, then ignore - if _, ok := valid[n]; ok { - continue - } + // Get the config for this path, which is nil if the entire module has been + // removed. + var c *config.Config + if m := t.Module.Child(path[1:]); m != nil { + c = m.Config() + } - // Orphan! + // add all the orphaned outputs to the graph + for _, n := range ms.RemovedOutputs(c) { g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) + } return nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go index b260f4c..faa25e4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go @@ -1,7 +1,10 @@ package terraform import ( + "log" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" ) // OutputTransformer is a GraphTransformer that adds all the outputs @@ -41,11 +44,6 @@ func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { // Add all outputs here for _, o := range os { - // Build the node. - // - // NOTE: For now this is just an "applyable" output. As we build - // new graph builders for the other operations I suspect we'll - // find a way to parameterize this, require new transforms, etc. node := &NodeApplyableOutput{ PathValue: normalizeModulePath(m.Path()), Config: o, @@ -57,3 +55,41 @@ func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { return nil } + +// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete +// outputs during destroy. We need to do this to ensure that no stale outputs +// are ever left in the state. +type DestroyOutputTransformer struct { +} + +func (t *DestroyOutputTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + output, ok := v.(*NodeApplyableOutput) + if !ok { + continue + } + + // create the destroy node for this output + node := &NodeDestroyableOutput{ + PathValue: output.PathValue, + Config: output.Config, + } + + log.Printf("[TRACE] creating %s", node.Name()) + g.Add(node) + + deps, err := g.Descendents(v) + if err != nil { + return err + } + + // the destroy node must depend on the eval node + deps.Add(v) + + for _, d := range deps.List() { + log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d)) + g.Connect(dag.BasicEdge(node, d)) + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go index b9695d5..c4772b4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go @@ -1,19 +1,46 @@ package terraform import ( + "errors" "fmt" "log" "strings" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" ) +func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { + return GraphTransformMulti( + // Add providers from the config + &ProviderConfigTransformer{ + Module: mod, + Providers: providers, + Concrete: concrete, + }, + // Add any remaining missing providers + &MissingProviderTransformer{ + Providers: providers, + Concrete: concrete, + }, + // Connect the providers + &ProviderTransformer{}, + // Remove unused providers and 
proxies + &PruneProviderTransformer{}, + // Connect providers to their parent provider nodes + &ParentProviderTransformer{}, + ) +} + // GraphNodeProvider is an interface that nodes that can be a provider -// must implement. The ProviderName returned is the name of the provider -// they satisfy. +// must implement. +// ProviderName returns the name of the provider this satisfies. +// Name returns the full name of the provider in the config. type GraphNodeProvider interface { ProviderName() string + Name() string } // GraphNodeCloseProvider is an interface that nodes that can be a close @@ -25,9 +52,12 @@ type GraphNodeCloseProvider interface { // GraphNodeProviderConsumer is an interface that nodes that require // a provider must implement. ProvidedBy must return the name of the provider -// to use. +// to use. This may be a provider by type, type.alias or a fully resolved +// provider name type GraphNodeProviderConsumer interface { - ProvidedBy() []string + ProvidedBy() string + // Set the resolved provider address for this resource. + SetProvider(string) } // ProviderTransformer is a GraphTransformer that maps resources to @@ -41,18 +71,52 @@ func (t *ProviderTransformer) Transform(g *Graph) error { m := providerVertexMap(g) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProviderConsumer); ok { - for _, p := range pv.ProvidedBy() { - target := m[providerMapKey(p, pv)] - if target == nil { - println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv))) - err = multierror.Append(err, fmt.Errorf( - "%s: provider %s couldn't be found", - dag.VertexName(v), p)) - continue + p := pv.ProvidedBy() + + key := providerMapKey(p, pv) + target := m[key] + + sp, ok := pv.(GraphNodeSubPath) + if !ok && target == nil { + // no target, and no path to walk up + err = multierror.Append(err, fmt.Errorf( + "%s: provider %s couldn't be found", + dag.VertexName(v), p)) + break + } + + // if we don't have a provider at this level, walk up the path looking for one + for i := 1; target == nil; i++ { + path := normalizeModulePath(sp.Path()) + if len(path) < i { + break + } + + key = ResolveProviderName(p, path[:len(path)-i]) + target = m[key] + if target != nil { + break } + } + + if target == nil { + err = multierror.Append(err, fmt.Errorf( + "%s: configuration for %s is not present; a provider configuration block is required for all operations", + dag.VertexName(v), p, + )) + break + } - g.Connect(dag.BasicEdge(v, target)) + // see if this is an inherited provider + if p, ok := target.(*graphNodeProxyProvider); ok { + g.Remove(p) + target = p.Target() + key = target.(GraphNodeProvider).Name() } + + log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) + pv.SetProvider(key) + g.Connect(dag.BasicEdge(v, target)) } } @@ -67,36 +131,32 @@ type CloseProviderTransformer struct{} func (t *CloseProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) - cpm := closeProviderVertexMap(g) + cpm := make(map[string]*graphNodeCloseProvider) var err error - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProviderConsumer); ok { - for _, p := range pv.ProvidedBy() { - key := p - source := cpm[key] - - if source == nil { - // Create a new graphNodeCloseProvider and add it to the graph - source = &graphNodeCloseProvider{ProviderNameValue: p} - g.Add(source) - - // Close node needs to depend on provider - provider, ok := pm[key] - if !ok { - err = multierror.Append(err, fmt.Errorf( - "%s: provider %s couldn't be found for closing", - dag.VertexName(v), p)) - continue -
} - g.Connect(dag.BasicEdge(source, provider)) - - // Make sure we also add the new graphNodeCloseProvider to the map - // so we don't create and add any duplicate graphNodeCloseProviders. - cpm[key] = source - } - // Close node depends on all nodes provided by the provider - g.Connect(dag.BasicEdge(source, v)) + for _, v := range pm { + p := v.(GraphNodeProvider) + + // get the close provider of this type if we already created it + closer := cpm[p.Name()] + + if closer == nil { + // create a closer for this provider type + closer = &graphNodeCloseProvider{ProviderNameValue: p.Name()} + g.Add(closer) + cpm[p.Name()] = closer + } + + // Close node depends on the provider itself + // this is added unconditionally, so it will connect to all instances + // of the provider. Extra edges will be removed by transitive + // reduction. + g.Connect(dag.BasicEdge(closer, p)) + + // connect all the provider's resources to the close node + for _, s := range g.UpEdges(p).List() { + if _, ok := s.(GraphNodeProviderConsumer); ok { + g.Connect(dag.BasicEdge(closer, s)) } } } @@ -104,18 +164,14 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error { return err } -// MissingProviderTransformer is a GraphTransformer that adds nodes -// for missing providers into the graph. Specifically, it creates provider -// configuration nodes for all the providers that we support. These are -// pruned later during an optimization pass. +// MissingProviderTransformer is a GraphTransformer that adds nodes for all +// required providers into the graph. Specifically, it creates provider +// configuration nodes for all the providers that we support. These are pruned +// later during an optimization pass. type MissingProviderTransformer struct { // Providers is the list of providers we support. Providers []string - // AllowAny will not check that a provider is supported before adding - // it to the graph. - AllowAny bool - // Concrete, if set, overrides how the providers are made. Concrete ConcreteProviderNodeFunc } @@ -128,99 +184,57 @@ func (t *MissingProviderTransformer) Transform(g *Graph) error { } } - // Create a set of our supported providers - supported := make(map[string]struct{}, len(t.Providers)) - for _, v := range t.Providers { - supported[v] = struct{}{} - } - - // Get the map of providers we already have in our graph + var err error m := providerVertexMap(g) - - // Go through all the provider consumers and make sure we add - // that provider if it is missing. We use a for loop here instead - // of "range" since we'll modify check as we go to add more to check. - check := g.Vertices() - for i := 0; i < len(check); i++ { - v := check[i] - + for _, v := range g.Vertices() { pv, ok := v.(GraphNodeProviderConsumer) if !ok { continue } - // If this node has a subpath, then we use that as a prefix - // into our map to check for an existing provider. - var path []string - if sp, ok := pv.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - path = raw - } - } + p := pv.ProvidedBy() + // this may be the resolved provider from the state, so we need to get + // the base provider name.
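The strings.SplitAfter trick used just below deserves a concrete trace. Both inputs shown are hypothetical values of the kind this code handles:

    // A resolved address keeps only what follows the last "provider." marker:
    parts := strings.SplitAfter("module.child.provider.aws.west", "provider.")
    fmt.Println(parts[len(parts)-1]) // "aws.west"

    // An unresolved name has no marker, so it passes through unchanged:
    parts = strings.SplitAfter("aws", "provider.")
    fmt.Println(parts[len(parts)-1]) // "aws"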
+ parts := strings.SplitAfter(p, "provider.") + p = parts[len(parts)-1] - for _, p := range pv.ProvidedBy() { - key := providerMapKey(p, pv) - if _, ok := m[key]; ok { - // This provider already exists as a configure node - continue - } + key := ResolveProviderName(p, nil) + provider := m[key] - // If the provider has an alias in it, we just want the type - ptype := p - if idx := strings.IndexRune(p, '.'); idx != -1 { - ptype = p[:idx] - } + // we already have it + if provider != nil { + continue + } - if !t.AllowAny { - if _, ok := supported[ptype]; !ok { - // If we don't support the provider type, skip it. - // Validation later will catch this as an error. - continue - } - } + // we don't implicitly create aliased providers + if strings.Contains(p, ".") { + log.Println("[DEBUG] not adding missing provider alias:", p) + continue + } - // Add the missing provider node to the graph - v := t.Concrete(&NodeAbstractProvider{ - NameValue: p, - PathValue: path, - }).(dag.Vertex) - if len(path) > 0 { - // We'll need the parent provider as well, so let's - // add a dummy node to check to make sure that we add - // that parent provider. - check = append(check, &graphNodeProviderConsumerDummy{ - ProviderValue: p, - PathValue: path[:len(path)-1], - }) - } + log.Println("[DEBUG] adding missing provider:", p) - m[key] = g.Add(v) - } + // create the missing top-level provider + provider = t.Concrete(&NodeAbstractProvider{ + NameValue: p, + }).(dag.Vertex) + + m[key] = g.Add(provider) } - return nil + return err } // ParentProviderTransformer connects provider nodes to their parents. // // This works by finding nodes that are both GraphNodeProviders and // GraphNodeSubPath. It then connects the providers to their parent -// path. +// path. The parent provider is always at the root level. type ParentProviderTransformer struct{} func (t *ParentProviderTransformer) Transform(g *Graph) error { - // Make a mapping of path to dag.Vertex, where path is: "path.name" - m := make(map[string]dag.Vertex) - - // Also create a map that maps a provider to its parent - parentMap := make(map[dag.Vertex]string) - for _, raw := range g.Vertices() { - // If it is the flat version, then make it the non-flat version. - // We eventually want to get rid of the flat version entirely so - // this is a stop-gap while it still exists. - var v dag.Vertex = raw - + pm := providerVertexMap(g) + for _, v := range g.Vertices() { // Only care about providers pn, ok := v.(GraphNodeProvider) if !ok || pn.ProviderName() == "" { @@ -228,53 +242,48 @@ func (t *ParentProviderTransformer) Transform(g *Graph) error { } // Also require a subpath, if there is no subpath then we - // just totally ignore it. The expectation of this transform is - // that it is used with a graph builder that is already flattened. - var path []string - if pn, ok := raw.(GraphNodeSubPath); ok { - path = pn.Path() - } - path = normalizeModulePath(path) - - // Build the key with path.name i.e. "child.subchild.aws" - key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName()) - m[key] = raw - - // Determine the parent if we're non-root. This is length 1 since - // the 0 index should be "root" since we normalize above. - if len(path) > 1 { - path = path[:len(path)-1] - key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName()) - parentMap[raw] = key + // can't have a parent. + if pn, ok := v.(GraphNodeSubPath); ok { + if len(normalizeModulePath(pn.Path())) <= 1 { + continue + } } - } - // Connect!
- for v, key := range parentMap { - if parent, ok := m[key]; ok { + // this provider may be disabled, but we can only get its name from + // the ProviderName string + name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil) + parent := pm[name] + if parent != nil { g.Connect(dag.BasicEdge(v, parent)) } - } + } return nil } -// PruneProviderTransformer is a GraphTransformer that prunes all the -// providers that aren't needed from the graph. A provider is unneeded if -// no resource or module is using that provider. +// PruneProviderTransformer removes any providers that are not actually used by +// anything, and provider proxies. This avoids the provider being initialized +// and configured. This both saves resources but also avoids errors since +// configuration may imply initialization which may require auth. type PruneProviderTransformer struct{} func (t *PruneProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { - // We only care about the providers - if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" { + // We only care about providers + pn, ok := v.(GraphNodeProvider) + if !ok || pn.ProviderName() == "" { continue } - // Does anything depend on this? If not, then prune it. - if s := g.UpEdges(v); s.Len() == 0 { - if nv, ok := v.(dag.NamedVertex); ok { - log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name()) - } + + // ProxyProviders will have up edges, but we're now done with them in the graph + if _, ok := v.(*graphNodeProxyProvider); ok { + log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) + g.Remove(v) + } + + // Remove providers with no dependencies. + if g.UpEdges(v).Len() == 0 { + log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) g.Remove(v) } } @@ -285,23 +294,26 @@ func (t *PruneProviderTransformer) Transform(g *Graph) error { // providerMapKey is a helper that gives us the key to use for the // maps returned by things such as providerVertexMap. func providerMapKey(k string, v dag.Vertex) string { - pathPrefix := "" - if sp, ok := v.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - pathPrefix = modulePrefixStr(raw) + "." - } + if strings.Contains(k, "provider.") { + // this is already resolved + return k } - return pathPrefix + k + // otherwise, resolve the name using the vertex's module path + var path []string + if sp, ok := v.(GraphNodeSubPath); ok { + path = normalizeModulePath(sp.Path()) + } + return ResolveProviderName(k, path) } func providerVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvider); ok { - key := providerMapKey(pv.ProviderName(), v) - m[key] = v + // TODO: The Name may have meta info, like " (disabled)" + name := strings.SplitN(pv.Name(), " ", 2)[0] + m[name] = v } } @@ -324,7 +336,7 @@ type graphNodeCloseProvider struct { } func (n *graphNodeCloseProvider) Name() string { - return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) + return n.ProviderNameValue + " (close)" } // GraphNodeEvalable impl. @@ -362,19 +374,233 @@ func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { return true } -// graphNodeProviderConsumerDummy is a struct that never enters the real -// graph (though it could to no ill effect). It implements -// GraphNodeProviderConsumer and GraphNodeSubpath as a way to force -// certain transformations.
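ResolveProviderName, which providerMapKey and friends lean on, is defined elsewhere in this package and does not appear in this hunk. The naming scheme it implements can be approximated like so; treat this as a sketch of the address format, not the vendored implementation:

    // qualifiedProviderName builds keys like "provider.aws" at the root and
    // "module.child.provider.aws.west" inside modules.
    func qualifiedProviderName(name string, path []string) string {
    	if len(path) > 0 && path[0] == "root" {
    		path = path[1:] // the implicit root element carries no prefix
    	}
    	key := "provider." + name
    	for i := len(path) - 1; i >= 0; i-- {
    		key = "module." + path[i] + "." + key
    	}
    	return key
    }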
-type graphNodeProviderConsumerDummy struct { - ProviderValue string - PathValue []string +// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to +// store the name and value of a provider node for inheritance between modules. +// These nodes are only used to store the data while loading the provider +// configurations, and are removed after all the resources have been connected +// to their providers. +type graphNodeProxyProvider struct { + nameValue string + path []string + target GraphNodeProvider +} + +func (n *graphNodeProxyProvider) ProviderName() string { + return n.Target().ProviderName() +} + +func (n *graphNodeProxyProvider) Name() string { + return ResolveProviderName(n.nameValue, n.path) +} + +// find the concrete provider instance +func (n *graphNodeProxyProvider) Target() GraphNodeProvider { + switch t := n.target.(type) { + case *graphNodeProxyProvider: + return t.Target() + default: + return n.target + } +} + +// ProviderConfigTransformer adds all provider nodes from the configuration and +// attaches the configs. +type ProviderConfigTransformer struct { + Providers []string + Concrete ConcreteProviderNodeFunc + + // each provider node is stored here so that the proxy nodes can look up + // their targets by name. + providers map[string]GraphNodeProvider + // record providers that can be overridden with a proxy + proxiable map[string]bool + + // Module is the module to add provider configurations from. + Module *module.Tree } -func (n *graphNodeProviderConsumerDummy) Path() []string { - return n.PathValue +func (t *ProviderConfigTransformer) Transform(g *Graph) error { + // If no module is given, we don't do anything + if t.Module == nil { + return nil + } + + // If the module isn't loaded, that is simply an error + if !t.Module.Loaded() { + return errors.New("module must be loaded for ProviderConfigTransformer") + } + + t.providers = make(map[string]GraphNodeProvider) + t.proxiable = make(map[string]bool) + + // Start the transformation process + if err := t.transform(g, t.Module); err != nil { + return err + } + + // finally attach the configs to the new nodes + return t.attachProviderConfigs(g) } -func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string { - return []string{n.ProviderValue} +func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { + // If no config, do nothing + if m == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, m); err != nil { + return err + } + + // Transform all the children. + for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err + } + } + return nil +} + +func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { + log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) + + // Get the configuration for this module + conf := m.Config() + + // Build the path we're at + path := m.Path() + if len(path) > 0 { + path = append([]string{RootModuleName}, path...) + } + + // add all providers from the configuration + for _, p := range conf.ProviderConfigs { + name := p.Name + if p.Alias != "" { + name += "."
+ p.Alias + } + + v := t.Concrete(&NodeAbstractProvider{ + NameValue: name, + PathValue: path, + }) + + // Add it to the graph + g.Add(v) + fullName := ResolveProviderName(name, path) + t.providers[fullName] = v.(GraphNodeProvider) + t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 + } + + // Now replace the provider nodes with proxy nodes if a provider was being + // passed in, and create implicit proxies if there was no config. Any extra + // proxies will be removed in the prune step. + return t.addProxyProviders(g, m) +} + +func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { + path := m.Path() + + // can't add proxies at the root + if len(path) == 0 { + return nil + } + + parentPath := path[:len(path)-1] + parent := t.Module.Child(parentPath) + if parent == nil { + return nil + } + + var parentCfg *config.Module + for _, mod := range parent.Config().Modules { + if mod.Name == m.Name() { + parentCfg = mod + break + } + } + + if parentCfg == nil { + // this can't really happen during normal execution. + return fmt.Errorf("parent module config not found for %s", m.Name()) + } + + // Go through all the providers the parent is passing in, and add proxies to + // the parent provider nodes. + for name, parentName := range parentCfg.Providers { + fullName := ResolveProviderName(name, path) + fullParentName := ResolveProviderName(parentName, parentPath) + + parentProvider := t.providers[fullParentName] + + if parentProvider == nil { + return fmt.Errorf("missing provider %s", fullParentName) + } + + proxy := &graphNodeProxyProvider{ + nameValue: name, + path: path, + target: parentProvider, + } + + concreteProvider := t.providers[fullName] + + // replace the concrete node with the provider passed in + if concreteProvider != nil && t.proxiable[fullName] { + g.Replace(concreteProvider, proxy) + t.providers[fullName] = proxy + continue + } + + // aliased providers can't be implicitly passed in + if strings.Contains(name, ".") { + continue + } + + // There was no concrete provider, so add this as an implicit provider. + // The extra proxy will be pruned later if it's unused. + g.Add(proxy) + t.providers[fullName] = proxy + } + return nil +} + +func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachProvider implementations + apn, ok := v.(GraphNodeAttachProvider) + if !ok { + continue + } + + // Determine what we're looking for + path := normalizeModulePath(apn.Path())[1:] + name := apn.ProviderName() + log.Printf("[TRACE] Attach provider request: %#v %s", path, name) + + // Get the configuration. + tree := t.Module.Child(path) + if tree == nil { + continue + } + + // Go through the provider configs to find the matching config + for _, p := range tree.Config().ProviderConfigs { + // Build the name, which is "name.alias" if an alias exists + current := p.Name + if p.Alias != "" { + current += "." + p.Alias + } + + // If the configs match then attach! 
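Module provider inheritance can nest, so a proxy may point at another proxy when a provider is passed down through several module levels; Target() earlier in this file follows the chain recursively. The recursion unrolls to a simple loop, shown here as an equivalent, hypothetical helper (not part of the patch):

    // concreteTarget follows proxy links until it reaches a real provider node.
    func concreteTarget(n GraphNodeProvider) GraphNodeProvider {
    	for {
    		proxy, ok := n.(*graphNodeProxyProvider)
    		if !ok {
    			return n
    		}
    		n = proxy.target
    	}
    }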
+ if current == name { + log.Printf("[TRACE] Attaching provider config: %#v", p) + apn.AttachProvider(p) + break + } + } + } + + return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go deleted file mode 100644 index d9919f3..0000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go +++ /dev/null @@ -1,50 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// DisableProviderTransformer "disables" any providers that are not actually -// used by anything. This avoids the provider being initialized and configured. -// This both saves resources but also avoids errors since configuration -// may imply initialization which may require auth. -type DisableProviderTransformer struct{} - -func (t *DisableProviderTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - // We only care about providers - pn, ok := v.(GraphNodeProvider) - if !ok || pn.ProviderName() == "" { - continue - } - - // If we have dependencies, then don't disable - if g.UpEdges(v).Len() > 0 { - continue - } - - // Get the path - var path []string - if pn, ok := v.(GraphNodeSubPath); ok { - path = pn.Path() - } - - // Disable the provider by replacing it with a "disabled" provider - disabled := &NodeDisabledProvider{ - NodeAbstractProvider: &NodeAbstractProvider{ - NameValue: pn.ProviderName(), - PathValue: path, - }, - } - - if !g.Replace(v, disabled) { - panic(fmt.Sprintf( - "vertex disappeared from under us: %s", - dag.VertexName(v))) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go index c545235..be8c7f9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go @@ -76,6 +76,85 @@ func (t *ReferenceTransformer) Transform(g *Graph) error { return nil } +// DestroyValueReferenceTransformer is a GraphTransformer that reverses the edges +// for locals and outputs that depend on other nodes which will be +// removed during destroy. If a destroy node is evaluated before the local or +// output value, it will be removed from the state, and the later interpolation +// will fail. +type DestroyValueReferenceTransformer struct{} +
+func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error { + vs := g.Vertices() + for _, v := range vs { + switch v.(type) { + case *NodeApplyableOutput, *NodeLocal: + // OK + default: + continue + } + + // reverse any outgoing edges so that the value is evaluated first. + for _, e := range g.EdgesFrom(v) { + target := e.Target() + + // only destroy nodes will be evaluated in reverse + if _, ok := target.(GraphNodeDestroyer); !ok { + continue + } + + log.Printf("[TRACE] output dep: %s", dag.VertexName(target)) + + g.RemoveEdge(e) + g.Connect(&DestroyEdge{S: target, T: v}) + } + } + + return nil +} + +// PruneUnusedValuesTransformer is a GraphTransformer that removes local and +// output values which are not referenced in the graph. Since outputs and +// locals always need to be evaluated, if they reference a resource that is not +// available in the state the interpolation could fail.
+type PruneUnusedValuesTransformer struct{} + +func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error { + // this might need multiple runs in order to ensure that pruning a value + // doesn't affect a previously checked value. + for removed := 0; ; removed = 0 { + for _, v := range g.Vertices() { + switch v.(type) { + case *NodeApplyableOutput, *NodeLocal: + // OK + default: + continue + } + + dependants := g.UpEdges(v) + + switch dependants.Len() { + case 0: + // nothing at all depends on this + g.Remove(v) + removed++ + case 1: + // because an output's destroy node always depends on the output, + // we need to check for the case of a single destroy node. + d := dependants.List()[0] + if _, ok := d.(*NodeDestroyableOutput); ok { + g.Remove(v) + removed++ + } + } + } + if removed == 0 { + break + } + } + + return nil +} + // ReferenceMap is a structure that can be used to efficiently check // for references on a graph. type ReferenceMap struct { @@ -96,6 +175,7 @@ func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) { var matches []dag.Vertex var missing []string prefix := m.prefix(v) + for _, ns := range rn.References() { found := false for _, n := range strings.Split(ns, "/") { @@ -108,19 +188,14 @@ func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) { // Mark that we found a match found = true - // Make sure this isn't a self reference, which isn't included - selfRef := false for _, p := range parents { + // don't include self-references if p == v { - selfRef = true - break + continue } - } - if selfRef { - continue + matches = append(matches, p) } - matches = append(matches, parents...) break } @@ -296,14 +371,21 @@ func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string { return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)} case *config.UserVariable: return []string{fmt.Sprintf("var.%s", v.Name)} + case *config.LocalVariable: + return []string{fmt.Sprintf("local.%s", v.Name)} default: return nil } } func modulePrefixStr(p []string) string { + // strip "root" + if len(p) > 0 && p[0] == rootModulePath[0] { + p = p[1:] + } + parts := make([]string, 0, len(p)*2) - for _, p := range p[1:] { + for _, p := range p { parts = append(parts, "module", p) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go new file mode 100644 index 0000000..2e05edb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go @@ -0,0 +1,32 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/config/module" +) + +// RemovedModuleTransformer implements GraphTransformer to add nodes indicating +// when a module was removed from the configuration. +type RemovedModuleTransformer struct { + Module *module.Tree // root module + State *State +} + +func (t *RemovedModuleTransformer) Transform(g *Graph) error { + // nothing to remove if there's no state!
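Stepping back to the prune loop above: it is a fixed-point iteration, since removing one value can strand another that only it referenced. The pattern in isolation, as a generic sketch (the helper name is invented):

    // pruneToFixedPoint repeats a sweep until a full pass removes nothing.
    func pruneToFixedPoint(sweep func() (removed int)) {
    	for sweep() > 0 {
    	}
    }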
+ if t.State == nil { + return nil + } + + for _, m := range t.State.Modules { + c := t.Module.Child(m.Path[1:]) + if c != nil { + continue + } + + log.Printf("[DEBUG] module %s no longer in config\n", modulePrefixStr(m.Path)) + g.Add(&NodeModuleRemoved{PathValue: m.Path}) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go index cda35cb..e528b37 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go @@ -37,7 +37,9 @@ func (t *ResourceCountTransformer) Transform(g *Graph) error { addr.Index = index // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} + abstract := &NodeAbstractResource{ + Addr: addr, + } var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go index 4f117b4..af6defe 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go @@ -73,9 +73,11 @@ func (t *TargetsTransformer) Transform(g *Graph) error { if _, ok := v.(GraphNodeResource); ok { removable = true } + if vr, ok := v.(RemovableIfNotTargeted); ok { removable = vr.RemoveIfNotTargeted() } + if removable && !targetedNodes.Include(v) { log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) g.Remove(v) @@ -135,7 +137,10 @@ func (t *TargetsTransformer) selectTargetedNodes( } } } + return t.addDependencies(targetedNodes, g) +} +func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) { // Handle nodes that need to be included if their dependencies are included. // This requires multiple passes since we need to catch transitive // dependencies if and only if they are via other nodes that also @@ -157,11 +162,6 @@ func (t *TargetsTransformer) selectTargetedNodes( } dependers = dependers.Filter(func(dv interface{}) bool { - // Can ignore nodes that are already targeted - /*if targetedNodes.Include(dv) { - return false - }*/ - _, ok := dv.(GraphNodeTargetDownstream) return ok }) @@ -180,6 +180,7 @@ func (t *TargetsTransformer) selectTargetedNodes( // depending on in case that informs its decision about whether // it is safe to be targeted. deps := g.DownEdges(v) + depsTargeted := deps.Intersection(targetedNodes) depsUntargeted := deps.Difference(depsTargeted) @@ -193,7 +194,50 @@ func (t *TargetsTransformer) selectTargetedNodes( } } - return targetedNodes, nil + return targetedNodes.Filter(func(dv interface{}) bool { + return filterPartialOutputs(dv, targetedNodes, g) + }), nil +} + +// Outputs may have been included transitively, but if any of their +// dependencies have been pruned they won't be resolvable. +// If nothing depends on the output, and the output is missing any +// dependencies, remove it from the graph. +// This essentially maintains the previous behavior where interpolation in +// outputs would fail silently, but can now surface errors where the output +// is required. +func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool { + // should this just be done with TargetDownstream? 
+ if _, ok := v.(*NodeApplyableOutput); !ok { + return true + } + + dependers := g.UpEdges(v) + for _, d := range dependers.List() { + if _, ok := d.(*NodeCountBoundary); ok { + continue + } + + if !targetedNodes.Include(d) { + // this one is going to be removed, so it doesn't count + continue + } + + // as soon as we see a real dependency, we mark this as + // non-removable + return true + } + + depends := g.DownEdges(v) + + for _, d := range depends.List() { + if !targetedNodes.Include(d) { + log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", + dag.VertexName(v), dag.VertexName(d)) + return false + } + } + return true } func (t *TargetsTransformer) nodeIsTarget( diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go index 7852bc4..d828c92 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go @@ -1,13 +1,18 @@ package terraform +import "sync" + // MockUIOutput is an implementation of UIOutput that can be used for tests. type MockUIOutput struct { + sync.Mutex OutputCalled bool OutputMessage string OutputFn func(string) } func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() o.OutputCalled = true o.OutputMessage = v if o.OutputFn != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go index 700be2a..a42613e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go +++ b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go @@ -1,14 +1,13 @@ package terraform import ( - "fmt" - "runtime" + "github.com/hashicorp/terraform/httpclient" ) -// The standard Terraform User-Agent format -const UserAgent = "Terraform %s (%s)" - // Generate a UserAgent string +// +// Deprecated: Use httpclient.UserAgentString if you are setting your +// own User-Agent header. func UserAgentString() string { - return fmt.Sprintf(UserAgent, VersionString(), runtime.Version()) + return httpclient.UserAgentString() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go index d61b11e..ac73015 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/version.go +++ b/vendor/github.com/hashicorp/terraform/terraform/version.go @@ -1,31 +1,10 @@ package terraform import ( - "fmt" - - "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/version" ) -// The main version number that is being run at the moment. -const Version = "0.10.0" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var VersionPrerelease = "dev" - -// SemVersion is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVersion = version.Must(version.NewVersion(Version)) - -// VersionHeader is the header name used to send the current terraform version -// in http requests. 
-const VersionHeader = "Terraform-Version" - +// TODO: update providers to use the version package directly func VersionString() string { - if VersionPrerelease != "" { - return fmt.Sprintf("%s-%s", Version, VersionPrerelease) - } - return Version + return version.String() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go index 3cbbf56..1f43045 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go +++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go @@ -6,19 +6,21 @@ import ( "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" + + tfversion "github.com/hashicorp/terraform/version" ) -// checkRequiredVersion verifies that any version requirements specified by +// CheckRequiredVersion verifies that any version requirements specified by // the configuration are met. // // This checks the root module as well as any additional version requirements // from child modules. // // This is tested in context_test.go. -func checkRequiredVersion(m *module.Tree) error { +func CheckRequiredVersion(m *module.Tree) error { // Check any children for _, c := range m.Children() { - if err := checkRequiredVersion(c); err != nil { + if err := CheckRequiredVersion(c); err != nil { return err } } @@ -49,7 +51,7 @@ func checkRequiredVersion(m *module.Tree) error { tf.RequiredVersion, err) } - if !cs.Check(SemVersion) { + if !cs.Check(tfversion.SemVer) { return fmt.Errorf( "The currently running version of Terraform doesn't meet the\n"+ "version requirements explicitly specified by the configuration.\n"+ @@ -62,7 +64,7 @@ func checkRequiredVersion(m *module.Tree) error { " Current version: %s", module, tf.RequiredVersion, - SemVersion) + tfversion.SemVer) } return nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go index cbd78dd..4cfc528 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" @@ -10,7 +10,7 @@ var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} func (i walkOperation) String() string { if i >= walkOperation(len(_walkOperation_index)-1) { - return fmt.Sprintf("walkOperation(%d)", i) + return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" } return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go new file mode 100644 index 0000000..2c23f76 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go @@ -0,0 +1,26 @@ +package tfdiags + +type Diagnostic interface { + Severity() Severity + Description() Description + Source() Source +} + +type Severity rune + +//go:generate stringer -type=Severity + +const ( + Error Severity = 'E' + Warning Severity = 'W' +) + +type Description struct { + Summary string + Detail string +} + +type Source struct { + Subject *SourceRange + Context *SourceRange +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go 
b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go new file mode 100644 index 0000000..667ba80 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go @@ -0,0 +1,181 @@ +package tfdiags + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl2/hcl" +) + +// Diagnostics is a list of diagnostics. Diagnostics is intended to be used +// where a Go "error" might normally be used, allowing richer information +// to be conveyed (more context, support for warnings). +// +// A nil Diagnostics is a valid, empty diagnostics list, thus allowing +// heap allocation to be avoided in the common case where there are no +// diagnostics to report at all. +type Diagnostics []Diagnostic + +// Append is the main interface for constructing Diagnostics lists, taking +// an existing list (which may be nil) and appending the new objects to it +// after normalizing them to be implementations of Diagnostic. +// +// The usual pattern for a function that natively "speaks" diagnostics is: +// +// // Create a nil Diagnostics at the start of the function +// var diags diag.Diagnostics +// +// // At later points, build on it if errors / warnings occur: +// foo, err := DoSomethingRisky() +// if err != nil { +// diags = diags.Append(err) +// } +// +// // Eventually return the result and diagnostics in place of error +// return result, diags +// +// Append accepts a variety of different diagnostic-like types, including +// native Go errors and HCL diagnostics. It also knows how to unwrap +// a multierror.Error into separate error diagnostics. It can be passed +// another Diagnostics to concatenate the two lists. If given something +// it cannot handle, this function will panic. +func (diags Diagnostics) Append(new ...interface{}) Diagnostics { + for _, item := range new { + if item == nil { + continue + } + + switch ti := item.(type) { + case Diagnostic: + diags = append(diags, ti) + case Diagnostics: + diags = append(diags, ti...) // flatten + case diagnosticsAsError: + diags = diags.Append(ti.Diagnostics) // unwrap + case hcl.Diagnostics: + for _, hclDiag := range ti { + diags = append(diags, hclDiagnostic{hclDiag}) + } + case *hcl.Diagnostic: + diags = append(diags, hclDiagnostic{ti}) + case *multierror.Error: + for _, err := range ti.Errors { + diags = append(diags, nativeError{err}) + } + case error: + switch { + case errwrap.ContainsType(ti, Diagnostics(nil)): + // If we have an errwrap wrapper with a Diagnostics hiding + // inside then we'll unpick it here to get access to the + // individual diagnostics. + diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) + case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): + // Likewise, if we have HCL diagnostics we'll unpick that too. + diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) + default: + diags = append(diags, nativeError{ti}) + } + default: + panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) + } + } + + // Given the above, we should never end up with a non-nil empty slice + // here, but we'll make sure of that so callers can rely on empty == nil + if len(diags) == 0 { + return nil + } + + return diags +} + +// HasErrors returns true if any of the diagnostics in the list have +// a severity of Error. 
+func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity() == Error { + return true + } + } + return false +} + +// ForRPC returns a version of the receiver that has been simplified so that +// it is friendly to RPC protocols. +// +// Currently this means that it can be serialized with encoding/gob and +// subsequently re-inflated. It may later grow to include other serialization +// formats. +// +// Note that this loses information about the original objects used to +// construct the diagnostics, so e.g. the errwrap API will not work as +// expected on an error-wrapped Diagnostics that came from ForRPC. +func (diags Diagnostics) ForRPC() Diagnostics { + ret := make(Diagnostics, len(diags)) + for i := range diags { + ret[i] = makeRPCFriendlyDiag(diags[i]) + } + return ret +} + +// Err flattens a diagnostics list into a single Go error, or to nil +// if the diagnostics list does not include any error-level diagnostics. +// +// This can be used to smuggle diagnostics through an API that deals in +// native errors, but unfortunately it will lose naked warnings (warnings +// that aren't accompanied by at least one error) since such APIs have no +// mechanism through which to report these. +// +// return result, diags.Err() +func (diags Diagnostics) Err() error { + if !diags.HasErrors() { + return nil + } + return diagnosticsAsError{diags} +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go new file mode 100644 index 0000000..c427879 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go @@ -0,0 +1,16 @@ +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. +// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging.
As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. +package tfdiags diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go new file mode 100644 index 0000000..35edc30 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/error.go @@ -0,0 +1,23 @@ +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: e.err.Error(), + } +} + +func (e nativeError) Source() Source { + // No source information available for a native error + return Source{} +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go new file mode 100644 index 0000000..24851f4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go @@ -0,0 +1,77 @@ +package tfdiags + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// hclDiagnostic is a Diagnostic implementation that wraps an HCL Diagnostic +type hclDiagnostic struct { + diag *hcl.Diagnostic +} + +var _ Diagnostic = hclDiagnostic{} + +func (d hclDiagnostic) Severity() Severity { + switch d.diag.Severity { + case hcl.DiagWarning: + return Warning + default: + return Error + } +} + +func (d hclDiagnostic) Description() Description { + return Description{ + Summary: d.diag.Summary, + Detail: d.diag.Detail, + } +} + +func (d hclDiagnostic) Source() Source { + var ret Source + if d.diag.Subject != nil { + rng := SourceRangeFromHCL(*d.diag.Subject) + ret.Subject = &rng + } + if d.diag.Context != nil { + rng := SourceRangeFromHCL(*d.diag.Context) + ret.Context = &rng + } + return ret +} + +// SourceRangeFromHCL constructs a SourceRange from the corresponding range +// type within the HCL package. +func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { + return SourceRange{ + Filename: hclRange.Filename, + Start: SourcePos{ + Line: hclRange.Start.Line, + Column: hclRange.Start.Column, + Byte: hclRange.Start.Byte, + }, + End: SourcePos{ + Line: hclRange.End.Line, + Column: hclRange.End.Column, + Byte: hclRange.End.Byte, + }, + } +} + +// ToHCL constructs an HCL Range from the receiving SourceRange. This is the +// opposite of SourceRangeFromHCL. +func (r SourceRange) ToHCL() hcl.Range { + return hcl.Range{ + Filename: r.Filename, + Start: hcl.Pos{ + Line: r.Start.Line, + Column: r.Start.Column, + Byte: r.Start.Byte, + }, + End: hcl.Pos{ + Line: r.End.Line, + Column: r.End.Column, + Byte: r.End.Byte, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go new file mode 100644 index 0000000..6cc95cc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go @@ -0,0 +1,53 @@ +package tfdiags + +import ( + "encoding/gob" +) + +type rpcFriendlyDiag struct { + Severity_ Severity + Summary_ string + Detail_ string + Subject_ *SourceRange + Context_ *SourceRange +} + +// makeRPCFriendlyDiag transforms a given diagnostic so that it is more friendly to +// RPC.
+// +// In particular, it currently returns an object that can be serialized and +// later re-inflated using gob. This definition may grow to include other +// serializations later. +func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { + desc := diag.Description() + source := diag.Source() + return &rpcFriendlyDiag{ + Severity_: diag.Severity(), + Summary_: desc.Summary, + Detail_: desc.Detail, + Subject_: source.Subject, + Context_: source.Context, + } +} + +func (d *rpcFriendlyDiag) Severity() Severity { + return d.Severity_ +} + +func (d *rpcFriendlyDiag) Description() Description { + return Description{ + Summary: d.Summary_, + Detail: d.Detail_, + } +} + +func (d *rpcFriendlyDiag) Source() Source { + return Source{ + Subject: d.Subject_, + Context: d.Context_, + } +} + +func init() { + gob.Register((*rpcFriendlyDiag)(nil)) +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go new file mode 100644 index 0000000..0b1249b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go @@ -0,0 +1,21 @@ +// Code generated by "stringer -type=Severity"; DO NOT EDIT. + +package tfdiags + +import "strconv" + +const ( + _Severity_name_0 = "Error" + _Severity_name_1 = "Warning" +) + +func (i Severity) String() string { + switch { + case i == 69: + return _Severity_name_0 + case i == 87: + return _Severity_name_1 + default: + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go new file mode 100644 index 0000000..fb3ac98 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go @@ -0,0 +1,25 @@ +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. +func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} + +func (e simpleWarning) Source() Source { + // No source information available for a simple warning + return Source{} +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go new file mode 100644 index 0000000..3031168 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go @@ -0,0 +1,35 @@ +package tfdiags + +import ( + "fmt" + "os" + "path/filepath" +) + +type SourceRange struct { + Filename string + Start, End SourcePos +} + +type SourcePos struct { + Line, Column, Byte int +} + +// StartString returns a string representation of the start of the range, +// including the filename and the line and column numbers. +func (r SourceRange) StartString() string { + filename := r.Filename + + // We'll try to relative-ize our filename here so it's less verbose + // in the common case of being in the current working directory. If not, + // we'll just show the full path.
+ wd, err := os.Getwd() + if err == nil { + relFn, err := filepath.Rel(wd, filename) + if err == nil { + filename = relFn + } + } + + return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) +} diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go new file mode 100644 index 0000000..b21b297 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/version/version.go @@ -0,0 +1,36 @@ +// The version package provides a location to set the release versions for all +// packages to consume, without creating import cycles. +// +// This package should not import any other terraform packages. +package version + +import ( + "fmt" + + version "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +var Version = "0.11.12" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var Prerelease = "dev" + +// SemVer is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVer = version.Must(version.NewVersion(Version)) + +// Header is the header name used to send the current terraform version +// in http requests. +const Header = "Terraform-Version" + +// String returns the complete version string, including prerelease +func String() string { + if Prerelease != "" { + return fmt.Sprintf("%s-%s", Version, Prerelease) + } + return Version +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 0000000..65dc692 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
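Editor's note: the Append/HasErrors/Err doc comments in the vendored tfdiags package above describe the intended calling pattern but only in prose. The following is a minimal, illustrative sketch of a consumer of that API, under the assumption that the package is imported at its vendored path; the loadConfig and doSomethingRisky functions are hypothetical, invented purely for this example.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

// doSomethingRisky is a hypothetical operation that can fail; it stands in
// for the DoSomethingRisky placeholder in the Append doc comment above.
func doSomethingRisky() (string, error) {
	return "", errors.New("disk full")
}

// loadConfig follows the documented pattern: start with a nil Diagnostics,
// Append errors and warnings as they occur, and return diags in place of
// a plain error.
func loadConfig() (string, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// A summary-only warning; Append normalizes it as a Diagnostic.
	diags = diags.Append(tfdiags.SimpleWarning("config format is deprecated"))

	result, err := doSomethingRisky()
	if err != nil {
		// A native Go error is wrapped as an error-severity diagnostic.
		diags = diags.Append(err)
	}

	return result, diags
}

func main() {
	result, diags := loadConfig()
	if diags.HasErrors() {
		// Err flattens the list into a single Go error for APIs that
		// only speak error; naked warnings would be lost here.
		fmt.Println("failed:", diags.Err())
		return
	}
	fmt.Println("ok:", result)
}
```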
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 0000000..74845de --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,37 @@ +# go-isatty + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 0000000..17d4f90 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements an interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go new file mode 100644 index 0000000..83c5887 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go @@ -0,0 +1,9 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is a terminal, which +// is always false on appengine classic, a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 0000000..42f2514 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go new file mode 100644 index 0000000..9d24bac --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 0000000..1f0c6bf --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 0000000..83c398b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,19 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml new file mode 100644 index 0000000..974234b --- /dev/null +++ b/vendor/github.com/mitchellh/cli/.travis.yml @@ -0,0 +1,13 @@ +sudo: false + +language: go + +go: + - 1.8 + - 1.9 + +branches: + only: + - master + +script: make updatedeps test testrace diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11.
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile new file mode 100644 index 0000000..4874b00 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/Makefile @@ -0,0 +1,20 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code +test: + go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) + +# testrace runs the race checker +testrace: + go list $(TEST) | xargs -n1 go test -race $(TESTARGS) + +# updatedeps installs all the dependencies to run and build +updatedeps: + go list ./... \ + | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ + | grep -v github.com/mitchellh/cli \ + | xargs go get -f -u -v + +.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md new file mode 100644 index 0000000..8f02cdd --- /dev/null +++ b/vendor/github.com/mitchellh/cli/README.md @@ -0,0 +1,67 @@ +# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) + +cli is a library for implementing powerful command-line interfaces in Go. +cli is the library that powers the CLI for +[Packer](https://github.com/mitchellh/packer), +[Serf](https://github.com/hashicorp/serf), +[Consul](https://github.com/hashicorp/consul), +[Vault](https://github.com/hashicorp/vault), +[Terraform](https://github.com/hashicorp/terraform), and +[Nomad](https://github.com/hashicorp/nomad). + +## Features + +* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. + +* Support for nested subcommands such as `cli foo bar`. + +* Optional support for default subcommands so `cli` does something + other than error. + +* Support for shell autocompletion of subcommands, flags, and arguments + with callbacks in Go. You don't need to write any shell code. + +* Automatic help generation for listing subcommands + +* Automatic help flag recognition of `-h`, `--help`, etc. + +* Automatic version flag recognition of `-v`, `--version`. + +* Helpers for interacting with the terminal, such as outputting information, + asking for input, etc. These are optional, you can always interact with the + terminal however you choose. + +* Use of Go interfaces/types makes augmenting various parts of the library a + piece of cake. + +## Example + +Below is a simple example of creating and running a CLI + +```go +package main + +import ( + "log" + "os" + + "github.com/mitchellh/cli" +) + +func main() { + c := cli.NewCLI("app", "1.0.0") + c.Args = os.Args[1:] + c.Commands = map[string]cli.CommandFactory{ + "foo": fooCommandFactory, + "bar": barCommandFactory, + } + + exitStatus, err := c.Run() + if err != nil { + log.Println(err) + } + + os.Exit(exitStatus) +} +``` + diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go new file mode 100644 index 0000000..3bec625 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/autocomplete.go @@ -0,0 +1,43 @@ +package cli + +import ( + "github.com/posener/complete/cmd/install" +) + +// autocompleteInstaller is an interface to be implemented to perform the +// autocomplete installation and uninstallation with a CLI. 
+// +// This interface is not exported because it only exists for unit tests +// to be able to test that the installation is called properly. +type autocompleteInstaller interface { + Install(string) error + Uninstall(string) error +} + +// realAutocompleteInstaller uses the real install package to do the +// install/uninstall. +type realAutocompleteInstaller struct{} + +func (i *realAutocompleteInstaller) Install(cmd string) error { + return install.Install(cmd) +} + +func (i *realAutocompleteInstaller) Uninstall(cmd string) error { + return install.Uninstall(cmd) +} + +// mockAutocompleteInstaller is used for tests to record the install/uninstall. +type mockAutocompleteInstaller struct { + InstallCalled bool + UninstallCalled bool +} + +func (i *mockAutocompleteInstaller) Install(cmd string) error { + i.InstallCalled = true + return nil +} + +func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { + i.UninstallCalled = true + return nil +} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go new file mode 100644 index 0000000..a25a582 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -0,0 +1,715 @@ +package cli + +import ( + "fmt" + "io" + "os" + "regexp" + "sort" + "strings" + "sync" + "text/template" + + "github.com/armon/go-radix" + "github.com/posener/complete" +) + +// CLI contains the state necessary to run subcommands and parse the +// command line arguments. +// +// CLI also supports nested subcommands, such as "cli foo bar". To use +// nested subcommands, the key in the Commands mapping below contains the +// full subcommand. In this example, it would be "foo bar". +// +// If you use a CLI with nested subcommands, some semantics change due to +// ambiguities: +// +// * We use longest prefix matching to find a matching subcommand. This +// means if you register "foo bar" and the user executes "cli foo qux", +// the "foo" command will be executed with the arg "qux". It is up to +// you to handle these args. One option is to just return the special +// help return code `RunResultHelp` to display help and exit. +// +// * The help flag "-h" or "-help" will look at all args to determine +// the help function. For example: "otto apps list -h" will show the +// help for "apps list" but "otto apps -h" will show it for "apps". +// In the normal CLI, only the first subcommand is used. +// +// * The help flag will list any subcommands that a command takes +// as well as the command's help itself. If there are no subcommands, +// it will note this. If the CLI itself has no subcommands, this entire +// section is omitted. +// +// * Any parent commands that don't exist are automatically created as +// no-op commands that just show help for other subcommands. For example, +// if you only register "foo bar", then "foo" is automatically created. +// +type CLI struct { + // Args is the list of command-line arguments received excluding + // the name of the app. For example, if the command "./cli foo bar" + // was invoked, then Args should be []string{"foo", "bar"}. + Args []string + + // Commands is a mapping of subcommand names to a factory function + // for creating that Command implementation. If there is a command + // with a blank string "", then it will be used as the default command + // if no subcommand is specified. + // + // If the key has a space in it, this will create a nested subcommand. + // For example, if the key is "foo bar", then to access it our CLI + // must be accessed with "./cli foo bar". 
See the docs for CLI for + // notes on how this changes some other behavior of the CLI as well. + // + // The factory should be as cheap as possible, ideally only allocating + // a struct. The factory may be called multiple times in the course + // of a command execution and certain events such as help require the + // instantiation of all commands. Expensive initialization should be + // deferred to function calls within the interface implementation. + Commands map[string]CommandFactory + + // HiddenCommands is a list of commands that are "hidden". Hidden + // commands are not given to the help function callback and do not + // show up in autocomplete. The values in the slice should be equivalent + // to the keys in the command map. + HiddenCommands []string + + // Name defines the name of the CLI. + Name string + + // Version of the CLI. + Version string + + // Autocomplete enables or disables subcommand auto-completion support. + // This is enabled by default when NewCLI is called. Otherwise, this + // must be enabled explicitly. + // + // Autocomplete requires the "Name" option to be set on CLI. This name + // should be set exactly to the binary name that is autocompleted. + // + // Autocompletion is supported via the github.com/posener/complete + // library. This library supports both bash and zsh. To add support + // for other shells, please see that library. + // + // AutocompleteInstall and AutocompleteUninstall are the global flag + // names for installing and uninstalling the autocompletion handlers + // for the user's shell. The flag should omit the hyphen(s) in front of + // the value. Both single and double hyphens will automatically be supported + // for the flag name. These default to `autocomplete-install` and + // `autocomplete-uninstall` respectively. + // + // AutocompleteNoDefaultFlags is a boolean which controls if the default auto- + // complete flags like -help and -version are added to the output. + // + // AutocompleteGlobalFlags are a mapping of global flags for + // autocompletion. The help and version flags are automatically added. + Autocomplete bool + AutocompleteInstall string + AutocompleteUninstall string + AutocompleteNoDefaultFlags bool + AutocompleteGlobalFlags complete.Flags + autocompleteInstaller autocompleteInstaller // For tests + + // HelpFunc and HelpWriter are used to output help information, if + // requested. + // + // HelpFunc is the function called to generate the generic help + // text that is shown if help must be shown for the CLI that doesn't + // pertain to a specific command. + // + // HelpWriter is the Writer where the help text is outputted to. If + // not specified, it will default to Stderr. + HelpFunc HelpFunc + HelpWriter io.Writer + + //--------------------------------------------------------------- + // Internal fields set automatically + + once sync.Once + autocomplete *complete.Complete + commandTree *radix.Tree + commandNested bool + commandHidden map[string]struct{} + subcommand string + subcommandArgs []string + topFlags []string + + // These are true when special global flags are set. We can/should + // probably use a bitset for this one day. + isHelp bool + isVersion bool + isAutocompleteInstall bool + isAutocompleteUninstall bool +} + +// NewCLI returns a new CLI instance with sensible defaults.
+func NewCLI(app, version string) *CLI { + return &CLI{ + Name: app, + Version: version, + HelpFunc: BasicHelpFunc(app), + Autocomplete: true, + } + +} + +// IsHelp returns whether or not the help flag is present within the +// arguments. +func (c *CLI) IsHelp() bool { + c.once.Do(c.init) + return c.isHelp +} + +// IsVersion returns whether or not the version flag is present within the +// arguments. +func (c *CLI) IsVersion() bool { + c.once.Do(c.init) + return c.isVersion +} + +// Run runs the actual CLI based on the arguments given. +func (c *CLI) Run() (int, error) { + c.once.Do(c.init) + + // If this is an autocompletion request, satisfy it. This must be called + // first before anything else since it's possible to be autocompleting + // -help or -version or other flags and we want to show completions + // and not actually write the help or version. + if c.Autocomplete && c.autocomplete.Complete() { + return 0, nil + } + + // Just show the version and exit if instructed. + if c.IsVersion() && c.Version != "" { + c.HelpWriter.Write([]byte(c.Version + "\n")) + return 0, nil + } + + // Just print the help when only '-h' or '--help' is passed. + if c.IsHelp() && c.Subcommand() == "" { + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n")) + return 0, nil + } + + // If we're attempting to install or uninstall autocomplete, then handle it + if c.Autocomplete { + // Autocomplete requires the "Name" to be set so that we know what + // command to set up the autocomplete on. + if c.Name == "" { + return 1, fmt.Errorf( + "internal error: CLI.Name must be specified for autocomplete to work") + } + + // If both install and uninstall flags are specified, then error + if c.isAutocompleteInstall && c.isAutocompleteUninstall { + return 1, fmt.Errorf( + "Either the autocomplete install or uninstall flag may " + + "be specified, but not both.") + } + + // If the install flag is specified, perform the install or uninstall + if c.isAutocompleteInstall { + if err := c.autocompleteInstaller.Install(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + + if c.isAutocompleteUninstall { + if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + } + + // Attempt to get the factory function for creating the command + // implementation. If the command is invalid or blank, it is an error. + raw, ok := c.commandTree.Get(c.Subcommand()) + if !ok { + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) + return 127, nil + } + + command, err := raw.(CommandFactory)() + if err != nil { + return 1, err + } + + // If we've been instructed to just print the help, then print it + if c.IsHelp() { + c.commandHelp(command) + return 0, nil + } + + // If there is an invalid flag, then error + if len(c.topFlags) > 0 { + c.HelpWriter.Write([]byte( + "Invalid flags before the subcommand. If these flags are for\n" + + "the subcommand, please put them after the subcommand.\n\n")) + c.commandHelp(command) + return 1, nil + } + + code := command.Run(c.SubcommandArgs()) + if code == RunResultHelp { + // Requesting help + c.commandHelp(command) + return 1, nil + } + + return code, nil +} + +// Subcommand returns the subcommand that the CLI would execute. For +// example, a CLI from "--version version --help" would return a Subcommand +// of "version". +func (c *CLI) Subcommand() string { + c.once.Do(c.init) + return c.subcommand +} + +// SubcommandArgs returns the arguments that will be passed to the +// subcommand.
+func (c *CLI) SubcommandArgs() []string { + c.once.Do(c.init) + return c.subcommandArgs +} + +// subcommandParent returns the parent of this subcommand, if there is one. +// If there isn't one, "" is returned. +func (c *CLI) subcommandParent() string { + // Get the subcommand; if it is "" already, just return + sub := c.Subcommand() + if sub == "" { + return sub + } + + // Clear any trailing spaces and find the last space + sub = strings.TrimRight(sub, " ") + idx := strings.LastIndex(sub, " ") + + if idx == -1 { + // No space means our parent is root + return "" + } + + return sub[:idx] +} + +func (c *CLI) init() { + if c.HelpFunc == nil { + c.HelpFunc = BasicHelpFunc("app") + + if c.Name != "" { + c.HelpFunc = BasicHelpFunc(c.Name) + } + } + + if c.HelpWriter == nil { + c.HelpWriter = os.Stderr + } + + // Build our hidden commands + if len(c.HiddenCommands) > 0 { + c.commandHidden = make(map[string]struct{}) + for _, h := range c.HiddenCommands { + c.commandHidden[h] = struct{}{} + } + } + + // Build our command tree + c.commandTree = radix.New() + c.commandNested = false + for k, v := range c.Commands { + k = strings.TrimSpace(k) + c.commandTree.Insert(k, v) + if strings.ContainsRune(k, ' ') { + c.commandNested = true + } + } + + // Go through the keys and fill in any missing parent commands + if c.commandNested { + var walkFn radix.WalkFn + toInsert := make(map[string]struct{}) + walkFn = func(k string, raw interface{}) bool { + idx := strings.LastIndex(k, " ") + if idx == -1 { + // If there is no space, just ignore top level commands + return false + } + + // Trim up to that space so we can get the expected parent + k = k[:idx] + if _, ok := c.commandTree.Get(k); ok { + // Yay we have the parent! + return false + } + + // We're missing the parent, so let's insert this + toInsert[k] = struct{}{} + + // Call the walk function recursively so we check this one too + return walkFn(k, nil) + } + + // Walk! + c.commandTree.Walk(walkFn) + + // Insert any that we're missing + for k := range toInsert { + var f CommandFactory = func() (Command, error) { + return &MockCommand{ + HelpText: "This command is accessed by using one of the subcommands below.", + RunResult: RunResultHelp, + }, nil + } + + c.commandTree.Insert(k, f) + } + } + + // Setup autocomplete if we have it enabled. We have to do this after + // the command tree is setup so we can use the radix tree to easily find + // all subcommands. + if c.Autocomplete { + c.initAutocomplete() + } + + // Process the args + c.processArgs() +} + +func (c *CLI) initAutocomplete() { + if c.AutocompleteInstall == "" { + c.AutocompleteInstall = defaultAutocompleteInstall + } + + if c.AutocompleteUninstall == "" { + c.AutocompleteUninstall = defaultAutocompleteUninstall + } + + if c.autocompleteInstaller == nil { + c.autocompleteInstaller = &realAutocompleteInstaller{} + } + + // Build the root command + cmd := c.initAutocompleteSub("") + + // For the root, we add the global flags to the "Flags". This way + // they don't show up on every command. + if !c.AutocompleteNoDefaultFlags { + cmd.Flags = map[string]complete.Predictor{ + "-" + c.AutocompleteInstall: complete.PredictNothing, + "-" + c.AutocompleteUninstall: complete.PredictNothing, + "-help": complete.PredictNothing, + "-version": complete.PredictNothing, + } + } + cmd.GlobalFlags = c.AutocompleteGlobalFlags + + c.autocomplete = complete.New(c.Name, cmd) +} + +// initAutocompleteSub creates the complete.Command for a subcommand with +// the given prefix.
This will continue recursively for all subcommands. +// The prefix "" (empty string) can be used for the root command. +func (c *CLI) initAutocompleteSub(prefix string) complete.Command { + var cmd complete.Command + walkFn := func(k string, raw interface{}) bool { + // Keep track of the full key so that we can nest further if necessary + fullKey := k + + if len(prefix) > 0 { + // If we have a prefix, trim the prefix + 1 (for the space) + // Example: turns "sub one" to "one" with prefix "sub" + k = k[len(prefix)+1:] + } + + if idx := strings.Index(k, " "); idx >= 0 { + // If there is a space, we trim up to the space. This turns + // "sub sub2 sub3" into "sub". The prefix trim above will + // trim our current depth properly. + k = k[:idx] + } + + if _, ok := cmd.Sub[k]; ok { + // If we already tracked this subcommand then ignore + return false + } + + // If the command is hidden, don't record it at all + if _, ok := c.commandHidden[fullKey]; ok { + return false + } + + if cmd.Sub == nil { + cmd.Sub = complete.Commands(make(map[string]complete.Command)) + } + subCmd := c.initAutocompleteSub(fullKey) + + // Instantiate the command so that we can check if the command is + // a CommandAutocomplete implementation. If there is an error + // creating the command, we just ignore it since that will be caught + // later. + impl, err := raw.(CommandFactory)() + if err != nil { + impl = nil + } + + // Check if it implements CommandAutocomplete. If so, set up the autocomplete + if c, ok := impl.(CommandAutocomplete); ok { + subCmd.Args = c.AutocompleteArgs() + subCmd.Flags = c.AutocompleteFlags() + } + + cmd.Sub[k] = subCmd + return false + } + + walkPrefix := prefix + if walkPrefix != "" { + walkPrefix += " " + } + + c.commandTree.WalkPrefix(walkPrefix, walkFn) + return cmd +} + +func (c *CLI) commandHelp(command Command) { + // Get the template to use + tpl := strings.TrimSpace(defaultHelpTemplate) + if t, ok := command.(CommandHelpTemplate); ok { + tpl = t.HelpTemplate() + } + if !strings.HasSuffix(tpl, "\n") { + tpl += "\n" + } + + // Parse it + t, err := template.New("root").Parse(tpl) + if err != nil { + t = template.Must(template.New("root").Parse(fmt.Sprintf( + "Internal error!
Failed to parse command help template: %s\n", err))) + } + + // Template data + data := map[string]interface{}{ + "Name": c.Name, + "Help": command.Help(), + } + + // Build subcommand list if we have it + var subcommandsTpl []map[string]interface{} + if c.commandNested { + // Get the matching keys + subcommands := c.helpCommands(c.Subcommand()) + keys := make([]string, 0, len(subcommands)) + for k := range subcommands { + keys = append(keys, k) + } + + // Sort the keys + sort.Strings(keys) + + // Figure out the padding length + var longest int + for _, k := range keys { + if v := len(k); v > longest { + longest = v + } + } + + // Go through and create their structures + subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) + for _, k := range keys { + // Get the command + raw, ok := subcommands[k] + if !ok { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error getting subcommand %q", k))) + } + sub, err := raw() + if err != nil { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error instantiating %q: %s", k, err))) + } + + // Find the last space and make sure we only include that last part + name := k + if idx := strings.LastIndex(k, " "); idx > -1 { + name = name[idx+1:] + } + + subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ + "Name": name, + "NameAligned": name + strings.Repeat(" ", longest-len(k)), + "Help": sub.Help(), + "Synopsis": sub.Synopsis(), + }) + } + } + data["Subcommands"] = subcommandsTpl + + // Write + err = t.Execute(c.HelpWriter, data) + if err == nil { + return + } + + // An error, just output... + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Internal error rendering help: %s", err))) +} + +// helpCommands returns the subcommands for the HelpFunc argument. +// This will only contain immediate subcommands. +func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { + // If our prefix isn't empty, make sure it ends in ' ' + if prefix != "" && prefix[len(prefix)-1] != ' ' { + prefix += " " + } + + // Get all the subkeys of this command + var keys []string + c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { + // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar" + if !strings.Contains(k[len(prefix):], " ") { + keys = append(keys, k) + } + + return false + }) + + // For each of the keys return that in the map + result := make(map[string]CommandFactory, len(keys)) + for _, k := range keys { + raw, ok := c.commandTree.Get(k) + if !ok { + // We just got it via WalkPrefix above, so we just panic + panic("not found: " + k) + } + + // If this is a hidden command, don't show it + if _, ok := c.commandHidden[k]; ok { + continue + } + + result[k] = raw.(CommandFactory) + } + + return result +} + +func (c *CLI) processArgs() { + for i, arg := range c.Args { + if arg == "--" { + break + } + + // Check for help flags. + if arg == "-h" || arg == "-help" || arg == "--help" { + c.isHelp = true + continue + } + + // Check for autocomplete flags + if c.Autocomplete { + if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { + c.isAutocompleteInstall = true + continue + } + + if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { + c.isAutocompleteUninstall = true + continue + } + } + + if c.subcommand == "" { + // Check for version flags if not in a subcommand. + if arg == "-v" || arg == "-version" || arg == "--version" { + c.isVersion = true + continue + } + + if arg != "" && arg[0] == '-' { + // Record the arg... 
+ c.topFlags = append(c.topFlags, arg) + } + } + + // If we didn't find a subcommand yet and this is the first non-flag + // argument, then this is our subcommand. + if c.subcommand == "" && arg != "" && arg[0] != '-' { + c.subcommand = arg + if c.commandNested { + // If the command has a space in it, then it is invalid. + // Set a blank command so that it fails. + if strings.ContainsRune(arg, ' ') { + c.subcommand = "" + return + } + + // Determine the argument at which we stop looking for subcommands. + // We look at all arguments until one has a space. This + // disallows commands like: ./cli foo "bar baz". An argument + // with a space is always an argument. + j := 0 + for k, v := range c.Args[i:] { + if strings.ContainsRune(v, ' ') { + break + } + + j = i + k + 1 + } + + // Nested CLI: the subcommand is actually the entire + // arg list up to a flag that is still a valid subcommand. + searchKey := strings.Join(c.Args[i:j], " ") + k, _, ok := c.commandTree.LongestPrefix(searchKey) + if ok { + // k could be a prefix that doesn't contain the full + // command such as "foo" instead of "foobar", so we + // need to verify that we have an entire key. To do that, + // we look for an ending in a space or an end of string. + reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`) + if reVerify.MatchString(searchKey) { + c.subcommand = k + i += strings.Count(k, " ") + } + } + } + + // The remaining args are the subcommand arguments + c.subcommandArgs = c.Args[i+1:] + } + } + + // If we never found a subcommand and support a default command, then + // switch to using that. + if c.subcommand == "" { + if _, ok := c.Commands[""]; ok { + args := c.topFlags + args = append(args, c.subcommandArgs...) + c.topFlags = nil + c.subcommandArgs = args + } + } +} + +// defaultAutocompleteInstall and defaultAutocompleteUninstall are the +// default values for the autocomplete install and uninstall flags. +const defaultAutocompleteInstall = "autocomplete-install" +const defaultAutocompleteUninstall = "autocomplete-uninstall" + +const defaultHelpTemplate = ` +{{.Help}}{{if gt (len .Subcommands) 0}} + +Subcommands: +{{- range $value := .Subcommands }} + {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }} +{{- end }} +` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go new file mode 100644 index 0000000..bed11fa --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command.go @@ -0,0 +1,67 @@ +package cli + +import ( + "github.com/posener/complete" +) + +const ( + // RunResultHelp is a value that can be returned from Run to signal + // to the CLI to render the help output. + RunResultHelp = -18511 ) 

wait
Subcommand autocompletion will work even if this interface +// is not implemented. By implementing this interface, more advanced +// autocompletion is enabled. +type CommandAutocomplete interface { + // AutocompleteArgs returns the argument predictor for this command. + // If argument completion is not supported, this should return + // complete.PredictNothing. + AutocompleteArgs() complete.Predictor + + // AutocompleteFlags returns a mapping of supported flags and autocomplete + // options for this command. The map key for the Flags map should be the + // complete flag such as "-foo" or "--foo". + AutocompleteFlags() complete.Flags +} + +// CommandHelpTemplate is an extension of Command that also has a function +// for returning a template for the help rather than the help itself. In +// this scenario, both Help and HelpTemplate should be implemented. +// +// If CommandHelpTemplate isn't implemented, the Help is output as-is. +type CommandHelpTemplate interface { + // HelpTemplate is the template in text/template format to use for + // displaying the Help. The keys available are: + // + // * ".Help" - The help text itself + // * ".Subcommands" + // + HelpTemplate() string +} + +// CommandFactory is a type of function that is a factory for commands. +// We need a factory because we may need to setup some state on the +// struct that implements the command itself. +type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go new file mode 100644 index 0000000..7a584b7 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command_mock.go @@ -0,0 +1,63 @@ +package cli + +import ( + "github.com/posener/complete" +) + +// MockCommand is an implementation of Command that can be used for tests. +// It is publicly exported from this package in case you want to use it +// externally. +type MockCommand struct { + // Settable + HelpText string + RunResult int + SynopsisText string + + // Set by the command + RunCalled bool + RunArgs []string +} + +func (c *MockCommand) Help() string { + return c.HelpText +} + +func (c *MockCommand) Run(args []string) int { + c.RunCalled = true + c.RunArgs = args + + return c.RunResult +} + +func (c *MockCommand) Synopsis() string { + return c.SynopsisText +} + +// MockCommandAutocomplete is an implementation of CommandAutocomplete. +type MockCommandAutocomplete struct { + MockCommand + + // Settable + AutocompleteArgsValue complete.Predictor + AutocompleteFlagsValue complete.Flags +} + +func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { + return c.AutocompleteArgsValue +} + +func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { + return c.AutocompleteFlagsValue +} + +// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. +type MockCommandHelpTemplate struct { + MockCommand + + // Settable + HelpTemplateText string +} + +func (c *MockCommandHelpTemplate) HelpTemplate() string { + return c.HelpTemplateText +} diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go new file mode 100644 index 0000000..f5ca58f --- /dev/null +++ b/vendor/github.com/mitchellh/cli/help.go @@ -0,0 +1,79 @@ +package cli + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" +) + +// HelpFunc is the type of the function that is responsible for generating +// the help output when the CLI must show the general help text. 
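CommandFactory and HelpFunc meet in the CLI's Commands map. A sketch wiring the exported MockCommand through factories and rendering top-level help with BasicHelpFunc (defined in help.go, which this diff introduces next):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/cli"
)

func main() {
	// Factories defer command construction until a command actually
	// runs, so per-invocation state is cheap to set up.
	commands := map[string]cli.CommandFactory{
		"server": func() (cli.Command, error) {
			return &cli.MockCommand{SynopsisText: "Start the server"}, nil
		},
		"version": func() (cli.Command, error) {
			return &cli.MockCommand{SynopsisText: "Print the version"}, nil
		},
	}

	// BasicHelpFunc returns a HelpFunc: one aligned
	// "name    synopsis" line per command, sorted by name.
	help := cli.BasicHelpFunc("app")
	fmt.Print(help(commands))
}
```

The HelpFunc type itself follows.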
+type HelpFunc func(map[string]CommandFactory) string + +// BasicHelpFunc generates some basic help output that is usually good enough +// for most CLI applications. +func BasicHelpFunc(app string) HelpFunc { + return func(commands map[string]CommandFactory) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf( + "Usage: %s [--version] [--help] []\n\n", + app)) + buf.WriteString("Available commands are:\n") + + // Get the list of keys so we can sort them, and also get the maximum + // key length so they can be aligned properly. + keys := make([]string, 0, len(commands)) + maxKeyLen := 0 + for key := range commands { + if len(key) > maxKeyLen { + maxKeyLen = len(key) + } + + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + commandFunc, ok := commands[key] + if !ok { + // This should never happen since we JUST built the list of + // keys. + panic("command not found: " + key) + } + + command, err := commandFunc() + if err != nil { + log.Printf("[ERR] cli: Command '%s' failed to load: %s", + key, err) + continue + } + + key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) + buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) + } + + return buf.String() + } +} + +// FilteredHelpFunc will filter the commands to only include the keys +// in the include parameter. +func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { + return func(commands map[string]CommandFactory) string { + set := make(map[string]struct{}) + for _, k := range include { + set[k] = struct{}{} + } + + filtered := make(map[string]CommandFactory) + for k, f := range commands { + if _, ok := set[k]; ok { + filtered[k] = f + } + } + + return f(filtered) + } +} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go new file mode 100644 index 0000000..a2d6f94 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui.go @@ -0,0 +1,187 @@ +package cli + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + + "github.com/bgentry/speakeasy" + "github.com/mattn/go-isatty" +) + +// Ui is an interface for interacting with the terminal, or "interface" +// of a CLI. This abstraction doesn't have to be used, but helps provide +// a simple, layerable way to manage user interactions. +type Ui interface { + // Ask asks the user for input using the given query. The response is + // returned as the given string, or an error. + Ask(string) (string, error) + + // AskSecret asks the user for input using the given query, but does not echo + // the keystrokes to the terminal. + AskSecret(string) (string, error) + + // Output is called for normal standard output. + Output(string) + + // Info is called for information related to the previous output. + // In general this may be the exact same as Output, but this gives + // Ui implementors some flexibility with output formats. + Info(string) + + // Error is used for any error messages that might appear on standard + // error. + Error(string) + + // Warn is used for any warning messages that might appear on standard + // error. + Warn(string) +} + +// BasicUi is an implementation of Ui that just outputs to the given +// writer. This UI is not threadsafe by default, but you can wrap it +// in a ConcurrentUi to make it safe. 
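To make the Ui contract concrete, a short usage sketch against the BasicUi implementation that follows:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	// Writer receives Output/Info; ErrorWriter receives Error/Warn;
	// Reader backs Ask and AskSecret.
	ui := &cli.BasicUi{
		Reader:      os.Stdin,
		Writer:      os.Stdout,
		ErrorWriter: os.Stderr,
	}

	name, err := ui.Ask("What is your name?")
	if err != nil {
		ui.Error(fmt.Sprintf("failed to read input: %s", err))
		os.Exit(1)
	}

	ui.Info("Hello, " + name) // same stream as Output for BasicUi
	ui.Warn("this UI is not safe for concurrent use")
}
```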
+type BasicUi struct { + Reader io.Reader + Writer io.Writer + ErrorWriter io.Writer +} + +func (u *BasicUi) Ask(query string) (string, error) { + return u.ask(query, false) +} + +func (u *BasicUi) AskSecret(query string) (string, error) { + return u.ask(query, true) +} + +func (u *BasicUi) ask(query string, secret bool) (string, error) { + if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { + return "", err + } + + // Register for interrupts so that we can catch it and immediately + // return... + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Ask for input in a go-routine so that we can ignore it. + errCh := make(chan error, 1) + lineCh := make(chan string, 1) + go func() { + var line string + var err error + if secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + r := bufio.NewReader(u.Reader) + line, err = r.ReadString('\n') + } + if err != nil { + errCh <- err + return + } + + lineCh <- strings.TrimRight(line, "\r\n") + }() + + select { + case err := <-errCh: + return "", err + case line := <-lineCh: + return line, nil + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(u.Writer) + + return "", errors.New("interrupted") + } +} + +func (u *BasicUi) Error(message string) { + w := u.Writer + if u.ErrorWriter != nil { + w = u.ErrorWriter + } + + fmt.Fprint(w, message) + fmt.Fprint(w, "\n") +} + +func (u *BasicUi) Info(message string) { + u.Output(message) +} + +func (u *BasicUi) Output(message string) { + fmt.Fprint(u.Writer, message) + fmt.Fprint(u.Writer, "\n") +} + +func (u *BasicUi) Warn(message string) { + u.Error(message) +} + +// PrefixedUi is an implementation of Ui that prefixes messages. +type PrefixedUi struct { + AskPrefix string + AskSecretPrefix string + OutputPrefix string + InfoPrefix string + ErrorPrefix string + WarnPrefix string + Ui Ui +} + +func (u *PrefixedUi) Ask(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskPrefix, query) + } + + return u.Ui.Ask(query) +} + +func (u *PrefixedUi) AskSecret(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) + } + + return u.Ui.AskSecret(query) +} + +func (u *PrefixedUi) Error(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) + } + + u.Ui.Error(message) +} + +func (u *PrefixedUi) Info(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.InfoPrefix, message) + } + + u.Ui.Info(message) +} + +func (u *PrefixedUi) Output(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.OutputPrefix, message) + } + + u.Ui.Output(message) +} + +func (u *PrefixedUi) Warn(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.WarnPrefix, message) + } + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go new file mode 100644 index 0000000..e3d5131 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_colored.go @@ -0,0 +1,69 @@ +package cli + +import ( + "fmt" +) + +// UiColor is a posix shell color code to use. +type UiColor struct { + Code int + Bold bool +} + +// A list of colors that are useful. These are all non-bolded by default. 
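These codes are standard SGR escape parameters, and coloring is plain decoration over another Ui. A brief sketch, followed by the predefined palette:

```go
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	base := &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr}

	// ColoredUi decorates any Ui: here errors render red, warnings
	// yellow, and everything else passes through uncolored.
	ui := &cli.ColoredUi{
		OutputColor: cli.UiColorNone,
		InfoColor:   cli.UiColorNone,
		ErrorColor:  cli.UiColorRed,
		WarnColor:   cli.UiColorYellow,
		Ui:          base,
	}

	// Emits "\033[0;31mboom\033[0m" plus a newline on stderr.
	ui.Error("boom")
}
```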
+var ( + UiColorNone UiColor = UiColor{-1, false} + UiColorRed = UiColor{31, false} + UiColorGreen = UiColor{32, false} + UiColorYellow = UiColor{33, false} + UiColorBlue = UiColor{34, false} + UiColorMagenta = UiColor{35, false} + UiColorCyan = UiColor{36, false} +) + +// ColoredUi is a Ui implementation that colors its output according +// to the given color schemes for the given type of output. +type ColoredUi struct { + OutputColor UiColor + InfoColor UiColor + ErrorColor UiColor + WarnColor UiColor + Ui Ui +} + +func (u *ColoredUi) Ask(query string) (string, error) { + return u.Ui.Ask(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) AskSecret(query string) (string, error) { + return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) Output(message string) { + u.Ui.Output(u.colorize(message, u.OutputColor)) +} + +func (u *ColoredUi) Info(message string) { + u.Ui.Info(u.colorize(message, u.InfoColor)) +} + +func (u *ColoredUi) Error(message string) { + u.Ui.Error(u.colorize(message, u.ErrorColor)) +} + +func (u *ColoredUi) Warn(message string) { + u.Ui.Warn(u.colorize(message, u.WarnColor)) +} + +func (u *ColoredUi) colorize(message string, color UiColor) string { + if color.Code == -1 { + return message + } + + attr := 0 + if color.Bold { + attr = 1 + } + + return fmt.Sprintf("\033[%d;%dm%s\033[0m", attr, color.Code, message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go new file mode 100644 index 0000000..b4f4dbf --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_concurrent.go @@ -0,0 +1,54 @@ +package cli + +import ( + "sync" +) + +// ConcurrentUi is a wrapper around a Ui interface (and implements that +// interface) making the underlying Ui concurrency safe. +type ConcurrentUi struct { + Ui Ui + l sync.Mutex +} + +func (u *ConcurrentUi) Ask(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.Ask(query) +} + +func (u *ConcurrentUi) AskSecret(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.AskSecret(query) +} + +func (u *ConcurrentUi) Error(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Error(message) +} + +func (u *ConcurrentUi) Info(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Info(message) +} + +func (u *ConcurrentUi) Output(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Output(message) +} + +func (u *ConcurrentUi) Warn(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go new file mode 100644 index 0000000..0bfe0a1 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_mock.go @@ -0,0 +1,111 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "sync" +) + +// NewMockUi returns a fully initialized MockUi instance +// which is safe for concurrent use. +func NewMockUi() *MockUi { + m := new(MockUi) + m.once.Do(m.init) + return m +} + +// MockUi is a mock UI that is used for tests and is exported publicly +// for use in external tests if needed as well. Do not instantite this +// directly since the buffers will be initialized on the first write. If +// there is no write then you will get a nil panic. Please use the +// NewMockUi() constructor function instead. 
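In tests, MockUi's buffers stand in for the terminal. A sketch, where newVersionCommand is a hypothetical constructor for a command that prints through the given Ui:

```go
package app

import (
	"strings"
	"testing"

	"github.com/mitchellh/cli"
)

func TestVersionCommand(t *testing.T) {
	// NewMockUi pre-initializes both buffers, so reads are safe
	// even if the command under test never writes.
	ui := cli.NewMockUi()

	cmd := newVersionCommand(ui) // hypothetical constructor
	if code := cmd.Run(nil); code != 0 {
		t.Fatalf("exit %d, stderr: %s", code, ui.ErrorWriter.String())
	}
	if out := ui.OutputWriter.String(); !strings.Contains(out, "1.0.0") {
		t.Fatalf("unexpected output: %q", out)
	}
}
```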
You can fix your code with +// +// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go +type MockUi struct { + InputReader io.Reader + ErrorWriter *syncBuffer + OutputWriter *syncBuffer + + once sync.Once +} + +func (u *MockUi) Ask(query string) (string, error) { + u.once.Do(u.init) + + var result string + fmt.Fprint(u.OutputWriter, query) + if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { + return "", err + } + + return result, nil +} + +func (u *MockUi) AskSecret(query string) (string, error) { + return u.Ask(query) +} + +func (u *MockUi) Error(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) Info(message string) { + u.Output(message) +} + +func (u *MockUi) Output(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.OutputWriter, message) + fmt.Fprint(u.OutputWriter, "\n") +} + +func (u *MockUi) Warn(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) init() { + u.ErrorWriter = new(syncBuffer) + u.OutputWriter = new(syncBuffer) +} + +type syncBuffer struct { + sync.RWMutex + b bytes.Buffer +} + +func (b *syncBuffer) Write(data []byte) (int, error) { + b.Lock() + defer b.Unlock() + return b.b.Write(data) +} + +func (b *syncBuffer) Read(data []byte) (int, error) { + b.RLock() + defer b.RUnlock() + return b.b.Read(data) +} + +func (b *syncBuffer) Reset() { + b.Lock() + b.b.Reset() + b.Unlock() +} + +func (b *syncBuffer) String() string { + return string(b.Bytes()) +} + +func (b *syncBuffer) Bytes() []byte { + b.RLock() + data := b.b.Bytes() + b.RUnlock() + return data +} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go new file mode 100644 index 0000000..1e1db3c --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_writer.go @@ -0,0 +1,18 @@ +package cli + +// UiWriter is an io.Writer implementation that can be used with +// loggers that writes every line of log output data to a Ui at the +// Info level. +type UiWriter struct { + Ui Ui +} + +func (w *UiWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + p = p[:n-1] + } + + w.Ui.Info(string(p)) + return n, nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go index 0e725ea..1404352 100644 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -156,9 +156,13 @@ func (w *walker) Exit(l reflectwalk.Location) error { } switch l { + case reflectwalk.Array: + fallthrough case reflectwalk.Map: fallthrough case reflectwalk.Slice: + w.replacePointerMaybe() + // Pop map off our container w.cs = w.cs[:len(w.cs)-1] case reflectwalk.MapValue: @@ -171,16 +175,27 @@ func (w *walker) Exit(l reflectwalk.Location) error { // or in this case never adds it. We need to create a properly typed // zero value so that this key can be set. 
if !mv.IsValid() { - mv = reflect.Zero(m.Type().Elem()) + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } } - m.SetMapIndex(mk, mv) case reflectwalk.SliceElem: // Pop off the value and the index and set it on the slice v := w.valPop() i := w.valPop().Interface().(int) if v.IsValid() { s := w.cs[len(w.cs)-1] - se := s.Index(i) + se := s.Elem().Index(i) if se.CanSet() { se.Set(v) } @@ -220,9 +235,9 @@ func (w *walker) Map(m reflect.Value) error { // Create the map. If the map itself is nil, then just make a nil map var newMap reflect.Value if m.IsNil() { - newMap = reflect.Indirect(reflect.New(m.Type())) + newMap = reflect.New(m.Type()) } else { - newMap = reflect.MakeMap(m.Type()) + newMap = wrapPtr(reflect.MakeMap(m.Type())) } w.cs = append(w.cs, newMap) @@ -287,9 +302,9 @@ func (w *walker) Slice(s reflect.Value) error { var newS reflect.Value if s.IsNil() { - newS = reflect.Indirect(reflect.New(s.Type())) + newS = reflect.New(s.Type()) } else { - newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap()) + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) } w.cs = append(w.cs, newS) @@ -309,6 +324,31 @@ func (w *walker) SliceElem(i int, elem reflect.Value) error { return nil } +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + func (w *walker) Struct(s reflect.Value) error { if w.ignoring() { return nil @@ -326,7 +366,10 @@ func (w *walker) Struct(s reflect.Value) error { return err } - v = reflect.ValueOf(dup) + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) } else { // No copier, we copy ourselves and allow reflectwalk to guide // us deeper into the structure for copying. @@ -405,6 +448,23 @@ func (w *walker) replacePointerMaybe() { } v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. 
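The net effect of these changes is that values containing fixed-size arrays (not just slices) and interface-typed elements now deep-copy correctly. A sketch using the package's top-level Copy function, whose signature is assumed from the package's public API since only walker internals appear in this hunk:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

type config struct {
	// A fixed-size array: walked via the new Array/ArrayElem hooks.
	Ports [2]int
	// An interface-typed field: exercises the *interface{}
	// conversion added to replacePointerMaybe.
	Extra interface{}
}

func main() {
	orig := &config{Ports: [2]int{80, 443}, Extra: "tls"}

	dup, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}

	// The copy is a distinct value; mutating it leaves orig intact.
	cp := dup.(*config)
	cp.Ports[0] = 8080
	fmt.Println(orig.Ports[0], cp.Ports[0]) // 80 8080
}
```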
+ // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + for i := 1; i < w.ps[w.depth]; i++ { if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { iface := reflect.New(iType).Elem() @@ -475,3 +535,14 @@ func (w *walker) lock(v reflect.Value) { locker.Lock() w.locks[w.depth] = locker } + +// wrapPtr is a helper that takes v and always make it *v. copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml new file mode 100644 index 0000000..4c83109 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.8 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 0000000..a3866a2 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 0000000..26781bb --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,52 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. + +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually. This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). 
+ +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Why?! + +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 0000000..204afb4 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. 
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 0000000..07fbcb5 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,80 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Helper() + Log(args ...interface{}) + Logf(format string, args ...interface{}) +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. 
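Because FailNow panics, runtime callers that must survive a Fatal need a recover guard. A sketch; the populateMockData helper is hypothetical:

```go
package main

import (
	"log"

	"github.com/mitchellh/go-testing-interface"
)

// populateMockData is a hypothetical helper written against the T
// interface; it may call t.Fatal on bad input.
func populateMockData(t testing.T) {
	t.Fatal("mock fixtures unavailable")
}

func main() {
	rt := &testing.RuntimeT{}

	func() {
		// RuntimeT.FailNow panics rather than stopping a test
		// goroutine, so guard helpers that can call Fatal.
		defer func() {
			if r := recover(); r != nil {
				log.Printf("helper aborted: %v", r)
			}
		}()
		populateMockData(rt)
	}()
}
```

The go1.9 variant of the type follows.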
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Helper() {} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md new file mode 100644 index 0000000..2298515 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md new file mode 100644 index 0000000..60ae311 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: whenever there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. 
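For instance, a word longer than the limit is emitted on its own line rather than split; tracing WrapString below against this input gives (output per this sketch's reading of the code, not a documented guarantee):

```go
wrapped := wordwrap.WrapString("a verylongword here", 4)
fmt.Println(wrapped)
// a
// verylongword
// here
```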
+ +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 0000000..ac67205 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,73 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) > lim { + current = 0 + } else { + current += uint(spaceBuf.Len()) + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + spaceBuf.WriteRune(char) + } else { + + wordBuf.WriteRune(char) + + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go index 7c59d76..6a7f176 100644 --- a/vendor/github.com/mitchellh/reflectwalk/location.go +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -11,6 +11,8 @@ const ( MapValue Slice SliceElem + Array + ArrayElem Struct StructField WalkLoc diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go index d3cfe85..70760cf 100644 --- a/vendor/github.com/mitchellh/reflectwalk/location_string.go +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -1,15 +1,15 @@ -// generated by stringer -type=Location location.go; DO NOT EDIT +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. 
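Alongside the regenerated stringer table, reflectwalk.go below gains an ArrayWalker interface. A minimal sketch implementing it; the package-level Walk entry point is assumed from the package's public API, since only the internal walk function appears in this hunk:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// arrayCounter implements reflectwalk.ArrayWalker.
type arrayCounter struct {
	arrays, elems int
}

func (c *arrayCounter) Array(v reflect.Value) error {
	c.arrays++
	return nil
}

func (c *arrayCounter) ArrayElem(i int, v reflect.Value) error {
	c.elems++
	return nil
}

func main() {
	data := struct {
		Ports [3]int
	}{Ports: [3]int{80, 443, 8080}}

	c := &arrayCounter{}
	if err := reflectwalk.Walk(data, c); err != nil {
		panic(err)
	}
	fmt.Println(c.arrays, c.elems) // 1 3
}
```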
package reflectwalk import "fmt" -const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc" +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" -var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59} +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} func (i Location) String() string { - if i+1 >= Location(len(_Location_index)) { + if i >= Location(len(_Location_index)-1) { return fmt.Sprintf("Location(%d)", i) } return _Location_name[_Location_index[i]:_Location_index[i+1]] diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go index ec0a623..d7ab7b6 100644 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -39,6 +39,13 @@ type SliceWalker interface { SliceElem(int, reflect.Value) error } +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + // StructWalker is an interface that has methods that are called for // structs when a Walk is done. type StructWalker interface { @@ -65,6 +72,7 @@ type PointerWalker interface { // SkipEntry can be returned from walk functions to skip walking // the value of this field. This is only valid in the following functions: // +// - Struct: skips all fields from being walked // - StructField: skips walking the struct value // var SkipEntry = errors.New("skip this entry") @@ -179,6 +187,9 @@ func walk(v reflect.Value, w interface{}) (err error) { case reflect.Struct: err = walkStruct(v, w) return + case reflect.Array: + err = walkArray(v, w) + return default: panic("unsupported type: " + k.String()) } @@ -286,48 +297,99 @@ func walkSlice(v reflect.Value, w interface{}) (err error) { return nil } +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + func walkStruct(v reflect.Value, w interface{}) (err error) { ew, ewok := w.(EnterExitWalker) if ewok { ew.Enter(Struct) } + skip := false if sw, ok := w.(StructWalker); ok { - if err = sw.Struct(v); err != nil { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { return } } - vt := v.Type() - for i := 0; i < vt.NumField(); i++ { - sf := vt.Field(i) - f := v.FieldByIndex([]int{i}) + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) - if sw, ok := w.(StructWalker); ok { - err = sw.StructField(sf, f) + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) - // SkipEntry just pretends this field doesn't even exist - if err == SkipEntry { - continue + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + 
if ok { + ew.Enter(StructField) } + err = walk(f, w) if err != nil { return } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(StructField) - } - err = walk(f, w) - if err != nil { - return - } - - if ok { - ew.Exit(StructField) + if ok { + ew.Exit(StructField) + } } } diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 0000000..a1338d6 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml new file mode 100644 index 0000000..362bdd4 --- /dev/null +++ b/vendor/github.com/oklog/run/.travis.yml @@ -0,0 +1,12 @@ +language: go +sudo: false +go: + - 1.x + - tip +install: + - go get -v github.com/golang/lint/golint + - go build ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 0000000..a7228cd --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,73 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. 
Actors are defined as +a pair of functions: an **execute** function, which should run synchronously; +and an **interrupt** function, which, when invoked, should cause the execute +function to return. Finally, invoke Run, which blocks until the first actor +returns. This general-purpose API allows callers to model pretty much any +runnable task, and achieve well-defined lifecycle semantics for the group. + +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). +But it's useful in any circumstance where you need to orchestrate multiple +goroutines as a unit whole. +[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a +video of a talk where run.Group is described. + +## Examples + +### context.Context + +```go +ctx, cancel := context.WithCancel(context.Background()) +g.Add(func() error { + return myProcess(ctx, ...) +}, func(error) { + cancel() +}) +``` + +### net.Listener + +```go +ln, _ := net.Listen("tcp", ":8080") +g.Add(func() error { + return http.Serve(ln, nil) +}, func(error) { + ln.Close() +}) +``` + +### io.ReadCloser + +```go +var conn io.ReadCloser = ... +g.Add(func() error { + s := bufio.NewScanner(conn) + for s.Scan() { + println(s.Text()) + } + return s.Err() +}, func(error) { + conn.Close() +}) +``` + +## Comparisons + +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +except it doesn't require actor goroutines to understand context semantics. + +It's somewhat similar to package +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), +except it has a much smaller API surface, delegating e.g. staged shutdown of +goroutines to the caller. diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 0000000..832d47d --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. 
+ errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore new file mode 100644 index 0000000..1363720 --- /dev/null +++ b/vendor/github.com/posener/complete/.gitignore @@ -0,0 +1,2 @@ +.idea +coverage.txt diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml new file mode 100644 index 0000000..c2798f8 --- /dev/null +++ b/vendor/github.com/posener/complete/.travis.yml @@ -0,0 +1,17 @@ +language: go +sudo: false +go: + - 1.9 + - 1.8 + +before_install: + - go get -u -t ./... + - go get -u gopkg.in/alecthomas/gometalinter.v1 + - gometalinter.v1 --install + +script: + - gometalinter.v1 --config metalinter.json ./... + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt new file mode 100644 index 0000000..16249b4 --- /dev/null +++ b/vendor/github.com/posener/complete/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2017 Eyal Posener + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go new file mode 100644 index 0000000..1ba4d69 --- /dev/null +++ b/vendor/github.com/posener/complete/args.go @@ -0,0 +1,102 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" + "unicode" +) + +// Args describes command line arguments +type Args struct { + // All lists of all arguments in command line (not including the command itself) + All []string + // Completed lists of all completed arguments in command line, + // If the last one is still being typed - no space after it, + // it won't appear in this list of arguments. + Completed []string + // Last argument in command line, the one being typed, if the last + // character in the command line is a space, this argument will be empty, + // otherwise this would be the last word. 
+ Last string + // LastCompleted is the last argument that was fully typed. + // If the last character in the command line is space, this would be the + // last word; otherwise, it would be the word before that. + LastCompleted string +} + +// Directory gives the directory of the currently written +// last argument if it represents a file name being written. +// In case it is not, we fall back to the current directory. +func (a Args) Directory() string { + if info, err := os.Stat(a.Last); err == nil && info.IsDir() { + return fixPathForm(a.Last, a.Last) + } + dir := filepath.Dir(a.Last) + if info, err := os.Stat(dir); err != nil || !info.IsDir() { + return "./" + } + return fixPathForm(a.Last, dir) +} + +func newArgs(line string) Args { + var ( + all []string + completed []string + ) + parts := splitFields(line) + if len(parts) > 0 { + all = parts[1:] + completed = removeLast(parts[1:]) + } + return Args{ + All: all, + Completed: completed, + Last: last(parts), + LastCompleted: last(completed), + } +} + +func splitFields(line string) []string { + parts := strings.Fields(line) + if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { + parts = append(parts, "") + } + parts = splitLastEqual(parts) + return parts +} + +func splitLastEqual(line []string) []string { + if len(line) == 0 { + return line + } + parts := strings.Split(line[len(line)-1], "=") + return append(line[:len(line)-1], parts...) +} + +func (a Args) from(i int) Args { + if i > len(a.All) { + i = len(a.All) + } + a.All = a.All[i:] + + if i > len(a.Completed) { + i = len(a.Completed) + } + a.Completed = a.Completed[i:] + return a +} + +func removeLast(a []string) []string { + if len(a) > 0 { + return a[:len(a)-1] + } + return a +} + +func last(args []string) string { + if len(args) == 0 { + return "" + } + return args[len(args)-1] +} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go new file mode 100644 index 0000000..7137dee --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/cmd.go @@ -0,0 +1,128 @@ +// Package cmd provides command line options for the complete tool +package cmd + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + + "github.com/posener/complete/cmd/install" +) + +// CLI holds the command line options +type CLI struct { + Name string + InstallName string + UninstallName string + + install bool + uninstall bool + yes bool +} + +const ( + defaultInstallName = "install" + defaultUninstallName = "uninstall" +) + +// Run is used when running complete in command line mode. +// This is used when complete is not completing words, but +// installing or uninstalling the completion. +func (f *CLI) Run() bool { + err := f.validate() + if err != nil { + os.Stderr.WriteString(err.Error() + "\n") + os.Exit(1) + } + + switch { + case f.install: + f.prompt() + err = install.Install(f.Name) + case f.uninstall: + f.prompt() + err = install.Uninstall(f.Name) + default: + // none of the action flags matched; + // returning false should make the real program execute + return false + } + + if err != nil { + fmt.Printf("%s failed! %s\n", f.action(), err) + os.Exit(3) + } + fmt.Println("Done!") + return true +} + +// prompt asks the user for approval and +// exits if approval was not given +func (f *CLI) prompt() { + defer fmt.Println(f.action() + "ing...") + if f.yes { + return + } + fmt.Printf("%s completion for %s? 
", f.action(), f.Name) + var answer string + fmt.Scanln(&answer) + + switch strings.ToLower(answer) { + case "y", "yes": + return + default: + fmt.Println("Cancelling...") + os.Exit(1) + } +} + +// AddFlags adds the CLI flags to the flag set. +// If flags is nil, the default command line flags will be taken. +// Pass non-empty strings as installName and uninstallName to override the default +// flag names. +func (f *CLI) AddFlags(flags *flag.FlagSet) { + if flags == nil { + flags = flag.CommandLine + } + + if f.InstallName == "" { + f.InstallName = defaultInstallName + } + if f.UninstallName == "" { + f.UninstallName = defaultUninstallName + } + + if flags.Lookup(f.InstallName) == nil { + flags.BoolVar(&f.install, f.InstallName, false, + fmt.Sprintf("Install completion for %s command", f.Name)) + } + if flags.Lookup(f.UninstallName) == nil { + flags.BoolVar(&f.uninstall, f.UninstallName, false, + fmt.Sprintf("Uninstall completion for %s command", f.Name)) + } + if flags.Lookup("y") == nil { + flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes'") + } +} + +// validate the CLI +func (f *CLI) validate() error { + if f.install && f.uninstall { + return errors.New("Install and uninstall are mutually exclusive") + } + return nil +} + +// action name according to the CLI values. +func (f *CLI) action() string { + switch { + case f.install: + return "Install" + case f.uninstall: + return "Uninstall" + default: + return "unknown" + } +} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go new file mode 100644 index 0000000..a287f99 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/bash.go @@ -0,0 +1,32 @@ +package install + +import "fmt" + +// (un)install in bash +// basically adds/remove from .bashrc: +// +// complete -C +type bash struct { + rc string +} + +func (b bash) Install(cmd, bin string) error { + completeCmd := b.cmd(cmd, bin) + if lineInFile(b.rc, completeCmd) { + return fmt.Errorf("already installed in %s", b.rc) + } + return appendToFile(b.rc, completeCmd) +} + +func (b bash) Uninstall(cmd, bin string) error { + completeCmd := b.cmd(cmd, bin) + if !lineInFile(b.rc, completeCmd) { + return fmt.Errorf("does not installed in %s", b.rc) + } + + return removeFromFile(b.rc, completeCmd) +} + +func (bash) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go new file mode 100644 index 0000000..082a226 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/install.go @@ -0,0 +1,92 @@ +package install + +import ( + "errors" + "os" + "os/user" + "path/filepath" + + "github.com/hashicorp/go-multierror" +) + +type installer interface { + Install(cmd, bin string) error + Uninstall(cmd, bin string) error +} + +// Install complete command given: +// cmd: is the command name +func Install(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to install") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Install(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +// Uninstall complete command given: +// cmd: is the command name +func Uninstall(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to uninstall") + } + bin, 
err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Uninstall(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +func installers() (i []installer) { + for _, rc := range [...]string{".bashrc", ".bash_profile", ".bash_login", ".profile"} { + if f := rcFile(rc); f != "" { + i = append(i, bash{f}) + break + } + } + if f := rcFile(".zshrc"); f != "" { + i = append(i, zsh{f}) + } + return +} + +func getBinaryPath() (string, error) { + bin, err := os.Executable() + if err != nil { + return "", err + } + return filepath.Abs(bin) +} + +func rcFile(name string) string { + u, err := user.Current() + if err != nil { + return "" + } + path := filepath.Join(u.HomeDir, name) + if _, err := os.Stat(path); err != nil { + return "" + } + return path +} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go new file mode 100644 index 0000000..2c8b44c --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/utils.go @@ -0,0 +1,118 @@ +package install + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" +) + +func lineInFile(name string, lookFor string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + r := bufio.NewReader(f) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + return false + } + if err != nil { + return false + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) + if string(line) == lookFor { + return true + } + prefix = prefix[:0] + } +} + +func appendToFile(name string, content string) error { + f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) + return err +} + +func removeFromFile(name string, content string) error { + backup := name + ".bck" + err := copyFile(name, backup) + if err != nil { + return err + } + temp, err := removeContentToTempFile(name, content) + if err != nil { + return err + } + + err = copyFile(temp, name) + if err != nil { + return err + } + + return os.Remove(backup) +} + +func removeContentToTempFile(name, content string) (string, error) { + rf, err := os.Open(name) + if err != nil { + return "", err + } + defer rf.Close() + wf, err := ioutil.TempFile("/tmp", "complete-") + if err != nil { + return "", err + } + defer wf.Close() + + r := bufio.NewReader(rf) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return "", err + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...)
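+ // A line equal to the content being removed is skipped; every other
+ // line is copied to the temporary file.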
+ str := string(line) + if str == content { + continue + } + wf.WriteString(str + "\n") + prefix = prefix[:0] + } + return wf.Name(), nil +} + +func copyFile(src string, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, in) + return err +} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go new file mode 100644 index 0000000..a625f53 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/zsh.go @@ -0,0 +1,39 @@ +package install + +import "fmt" + +// (un)install in zsh +// basically adds/removes the following lines from .zshrc: +// +// autoload -U +X bashcompinit && bashcompinit +// complete -C <binary> <command> +type zsh struct { + rc string +} + +func (z zsh) Install(cmd, bin string) error { + completeCmd := z.cmd(cmd, bin) + if lineInFile(z.rc, completeCmd) { + return fmt.Errorf("already installed in %s", z.rc) + } + + bashCompInit := "autoload -U +X bashcompinit && bashcompinit" + if !lineInFile(z.rc, bashCompInit) { + completeCmd = bashCompInit + "\n" + completeCmd + } + + return appendToFile(z.rc, completeCmd) +} + +func (z zsh) Uninstall(cmd, bin string) error { + completeCmd := z.cmd(cmd, bin) + if !lineInFile(z.rc, completeCmd) { + return fmt.Errorf("not installed in %s", z.rc) + } + + return removeFromFile(z.rc, completeCmd) +} + +func (zsh) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go new file mode 100644 index 0000000..82d37d5 --- /dev/null +++ b/vendor/github.com/posener/complete/command.go @@ -0,0 +1,111 @@ +package complete + +// Command represents a command line. +// It holds the data that enables auto completion of the command line. +// A Command can also be a sub command. +type Command struct { + // Sub is a map of sub commands of the current command. + // The key refers to the sub command name, and the value is its + // Command struct. + Sub Commands + + // Flags is a map of flags that the command accepts. + // The key is the flag name, and the value is its predictions. + Flags Flags + + // GlobalFlags is a map of flags that the command accepts. + // Global flags can also appear after a sub command. + GlobalFlags Flags + + // Args are extra arguments that the command accepts, those that are + // given without any flag before them. + Args Predictor +} + +// Predict returns all possible predictions for args according to the command struct +func (c *Command) Predict(a Args) []string { + options, _ := c.predict(a) + return options +} + +// Commands is the type of the Sub member; it maps a command name to a command struct +type Commands map[string]Command + +// Predict completion of sub command names according to command line arguments +func (c Commands) Predict(a Args) (prediction []string) { + for sub := range c { + prediction = append(prediction, sub) + } + return +} + +// Flags is the type of the Flags member; it maps a flag name to the flag's predictions. +type Flags map[string]Predictor + +// Predict completion of flag names according to command line arguments +func (f Flags) Predict(a Args) (prediction []string) { + for flag := range f { + // If the flag starts with a hyphen, we avoid emitting the prediction + // unless the last typed arg contains a hyphen as well.
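+ // For example, the flag "-h" is suggested only after the user has
+ // typed a "-".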
+ flagHyphenStart := len(flag) != 0 && flag[0] == '-' + lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' + if flagHyphenStart && !lastHyphenStart { + continue + } + prediction = append(prediction, flag) + } + return +} + +// predict returns the completion options. +// only is set to true if no more options are allowed to be returned - +// in cases of a special flag that has specific completion arguments, +// where other flags or sub commands can't come after it. +func (c *Command) predict(a Args) (options []string, only bool) { + + // search sub commands for predictions first + subCommandFound := false + for i, arg := range a.Completed { + if cmd, ok := c.Sub[arg]; ok { + subCommandFound = true + + // recursive call for sub command + options, only = cmd.predict(a.from(i)) + if only { + return + } + + // We matched so stop searching. Continuing to search can accidentally + // match a subcommand with current set of commands, see issue #46. + break + } + } + + // if last completed word is a global flag that we need to complete + if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to global flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.GlobalFlags.Predict(a)...) + + // if a sub command was entered, we won't add the parent command + // completions and we return here. + if subCommandFound { + return + } + + // if last completed word is a command flag that we need to complete + if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.Sub.Predict(a)...) + options = append(options, c.Flags.Predict(a)...) + if c.Args != nil { + options = append(options, c.Args.Predict(a)...) + } + + return +} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go new file mode 100644 index 0000000..185d1e8 --- /dev/null +++ b/vendor/github.com/posener/complete/complete.go @@ -0,0 +1,95 @@ +// Package complete provides a tool for writing bash completion in Go. +// +// Writing bash completion scripts is hard work. This package provides an easy way +// to create bash completion scripts for any command, and also an easy way to install/uninstall +// the completion of the command. +package complete + +import ( + "flag" + "fmt" + "io" + "os" + + "github.com/posener/complete/cmd" + "github.com/posener/complete/match" +) + +const ( + envComplete = "COMP_LINE" + envDebug = "COMP_DEBUG" +) + +// Complete defines completion for a command, along with CLI options +type Complete struct { + Command Command + cmd.CLI + Out io.Writer +} + +// New creates a new complete command. +// name is the name of the command we want to auto complete. +// IMPORTANT: it must be the same name - if the auto complete +// completes the 'go' command, name must be equal to "go". +// command is the struct of the command completion. +func New(name string, command Command) *Complete { + return &Complete{ + Command: command, + CLI: cmd.CLI{Name: name}, + Out: os.Stdout, + } +} + +// Run runs the completion and adds installation flags beforehand. +// The flags are added to the main flag CommandLine variable. +func (c *Complete) Run() bool { + c.AddFlags(nil) + flag.Parse() + return c.Complete() +} + +// Complete a command from the completion line in the environment variable, +// and print out the complete options.
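+// For example, when bash invokes the program with COMP_LINE="run bui",
+// Complete prints the matching options (e.g. "build", if such a sub command
+// is defined) and returns true.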
+// It returns true if the completion ran or if the CLI matched +// any of the given flags, false otherwise. +// For installation, it assumes that flags were added and parsed before +// it was called. +func (c *Complete) Complete() bool { + line, ok := getLine() + if !ok { + // make sure flags are parsed, + // in case they were not added in the main program + return c.CLI.Run() + } + Log("Completing line: %s", line) + a := newArgs(line) + Log("Completing last field: %s", a.Last) + options := c.Command.Predict(a) + Log("Options: %s", options) + + // filter only options that match the last argument + matches := []string{} + for _, option := range options { + if match.Prefix(option, a.Last) { + matches = append(matches, option) + } + } + Log("Matches: %s", matches) + c.output(matches) + return true +} + +func getLine() (string, bool) { + line := os.Getenv(envComplete) + if line == "" { + return "", false + } + return line, true +} + +func (c *Complete) output(options []string) { + // stdout of program defines the complete options + for _, option := range options { + fmt.Fprintln(c.Out, option) + } +} diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go new file mode 100644 index 0000000..797a80c --- /dev/null +++ b/vendor/github.com/posener/complete/log.go @@ -0,0 +1,23 @@ +package complete + +import ( + "io" + "io/ioutil" + "log" + "os" +) + +// Log is used for debugging purposes. +// Since complete runs on tab completion, it is nice to +// have logs written to stderr (when writing your own completer). +// To write logs, set the COMP_DEBUG environment variable and +// use complete.Log in the complete program. +var Log = getLogger() + +func getLogger() func(format string, args ...interface{}) { + var logfile io.Writer = ioutil.Discard + if os.Getenv(envDebug) != "" { + logfile = os.Stderr + } + return log.New(logfile, "complete ", log.Flags()).Printf +} diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go new file mode 100644 index 0000000..051171e --- /dev/null +++ b/vendor/github.com/posener/complete/match/file.go @@ -0,0 +1,19 @@ +package match + +import "strings" + +// File returns true if prefix can match the file +func File(file, prefix string) bool { + // special case for current directory completion + if file == "./" && (prefix == "." || prefix == "") { + return true + } + if prefix == "." && strings.HasPrefix(file, ".") { + return true + } + + file = strings.TrimPrefix(file, "./") + prefix = strings.TrimPrefix(prefix, "./") + + return strings.HasPrefix(file, prefix) +} diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go new file mode 100644 index 0000000..812fcac --- /dev/null +++ b/vendor/github.com/posener/complete/match/match.go @@ -0,0 +1,6 @@ +package match + +// Match matches two strings. +// It is used to compare a term to the last typed +// word (the prefix) and see whether it is a possible auto complete option.
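+// For example, Prefix("build", "bui") is a match, while
+// Prefix("test", "bui") is not.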
+type Match func(term, prefix string) bool diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go new file mode 100644 index 0000000..9a01ba6 --- /dev/null +++ b/vendor/github.com/posener/complete/match/prefix.go @@ -0,0 +1,9 @@ +package match + +import "strings" + +// Prefix is a simple Match function: there is a match if prefix is a +// prefix of the word. +// It returns true if long has prefix as its prefix. +func Prefix(long, prefix string) bool { + return strings.HasPrefix(long, prefix) +} diff --git a/vendor/github.com/posener/complete/metalinter.json b/vendor/github.com/posener/complete/metalinter.json new file mode 100644 index 0000000..799c1d0 --- /dev/null +++ b/vendor/github.com/posener/complete/metalinter.json @@ -0,0 +1,21 @@ +{ + "Vendor": true, + "DisableAll": true, + "Enable": [ + "gofmt", + "goimports", + "interfacer", + "goconst", + "misspell", + "unconvert", + "gosimple", + "golint", + "structcheck", + "deadcode", + "vet" + ], + "Exclude": [ + "initTests is unused" + ], + "Deadline": "2m" +} diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go new file mode 100644 index 0000000..8207063 --- /dev/null +++ b/vendor/github.com/posener/complete/predict.go @@ -0,0 +1,41 @@ +package complete + +// Predictor implements a Predict method, which, given +// command line arguments, returns a list of options it predicts. +type Predictor interface { + Predict(Args) []string +} + +// PredictOr unions multiple predictors, so that the resulting predictor +// returns the union of their predictions. +func PredictOr(predictors ...Predictor) Predictor { + return PredictFunc(func(a Args) (prediction []string) { + for _, p := range predictors { + if p == nil { + continue + } + prediction = append(prediction, p.Predict(a)...) + } + return + }) +} + +// PredictFunc determines what terms can follow a command or a flag. +// It is used for auto completion: given the last word already +// in the command line, it returns what words can complete it. +type PredictFunc func(Args) []string + +// Predict invokes the predict function and implements the Predictor interface +func (p PredictFunc) Predict(a Args) []string { + if p == nil { + return nil + } + return p(a) +} + +// PredictNothing expects nothing after it. +var PredictNothing Predictor + +// PredictAnything expects something, but nothing particular, such as a number +// or arbitrary name. +var PredictAnything = PredictFunc(func(Args) []string { return nil }) diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go new file mode 100644 index 0000000..c8adf7e --- /dev/null +++ b/vendor/github.com/posener/complete/predict_files.go @@ -0,0 +1,108 @@ +package complete + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/posener/complete/match" +) + +// PredictDirs will search for directories in the partially typed +// path. If no path has been typed yet, it will complete to directories +// in the current working directory. +func PredictDirs(pattern string) Predictor { + return files(pattern, false) +} + +// PredictFiles will search for files matching the given pattern in the +// partially typed path. If no path has been typed yet, it will complete to files that +// match the pattern in the current working directory. +// To match any file, use "*" as the pattern. To match go files use "*.go", and so on.
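+// When exactly one directory matches, the prediction descends one level
+// into that directory to offer more results.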
+func PredictFiles(pattern string) Predictor { + return files(pattern, true) +} + +func files(pattern string, allowFiles bool) PredictFunc { + + // search for files according to the arguments; + // if only one directory matched the result, search recursively into + // this directory to give more results. + return func(a Args) (prediction []string) { + prediction = predictFiles(a, pattern, allowFiles) + + // if the number of predictions is not 1, we either have many results or + // no results, so we return them. + if len(prediction) != 1 { + return + } + + // only try deeper if the single item is a directory + if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { + return + } + + a.Last = prediction[0] + return predictFiles(a, pattern, allowFiles) + } +} + +func predictFiles(a Args, pattern string, allowFiles bool) []string { + if strings.HasSuffix(a.Last, "/..") { + return nil + } + + dir := a.Directory() + files := listFiles(dir, pattern, allowFiles) + + // add dir if match + files = append(files, dir) + + return PredictFilesSet(files).Predict(a) +} + +// PredictFilesSet predicts, according to file rules, from a given set of file names +func PredictFilesSet(files []string) PredictFunc { + return func(a Args) (prediction []string) { + // add all matching files to prediction + for _, f := range files { + f = fixPathForm(a.Last, f) + + // test matching of file to the argument + if match.File(f, a.Last) { + prediction = append(prediction, f) + } + } + return + } +} + +func listFiles(dir, pattern string, allowFiles bool) []string { + // set of all file names + m := map[string]bool{} + + // list files + if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { + for _, f := range files { + if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { + m[f] = true + } + } + } + + // list directories + if dirs, err := ioutil.ReadDir(dir); err == nil { + for _, d := range dirs { + if d.IsDir() { + m[filepath.Join(dir, d.Name())] = true + } + } + } + + list := make([]string, 0, len(m)) + for k := range m { + list = append(list, k) + } + return list +} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go new file mode 100644 index 0000000..fa4a34a --- /dev/null +++ b/vendor/github.com/posener/complete/predict_set.go @@ -0,0 +1,12 @@ +package complete + +// PredictSet expects a specific set of terms, given in the options argument. +func PredictSet(options ...string) Predictor { + return predictSet(options) +} + +type predictSet []string + +func (p predictSet) Predict(a Args) []string { + return p +} diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md new file mode 100644 index 0000000..74077e3 --- /dev/null +++ b/vendor/github.com/posener/complete/readme.md @@ -0,0 +1,116 @@ +# complete + +[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) +[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) +[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) +[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete) + +A tool for writing bash completion in Go. + +Writing bash completion scripts is hard work. 
This package provides an easy way +to create bash completion scripts for any command, and also an easy way to install/uninstall +the completion of the command. + +## go command bash completion + +In [gocomplete](./gocomplete) there is an example of bash completion for the `go` command line. + +This is an example that uses the `complete` package on the `go` command - the `complete` package +can also be used to implement any completions, see [Usage](#usage). + +### Install + +1. Type in your shell: +``` +go get -u github.com/posener/complete/gocomplete +gocomplete -install +``` + +2. Restart your shell + +Uninstall by `gocomplete -uninstall` + +### Features + +- Complete the `go` command, including sub commands and all flags. +- Complete package names or `.go` files when necessary. +- Complete test names after the `-run` flag. + +## complete package + +Supported shells: + +- [x] bash +- [x] zsh + +### Usage + +Assuming you have a program called `run` and you want bash completion +for it - meaning that if you type `run`, then space, then press the `Tab` key, +the shell will suggest relevant completion options. + +In that case, we will create a Go program called `runcomplete`, with a +`func main()`, that provides the completion for the `run` +program. Once `runcomplete` is built into a binary, we can run +`runcomplete -install`, which adds all the bash completion +options for `run` to our shell. + +So here it is: + +```go +import "github.com/posener/complete" + +func main() { + + // create a Command object, that represents the command we want + // to complete. + run := complete.Command{ + + // Sub defines a list of sub commands of the program, + // this is recursive, since every sub command is itself of type Command. + Sub: complete.Commands{ + + // add a build sub command + "build": complete.Command{ + + // define flags of the build sub command + Flags: complete.Flags{ + // the build sub command has a flag '-cpus', which + // expects the number of cpus after it. In that case, + // anything could complete this flag. + "-cpus": complete.PredictAnything, + }, + }, + }, + + // define flags of the 'run' main command + Flags: complete.Flags{ + // a flag -o, which expects a file ending with .out after + // it; the tab completion will auto complete files matching + // the given pattern. + "-o": complete.PredictFiles("*.out"), + }, + + // define global flags of the 'run' main command; + // those will also show up when a sub command was entered in the + // command line + GlobalFlags: complete.Flags{ + + // a flag '-h' which does not expect anything after it + "-h": complete.PredictNothing, + }, + } + + // run the command completion, as part of the main() function. + // this triggers the autocompletion when needed. + // name must be exactly the name of the binary that we want to complete. + complete.New("run", run).Run() +} +``` + +### Self completing program + +If the program we want to complete is written in Go, we +can make it self completing. + +Here is an [example](./example/self/main.go) diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh new file mode 100644 index 0000000..56bfcf1 --- /dev/null +++ b/vendor/github.com/posener/complete/test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... 
| grep -v vendor); do + go test -v -race -coverprofile=profile.out -covermode=atomic $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done \ No newline at end of file diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go new file mode 100644 index 0000000..58b8b79 --- /dev/null +++ b/vendor/github.com/posener/complete/utils.go @@ -0,0 +1,46 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" +) + +// fixPathForm changes a file name to a relative name +func fixPathForm(last string, file string) string { + // get working directory for relative name + workDir, err := os.Getwd() + if err != nil { + return file + } + + abs, err := filepath.Abs(file) + if err != nil { + return file + } + + // if last is absolute, return path as absolute + if filepath.IsAbs(last) { + return fixDirPath(abs) + } + + rel, err := filepath.Rel(workDir, abs) + if err != nil { + return file + } + + // fix ./ prefix of path + if rel != "." && strings.HasPrefix(last, ".") { + rel = "./" + rel + } + + return fixDirPath(rel) +} + +func fixDirPath(path string) string { + info, err := os.Stat(path) + if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { + path += "/" + } + return path +} diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml deleted file mode 100644 index bf90ad5..0000000 --- a/vendor/github.com/satori/go.uuid/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -sudo: false -go: - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip -matrix: - allow_failures: - - go: tip - fast_finish: true -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci -notifications: - email: false diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE deleted file mode 100644 index 488357b..0000000 --- a/vendor/github.com/satori/go.uuid/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013-2016 by Maxim Bublis - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md deleted file mode 100644 index b6aad1c..0000000 --- a/vendor/github.com/satori/go.uuid/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# UUID package for Go language - -[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) -[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid) -[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) - -This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. - -With 100% test coverage and benchmarks out of box. - -Supported versions: -* Version 1, based on timestamp and MAC address (RFC 4122) -* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) -* Version 3, based on MD5 hashing (RFC 4122) -* Version 4, based on random numbers (RFC 4122) -* Version 5, based on SHA-1 hashing (RFC 4122) - -## Installation - -Use the `go` command: - - $ go get github.com/satori/go.uuid - -## Requirements - -UUID package requires Go >= 1.2. - -## Example - -```go -package main - -import ( - "fmt" - "github.com/satori/go.uuid" -) - -func main() { - // Creating UUID Version 4 - u1 := uuid.NewV4() - fmt.Printf("UUIDv4: %s\n", u1) - - // Parsing UUID from string input - u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - if err != nil { - fmt.Printf("Something gone wrong: %s", err) - } - fmt.Printf("Successfully parsed: %s", u2) -} -``` - -## Documentation - -[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. - -## Links -* [RFC 4122](http://tools.ietf.org/html/rfc4122) -* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) - -## Copyright - -Copyright (C) 2013-2016 by Maxim Bublis . - -UUID package released under MIT License. -See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go deleted file mode 100644 index 295f3fc..0000000 --- a/vendor/github.com/satori/go.uuid/uuid.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright (C) 2013-2015 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -// Package uuid provides implementation of Universally Unique Identifier (UUID). -// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and -// version 2 (as specified in DCE 1.1). -package uuid - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "database/sql/driver" - "encoding/binary" - "encoding/hex" - "fmt" - "hash" - "net" - "os" - "sync" - "time" -) - -// UUID layout variants. -const ( - VariantNCS = iota - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// UUID DCE domains. -const ( - DomainPerson = iota - DomainGroup - DomainOrg -) - -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). -const epochStart = 122192928000000000 - -// Used in string method conversion -const dash byte = '-' - -// UUID v1/v2 storage. -var ( - storageMutex sync.Mutex - storageOnce sync.Once - epochFunc = unixTimeFunc - clockSequence uint16 - lastTime uint64 - hardwareAddr [6]byte - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - -func initClockSequence() { - buf := make([]byte, 2) - safeRandom(buf) - clockSequence = binary.BigEndian.Uint16(buf) -} - -func initHardwareAddr() { - interfaces, err := net.Interfaces() - if err == nil { - for _, iface := range interfaces { - if len(iface.HardwareAddr) >= 6 { - copy(hardwareAddr[:], iface.HardwareAddr) - return - } - } - } - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence - safeRandom(hardwareAddr[:]) - - // Set multicast bit as recommended in RFC 4122 - hardwareAddr[0] |= 0x01 -} - -func initStorage() { - initClockSequence() - initHardwareAddr() -} - -func safeRandom(dest []byte) { - if _, err := rand.Read(dest); err != nil { - panic(err) - } -} - -// Returns difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and current time. -// This is default epoch calculation function. -func unixTimeFunc() uint64 { - return epochStart + uint64(time.Now().UnixNano()/100) -} - -// UUID representation compliant with specification -// described in RFC 4122. -type UUID [16]byte - -// NullUUID can be used with the standard sql package to represent a -// UUID value that can be NULL in the database -type NullUUID struct { - UUID UUID - Valid bool -} - -// The nil UUID is special form of UUID that is specified to have all -// 128 bits set to zero. -var Nil = UUID{} - -// Predefined namespace UUIDs. -var ( - NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8") -) - -// And returns result of binary AND of two UUIDs. -func And(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] & u2[i] - } - return u -} - -// Or returns result of binary OR of two UUIDs. -func Or(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] | u2[i] - } - return u -} - -// Equal returns true if u1 and u2 equals, otherwise returns false. -func Equal(u1 UUID, u2 UUID) bool { - return bytes.Equal(u1[:], u2[:]) -} - -// Version returns algorithm version used to generate UUID. -func (u UUID) Version() uint { - return uint(u[6] >> 4) -} - -// Variant returns UUID layout variant. 
-func (u UUID) Variant() uint { - switch { - case (u[8] & 0x80) == 0x00: - return VariantNCS - case (u[8]&0xc0)|0x80 == 0x80: - return VariantRFC4122 - case (u[8]&0xe0)|0xc0 == 0xc0: - return VariantMicrosoft - } - return VariantFuture -} - -// Bytes returns bytes slice representation of UUID. -func (u UUID) Bytes() []byte { - return u[:] -} - -// Returns canonical string representation of UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = dash - hex.Encode(buf[9:13], u[4:6]) - buf[13] = dash - hex.Encode(buf[14:18], u[6:8]) - buf[18] = dash - hex.Encode(buf[19:23], u[8:10]) - buf[23] = dash - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// SetVersion sets version bits. -func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets variant bits as described in RFC 4122. -func (u *UUID) SetVariant() { - u[8] = (u[8] & 0xbf) | 0x80 -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -func (u *UUID) UnmarshalText(text []byte) (err error) { - if len(text) < 32 { - err = fmt.Errorf("uuid: UUID string too short: %s", text) - return - } - - t := text[:] - braced := false - - if bytes.Equal(t[:9], urnPrefix) { - t = t[9:] - } else if t[0] == '{' { - braced = true - t = t[1:] - } - - b := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - if t[0] != '-' { - err = fmt.Errorf("uuid: invalid string format") - return - } - t = t[1:] - } - - if len(t) < byteGroup { - err = fmt.Errorf("uuid: UUID string too short: %s", text) - return - } - - if i == 4 && len(t) > byteGroup && - ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) { - err = fmt.Errorf("uuid: UUID string too long: %s", text) - return - } - - _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup]) - if err != nil { - return - } - - t = t[byteGroup:] - b = b[byteGroup/2:] - } - - return -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. -func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != 16 { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == 16 { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// Value implements the driver.Valuer interface. 
-func (u NullUUID) Value() (driver.Value, error) { - if !u.Valid { - return nil, nil - } - // Delegate to UUID Value function - return u.UUID.Value() -} - -// Scan implements the sql.Scanner interface. -func (u *NullUUID) Scan(src interface{}) error { - if src == nil { - u.UUID, u.Valid = Nil, false - return nil - } - - // Delegate to UUID Scan function - u.Valid = true - return u.UUID.Scan(src) -} - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromBytesOrNil returns UUID converted from raw byte slice input. -// Same behavior as FromBytes, but returns a Nil UUID on error. -func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) - if err != nil { - return Nil - } - return uuid -} - -// FromString returns UUID parsed from string input. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// FromStringOrNil returns UUID parsed from string input. -// Same behavior as FromString, but returns a Nil UUID on error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp, clock sequence, and hardware address. -func getStorage() (uint64, uint16, []byte) { - storageOnce.Do(initStorage) - - storageMutex.Lock() - defer storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= lastTime { - clockSequence++ - } - lastTime = timeNow - - return timeNow, clockSequence, hardwareAddr[:] -} - -// NewV1 returns UUID based on current timestamp and MAC address. -func NewV1() UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr) - - u.SetVersion(1) - u.SetVariant() - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func NewV2(domain byte) UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr) - - u.SetVersion(2) - u.SetVariant() - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(3) - u.SetVariant() - - return u -} - -// NewV4 returns random generated UUID. -func NewV4() UUID { - u := UUID{} - safeRandom(u[:]) - u.SetVersion(4) - u.SetVariant() - - return u -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(5) - u.SetVariant() - - return u -} - -// Returns UUID based on hashing of namespace UUID and name. 
-func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - - return u -} diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 0000000..e3c2fc2 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,25 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 0000000..58ebdc1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2016 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 0000000..969ae7a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,71 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It also includes a gxz command for compressing and +decompressing data. The package is completely written in Go and doesn't +have any dependency on C code. + +The package is currently under development. There might be bugs and APIs +are not considered stable. At this time the package cannot compete with +the xz tool regarding compression speed and size. The algorithms there +have been developed over a long time and are highly optimized. However, +there are a number of improvements planned and I'm very optimistic about +parallel compression and decompression. Stay tuned! + +# Using the API + +The following example program shows how to use the API.
+ + package main + + import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" + ) + + func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) + } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } + } + +# Using the gxz compression tool + +The package includes a gxz command line utility for compression and +decompression. + +Use the following command for installation: + + $ go get github.com/ulikunitz/xz/cmd/gxz + +To test it, call the following command. + + $ gxz bigfile + +After some time a much smaller file bigfile.xz will replace bigfile. +To decompress it, use the following command. + + $ gxz -d bigfile.xz + diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md new file mode 100644 index 0000000..7b34c0c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -0,0 +1,315 @@ +# TODO list + +## Release v0.6 + +1. Review encoder and check for lzma improvements under xz. +2. Fix binary tree matcher. +3. Compare compression ratio with xz tool using comparable parameters + and optimize parameters +4. Do some optimizations + - rename operation action and make it a simple type of size 8 + - make maxMatches, wordSize parameters + - stop searching after a certain length is found (parameter sweetLen) + +## Release v0.7 + +1. Optimize code +2. Do statistical analysis to get linear presets. +3. Test sync.Pool compatibility for xz and lzma Writer and Reader +4. Fuzz optimized code. + +## Release v0.8 + +1. Support parallel go routines for writing and reading xz files. +2. Support a ReaderAt interface for xz files with small block sizes. +3. Improve compatibility between gxz and xz +4. Provide manual page for gxz + +## Release v0.9 + +1. Improve documentation +2. Fuzz again + +## Release v1.0 + +1. Full functioning gxz +2. Add godoc URL to README.md (godoc.org) +3. Resolve all issues. +4. Define release candidates. +5. Public announcement. + +## Package lzma + +### Release v0.6 + +- Rewrite Encoder into a simple greedy one-op-at-a-time encoder + including + + simple scan at the dictionary head for the same byte + + use the killer byte (requiring matches to get longer, the first + test should be the byte that would make the match longer) + + +## Optimizations + +- There may be a lot of false sharing in lzma.State; check whether this + can be improved by reorganizing the internal structure of it. +- Check whether batching encoding and decoding improves speed. + +### DAG optimizations + +- Use full buffer to create minimal bit-length above range encoder. +- Might be too slow (see v0.4) + +### Different match finders + +- hashes with 2, 3 characters additional to 4 characters +- binary trees with 2-7 characters (uint64 as key, use uint32 as + pointers into an array) +- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + into an array with bit-stealing for the colors) + +## Release Procedure + +- execute goch -l for all packages; probably with lower param like 0.5.
+- check orthography with gospell +- Write release notes in doc/relnotes. +- Update README.md +- xb copyright . in xz directory to ensure all new files have Copyright + header +- VERSION=<version> go generate github.com/ulikunitz/xz/... to update + version files +- Execute test for Linux/amd64, Linux/x86 and Windows/amd64. +- Update TODO.md - write short log entry +- git checkout master && git merge dev +- git tag -a <version> +- git push + +## Log + +### 2017-06-05 + +Release v0.5.4 fixes issue #15, another problem with the padding size +check for the xz block header. I removed the check completely. + +### 2017-02-15 + +Release v0.5.3 fixes issue #12 regarding the decompression of an empty +XZ stream. Many thanks to Tomasz Kłak, who reported the issue. + +### 2016-12-02 + +Release v0.5.2 became necessary to allow the decoding of xz files with +4-byte padding in the block header. Many thanks to Greg, who reported +the issue. + +### 2016-07-23 + +Release v0.5.1 became necessary to fix problems with 32-bit platforms. +Many thanks to Bruno Brigas, who reported the issue. + +### 2016-07-04 + +Release v0.5 provides improvements to the compressor and provides support for +the decompression of xz files with multiple xz streams. + +### 2016-01-31 + +Another compression rate increase by checking the byte at the length of the +best match first, before checking the whole prefix. This makes the +compressor even faster. We now have a large time budget to beat the +compression ratio of the xz tool. For enwik8 we now have over 40 seconds +to reduce the compressed file size by another 7 MiB. + +### 2016-01-30 + +I simplified the encoder. Speed and compression rate increased +dramatically. A high compression rate also affects the decompression +speed. The approach with the buffer and optimizing for operation +compression rate has not been successful. Going for the maximum length +appears to be the best approach. + +### 2016-01-28 + +The release v0.4 is ready. It provides a working xz implementation, +which is rather slow, but works and is interoperable with the xz tool. +It is an important milestone. + +### 2016-01-10 + +I have the first working implementation of an xz reader and writer. I'm +happy about reaching this milestone. + +### 2015-12-02 + +I'm now ready to implement xz because I have a working LZMA2 +implementation. I decided today that v0.4 will use the slow encoder +using the operations buffer to be able to go back, if I intend to do so. + +### 2015-10-21 + +I have restarted the work on the library. While trying to implement +LZMA2, I discovered that I need to resimplify the encoder and decoder +functions. The option approach is too complicated. Using a limited byte +writer, not caring for written bytes at all, and not trying to handle +uncompressed data greatly simplifies the LZMA encoder and decoder. +Processing uncompressed data and handling limits is a feature of the +LZMA2 format, not of LZMA. + +I learned an interesting method from the LZO format. If the last copy is +too far away, they move the head by 2 bytes and not 1 byte to +reduce processing times. + +### 2015-08-26 + +I have now reimplemented the lzma package. The code is reasonably fast, +but can still be optimized. The next step is to implement LZMA2 and then +xz. + +### 2015-07-05 + +Created release v0.3. The version is the foundation for a full xz +implementation that is the target of v0.4.
+
+### 2015-06-11
+
+The gflag package has been developed because I couldn't use flag and
+pflag for fully compatible support of gzip's and lzma's options. It
+seems to work quite nicely now.
+
+### 2015-06-05
+
+The overflow issue was interesting to research. Henry S. Warren Jr.'s
+book Hacker's Delight was very helpful as usual and explained the issue
+perfectly. Fefe's information on his website was based on the C FAQ and
+quite bad, because it didn't address the issue that -MININT == MININT.
+
+### 2015-06-04
+
+It has been a productive day. I improved the interface of lzma.Reader
+and lzma.Writer and fixed the error handling.
+
+### 2015-06-01
+
+By computing the bit length of the LZMA operations I was able to
+improve the greedy algorithm implementation. By using an 8 MByte buffer
+the compression rate was not as good as for xz but already better than
+gzip's default.
+
+Compression is currently slow, but this is something we will be able to
+improve over time.
+
+### 2015-05-26
+
+Checked the license of ogier/pflag. The lzmago binary should include
+the license terms for the pflag library.
+
+I added the endorsement clause as used by Google for the Go sources to
+the LICENSE file.
+
+### 2015-05-22
+
+The package lzb now contains the basic implementation for creating or
+reading LZMA byte streams. It allows the support for the implementation
+of the DAG-shortest-path algorithm for the compression function.
+
+### 2015-04-23
+
+Yesterday I completed the lzbase classes. I'm a little bit concerned
+that using the components may require too much code, but on the other
+hand there is a lot of flexibility.
+
+### 2015-04-22
+
+Implemented Reader and Writer during the Bayern game against Porto. The
+second half gave me enough time.
+
+### 2015-04-21
+
+While showering this morning I discovered that the design for OpEncoder
+and OpDecoder doesn't work, because encoding/decoding might depend on
+the current status of the dictionary. This is not exactly the right way
+to start the day.
+
+Therefore we need to keep the Reader and Writer design. This time
+around we simplify it by ignoring size limits. These can be added by
+wrappers around the Reader and Writer interfaces. The Parameters type
+isn't needed anymore.
+
+However, I will implement a ReaderState and WriterState type to use
+static typing to ensure that the right State object is combined with
+the right lzbase.Reader and lzbase.Writer.
+
+As a start I have implemented ReaderState and WriterState to ensure
+that the state for reading is only used by readers and WriterState is
+only used by writers.
+
+### 2015-04-20
+
+Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
+
+### 2015-04-08
+
+Came up with a new simplified design for lzbase. I have already
+implemented the type State that replaces OpCodec.
+
+### 2015-04-06
+
+The new lzma package is now fully usable and lzmago is using it now.
+The old lzma package has been completely removed.
+
+### 2015-04-05
+
+Implemented lzma.Reader and tested it.
+
+### 2015-04-04
+
+Implemented baseReader by adapting code from lzma.Reader.
+
+### 2015-04-03
+
+The opCodec was copied yesterday to lzma2. opCodec has a high number of
+dependencies on other files in lzma2. Therefore I had to copy almost
+all files from lzma.
+
+### 2015-03-31
+
+Removed only a TODO item.
+
+However, in Francesco Campoy's presentation "Go for Javaneros
+(Javaïstes?)" there is the idea that using an embedded field E, all the
+methods of E will be defined on T. If E is an interface, T satisfies E.
+
+https://talks.golang.org/2014/go4java.slide#51
+
+I have never used this, but it seems to be a cool idea.
+
+### 2015-03-30
+
+Finished the type writerDict and wrote a simple test.
+
+### 2015-03-25
+
+I started to implement the writerDict.
+
+### 2015-03-24
+
+After thinking long about the LZMA2 code and several false starts, I
+now have a plan to create a self-sufficient lzma2 package that supports
+the classic LZMA format as well as LZMA2. The core idea is to support a
+baseReader and baseWriter type that support the basic LZMA stream
+without any headers. Both types must support the reuse of dictionaries
+and the opCodec.
+
+### 2015-01-10
+
+1. Implemented a simple lzmago tool.
+2. Tested the tool against a large 4.4G file.
+   - compression worked correctly; tested decompression with lzma
+   - decompression hits a full buffer condition
+3. Fixed a bug in the compressor and wrote a test for it.
+4. Executed a full cycle for the 4.4 GB file; performance can be
+   improved ;-)
+
+### 2015-01-11
+
+- Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 0000000..fadc1a5
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,74 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"io"
+)
+
+// putUint32LE puts the little-endian representation of x into the first
+// four bytes of p.
+func putUint32LE(p []byte, x uint32) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the little-endian representation of x into the first
+// eight bytes of p.
+func putUint64LE(p []byte, x uint64) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+	p[4] = byte(x >> 32)
+	p[5] = byte(x >> 40)
+	p[6] = byte(x >> 48)
+	p[7] = byte(x >> 56)
+}
+
+// uint32LE converts a little-endian representation to a uint32 value.
+func uint32LE(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
+		uint32(p[3])<<24
+}
+
+// putUvarint puts a uvarint representation of x into the byte slice.
+func putUvarint(p []byte, x uint64) int {
+	i := 0
+	for x >= 0x80 {
+		p[i] = byte(x) | 0x80
+		x >>= 7
+		i++
+	}
+	p[i] = byte(x)
+	return i + 1
+}
+
+// errOverflow indicates an overflow of the 64-bit unsigned integer.
+var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
+
+// readUvarint reads a uvarint from the given byte reader.
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+	var s uint
+	i := 0
+	for {
+		b, err := r.ReadByte()
+		if err != nil {
+			return x, i, err
+		}
+		i++
+		if b < 0x80 {
+			if i > 10 || i == 10 && b > 1 {
+				return x, i, errOverflowU64
+			}
+			return x | uint64(b)<<s, i, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+}
+
+// allZeros checks whether the given byte slice contains only zeros.
+func allZeros(data []byte) bool {
+	for _, c := range data {
+		if c != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// padLen returns the length of the padding required for the given
+// argument.
+func padLen(n int64) int {
+	k := int(n % 4)
+	if k > 0 {
+		k = 4 - k
+	}
+	return k
+}
+
+/*** Header ***/
+
+// headerMagic stores the magic bytes for the header.
+var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
+
+// HeaderLen provides the length of the xz file header.
+const HeaderLen = 12
+
+// Constants for the checksum methods supported by xz.
+const (
+	CRC32  byte = 0x1
+	CRC64       = 0x4
+	SHA256      = 0xa
+)
+
+// errInvalidFlags indicates that flags are invalid.
+var errInvalidFlags = errors.New("xz: invalid flags")
+
+// verifyFlags returns the error errInvalidFlags if the value is
+// invalid.
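+//
+// An illustrative sketch of how the check behaves (not part of the
+// original source):
+//
+//	verifyFlags(CRC64) // nil: CRC-64 is a supported checksum method
+//	verifyFlags(0x03)  // errInvalidFlags: neither CRC32, CRC64 nor SHA256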
+func verifyFlags(flags byte) error { + switch flags { + case CRC32, CRC64, SHA256: + return nil + default: + return errInvalidFlags + } +} + +// flagstrings maps flag values to strings. +var flagstrings = map[byte]string{ + CRC32: "CRC-32", + CRC64: "CRC-64", + SHA256: "SHA-256", +} + +// flagString returns the string representation for the given flags. +func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. +var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. +func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. 
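+//
+// An illustrative round trip (a sketch, not part of the original
+// source); an index size of 8 is valid because it is at least 4 and a
+// multiple of 4:
+//
+//	f := footer{indexSize: 8, flags: CRC32}
+//	data, err := f.MarshalBinary() // 12 bytes: CRC-32, backward size, flags, "YZ"
+//	var g footer
+//	err = g.UnmarshalBinary(data)  // recovers indexSize == 8 and flags == CRC32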
+func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. 
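+//
+// For example (a sketch), when the field is not present in the header
+// the function reports -1 without consuming any input:
+//
+//	size, err := readSizeInBlockHeader(r, false) // size == -1, err == nil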
+func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
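+//
+// An illustrative construction (a sketch; it is assumed that the
+// zero-value lzmaFilter is acceptable here):
+//
+//	h := blockHeader{
+//		compressedSize:   -1, // negative sizes are omitted from the header
+//		uncompressedSize: -1,
+//		filters:          []filter{new(lzmaFilter)}, // LZMA2 must be last
+//	}
+//	data, err := h.MarshalBinary() // size byte, flags, filters, padding, CRC-32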
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +// writeFilters writes the filters. 
+func writeFilters(w io.Writer, filters []filter) (n int, err error) { + for _, f := range filters { + p, err := f.MarshalBinary() + if err != nil { + return n, err + } + k, err := w.Write(p) + n += k + if err != nil { + return n, err + } + } + return n, nil +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. +func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
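+//
+// A typical call site (a sketch): once readBlockHeader has returned
+// errIndexIndicator, the indicator byte 0x00 is already consumed and
+// the caller continues with
+//
+//	records, _, err := readIndexBody(r)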
+func readIndexBody(r io.Reader) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 0000000..4b820bd Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/fox.xz differ diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 0000000..a328878 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. +func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. 
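+//
+// An illustrative use of the rolling property (a sketch): once the
+// window is full, each RollByte call removes the oldest byte's
+// contribution and adds the new one.
+//
+//	r := NewCyclicPoly(4)
+//	for _, c := range []byte("abcd") {
+//		_ = r.RollByte(c) // hash of "abcd" after the last call
+//	}
+//	h := r.RollByte('e') // hash of the window "bcde"
+//	_ = h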
+var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 
0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 0000000..f99ec22 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 0000000..58635b1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. +const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. 
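+//
+// An illustrative sketch using the package's Hashes helper: with a
+// window of 4 bytes, a 6-byte input yields 3 window hashes.
+//
+//	r := NewRabinKarp(4)
+//	h := Hashes(r, []byte("abcdef")) // hashes of "abcd", "bcde", "cdef"
+//	_ = h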
+func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. +func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 0000000..ab6a19c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after hash has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 0000000..0ba45e8 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,457 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows to disable +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. The package has also a predefined +// 'standard' Logger accessible through helper function Print[f|ln], +// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] +// that are easier to use then creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags. +// +// The Fatal functions call os.Exit(1) after the message is output +// unless not suppressed by the flags. The Panic functions call panic +// after the writing the log message unless suppressed. +package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. 
The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +// +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) + if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) 
+ } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) 
+ os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) 
+} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 0000000..a781bd1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,523 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bufio" + "errors" + "fmt" + "io" + "unicode" +) + +// node represents a node in the binary tree. +type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. 
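+//
+// For example (a sketch), an empty tree is recognized by comparing the
+// root against the sentinel:
+//
+//	if t.root == null { /* tree contains no nodes */ }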
+const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. +func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. +func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. + un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. 
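+//
+// An illustrative sketch of the bracketing behaviour: for a tree
+// holding the values 1, 3 and 5, search(t.root, 4) returns the node
+// with value 3 as a and the node with value 5 as b, while
+// search(t.root, 3) returns the node with value 3 for both a and b.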
+func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
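+//
+// An illustrative use (a sketch):
+//
+//	var sb strings.Builder
+//	if err := t.dump(&sb); err != nil {
+//		// handle the write error
+//	}
+//	fmt.Print(sb.String()) // indented view; the right subtree is printed first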
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 0000000..e9bab01 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,45 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
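+//
+// A worked example (a sketch) for x = 20 (binary 10100): x & -x
+// isolates the lowest set bit, 4; multiplying by ntz32Const pushes a
+// unique 5-bit pattern into the top bits, (4*ntz32Const)>>27 == 2; and
+// ntz32Table[2] == 2 is the number of trailing zeros.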
+var ntz32Table = [32]int8{
+	0, 1, 2, 24, 3, 19, 6, 25,
+	22, 4, 20, 10, 16, 7, 12, 26,
+	31, 23, 18, 5, 21, 9, 15, 11,
+	30, 17, 8, 14, 29, 13, 28, 27,
+}
+
+// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
+func ntz32(x uint32) int {
+	if x == 0 {
+		return 32
+	}
+	x = (x & -x) * ntz32Const
+	return int(ntz32Table[x>>27])
+}
+
+// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
+func nlz32(x uint32) int {
+	// Smear the leftmost bit to the right.
+	x |= x >> 1
+	x |= x >> 2
+	x |= x >> 4
+	x |= x >> 8
+	x |= x >> 16
+	// Use the ntz mechanism to calculate nlz.
+	x++
+	if x == 0 {
+		return 0
+	}
+	x *= ntz32Const
+	return 32 - int(ntz32Table[x>>27])
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go
new file mode 100644
index 0000000..5350d81
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go
@@ -0,0 +1,39 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+)
+
+// breader provides the ReadByte function for a Reader. It doesn't read
+// more data from the reader than absolutely necessary.
+type breader struct {
+	io.Reader
+	// helper slice to save allocations
+	p []byte
+}
+
+// ByteReader converts an io.Reader into an io.ByteReader.
+func ByteReader(r io.Reader) io.ByteReader {
+	br, ok := r.(io.ByteReader)
+	if !ok {
+		return &breader{r, make([]byte, 1)}
+	}
+	return br
+}
+
+// ReadByte reads a single byte from the underlying reader.
+func (r *breader) ReadByte() (c byte, err error) {
+	n, err := r.Reader.Read(r.p)
+	if n < 1 {
+		if err == nil {
+			err = errors.New("breader.ReadByte: no data")
+		}
+		return 0, err
+	}
+	return r.p[0], nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
new file mode 100644
index 0000000..50e0b6d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
@@ -0,0 +1,171 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+)
+
+// buffer provides a circular buffer of bytes. If the front index equals
+// the rear index the buffer is empty. As a consequence front cannot
+// equal rear for a full buffer. So a full buffer has a length that is
+// one byte less than the length of the data slice.
+type buffer struct {
+	data  []byte
+	front int
+	rear  int
+}
+
+// newBuffer creates a buffer with the given size.
+func newBuffer(size int) *buffer {
+	return &buffer{data: make([]byte, size+1)}
+}
+
+// Cap returns the capacity of the buffer.
+func (b *buffer) Cap() int {
+	return len(b.data) - 1
+}
+
+// Reset resets the buffer. The front and rear index are set to zero.
+func (b *buffer) Reset() {
+	b.front = 0
+	b.rear = 0
+}
+
+// Buffered returns the number of bytes buffered.
+func (b *buffer) Buffered() int {
+	delta := b.front - b.rear
+	if delta < 0 {
+		delta += len(b.data)
+	}
+	return delta
+}
+
+// Available returns the number of bytes available for writing.
+func (b *buffer) Available() int {
+	delta := b.rear - 1 - b.front
+	if delta < 0 {
+		delta += len(b.data)
+	}
+	return delta
+}
+
+// addIndex adds a non-negative integer to the index i and returns the
+// resulting index. The function takes care of wrapping the index as
+// well as potential overflow situations.
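+//
+// Editorial sketch (not upstream code): with len(b.data) == 8, the
+// call addIndex(6, 5) evaluates 6 + 5 - 8 = 3, so the sum wraps inside
+// the circular buffer and the intermediate value can never overflow:
+//
+//	b := &buffer{data: make([]byte, 8)}
+//	i := b.addIndex(6, 5) // i == 3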
+func (b *buffer) addIndex(i int, n int) int {
+	// subtraction of len(b.data) prevents overflow
+	i += n - len(b.data)
+	if i < 0 {
+		i += len(b.data)
+	}
+	return i
+}
+
+// Read reads bytes from the buffer into p and returns the number of
+// bytes read. The function never returns an error but might return less
+// data than requested.
+func (b *buffer) Read(p []byte) (n int, err error) {
+	n, err = b.Peek(p)
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// Peek reads bytes from the buffer into p without changing the buffer.
+// Peek will never return an error but might return less data than
+// requested.
+func (b *buffer) Peek(p []byte) (n int, err error) {
+	m := b.Buffered()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:n]
+	}
+	k := copy(p, b.data[b.rear:])
+	if k < n {
+		copy(p[k:], b.data)
+	}
+	return n, nil
+}
+
+// Discard skips the next n bytes to read from the buffer, returning the
+// number of bytes discarded.
+//
+// If Discard skips fewer than n bytes, it returns an error.
+func (b *buffer) Discard(n int) (discarded int, err error) {
+	if n < 0 {
+		return 0, errors.New("buffer.Discard: negative argument")
+	}
+	m := b.Buffered()
+	if m < n {
+		n = m
+		err = errors.New(
+			"buffer.Discard: discarded fewer bytes than requested")
+	}
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// ErrNoSpace indicates that there is insufficient space for the Write
+// operation.
+var ErrNoSpace = errors.New("insufficient space")
+
+// Write puts data into the buffer. If fewer bytes are written than
+// requested ErrNoSpace is returned.
+func (b *buffer) Write(p []byte) (n int, err error) {
+	m := b.Available()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:m]
+		err = ErrNoSpace
+	}
+	k := copy(b.data[b.front:], p)
+	if k < n {
+		copy(b.data, p[k:])
+	}
+	b.front = b.addIndex(b.front, n)
+	return n, err
+}
+
+// WriteByte writes a single byte into the buffer. The error ErrNoSpace
+// is returned if no space is available in the buffer for writing a
+// single byte.
+func (b *buffer) WriteByte(c byte) error {
+	if b.Available() < 1 {
+		return ErrNoSpace
+	}
+	b.data[b.front] = c
+	b.front = b.addIndex(b.front, 1)
+	return nil
+}
+
+// prefixLen returns the length of the common prefix of a and b.
+func prefixLen(a, b []byte) int {
+	if len(a) > len(b) {
+		a, b = b, a
+	}
+	for i, c := range a {
+		if b[i] != c {
+			return i
+		}
+	}
+	return len(a)
+}
+
+// matchLen returns the length of the common prefix for the given
+// distance from the rear and the byte slice p.
+func (b *buffer) matchLen(distance int, p []byte) int {
+	var n int
+	i := b.rear - distance
+	if i < 0 {
+		if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
+			return n
+		}
+		p = p[n:]
+		i = 0
+	}
+	n += prefixLen(p, b.data[i:])
+	return n
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 0000000..a3696ba
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+)
+
+// ErrLimit indicates that the limit of the LimitedByteWriter has been
+// reached.
+var ErrLimit = errors.New("limit reached")
+
+// LimitedByteWriter provides a byte writer that can be written until a
+// limit is reached. The field N provides the number of remaining
+// bytes.
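+//
+// A rough usage sketch (editorial addition; bytes.Buffer implements
+// io.ByteWriter):
+//
+//	var buf bytes.Buffer
+//	w := &LimitedByteWriter{BW: &buf, N: 2}
+//	w.WriteByte('a')        // ok, w.N drops to 1
+//	w.WriteByte('b')        // ok, w.N drops to 0
+//	err := w.WriteByte('c') // err == ErrLimit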
+type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 0000000..16e14db --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
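+//
+// Editorial sketch of the intended read loop (names as above; the
+// decoder is driven through its Read method, which calls decompress
+// internally):
+//
+//	buf := make([]byte, 4096)
+//	for {
+//		n, err := d.Read(buf)
+//		// consume buf[:n]
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			// handle error
+//		}
+//	}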
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 0000000..564a12b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,135 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } + +// Buffered returns the number of bytes currently buffered in the +// decoder dictionary. +func (d *decoderDict) buffered() int { return d.buf.Buffered() } + +// Peek gets data from the buffer without advancing the rear index. +func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 0000000..e08eb98 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,49 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "fmt" + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// makeDirectCodec creates a directCodec. The function panics if the number of +// bits is not in the range [1,32]. +func makeDirectCodec(bits int) directCodec { + if !(1 <= bits && bits <= 32) { + panic(fmt.Errorf("bits=%d out of range", bits)) + } + return directCodec(bits) +} + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. 
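+//
+// Editorial example (sketch, not upstream code): a directCodec(3)
+// encodes v = 5 (binary 101) as the bit sequence 1, 0, 1:
+//
+//	dc := makeDirectCodec(3)
+//	err := dc.Encode(e, 5) // emits bits 1, 0, 1, MSB first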
+func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. +func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 0000000..b053a2d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,156 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 + // maximum position slot + maxPosSlot = 63 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// distBits returns the number of bits required to encode dist. +func distBits(dist uint32) int { + if dist < startPosModel { + return 6 + } + // slot s > 3, dist d + // s = 2(bits(d)-1) + bit(d, bits(d)-2) + // s>>1 = bits(d)-1 + // bits(d) = 32-nlz32(d) + // s>>1=31-nlz32(d) + // n = 5 + (s>>1) = 36 - nlz32(d) + return 36 - nlz32(dist) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. 
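+//
+// Editorial worked example: for dist = 100 (7 significant bits),
+// bits = 30 - nlz32(100) = 5 and
+//
+//	posSlot = startPosModel - 2 + (bits << 1) + ((dist >> bits) & 1)
+//	        = 2 + 10 + 1 = 13
+//
+// which is consistent with the slot formula given for distBits above.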
+func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. +func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 0000000..18ce009 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 10 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. 
+func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. +func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + 
if err != nil {
+				return err
+			}
+			if b == 1 {
+				e.state.rep[3] = e.state.rep[2]
+			}
+			e.state.rep[2] = e.state.rep[1]
+		}
+		e.state.rep[1] = e.state.rep[0]
+		e.state.rep[0] = dist
+	}
+	e.state.updateStateRep()
+	return e.state.repLenCodec.Encode(e.re, n, posState)
+}
+
+// writeOp writes a single operation to the range encoder. The function
+// checks whether there is enough space available to close the LZMA
+// stream.
+func (e *encoder) writeOp(op operation) error {
+	if e.re.Available() < int64(e.margin) {
+		return ErrLimit
+	}
+	switch x := op.(type) {
+	case lit:
+		return e.writeLiteral(x)
+	case match:
+		return e.writeMatch(x)
+	default:
+		panic("unexpected operation")
+	}
+}
+
+// compress compresses data from the dictionary buffer. If the flag all
+// is set, all data in the dictionary buffer will be compressed. The
+// function returns ErrLimit if the underlying writer has reached its
+// limit.
+func (e *encoder) compress(flags compressFlags) error {
+	n := 0
+	if flags&all == 0 {
+		n = maxMatchLen - 1
+	}
+	d := e.dict
+	m := d.m
+	for d.Buffered() > n {
+		op := m.NextOp(e.state.rep)
+		if err := e.writeOp(op); err != nil {
+			return err
+		}
+		d.Discard(op.Len())
+	}
+	return nil
+}
+
+// eosMatch is a pseudo operation that indicates the end of the stream.
+var eosMatch = match{distance: maxDistance, n: minMatchLen}
+
+// Close terminates the LZMA stream. If requested the end-of-stream
+// marker will be written. If the byte writer limit has been or will be
+// reached during compression of the remaining data in the buffer, the
+// LZMA stream will be closed and data will remain in the buffer.
+func (e *encoder) Close() error {
+	err := e.compress(all)
+	if err != nil && err != ErrLimit {
+		return err
+	}
+	if e.marker {
+		if err := e.writeMatch(eosMatch); err != nil {
+			return err
+		}
+	}
+	err = e.re.Close()
+	return err
+}
+
+// Compressed returns the number of bytes of the input data that have
+// been compressed.
+func (e *encoder) Compressed() int64 {
+	return e.dict.Pos() - e.start
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
new file mode 100644
index 0000000..9d0fbc7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
@@ -0,0 +1,149 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// matcher is an interface that supports the identification of the next
+// operation.
+type matcher interface {
+	io.Writer
+	SetDict(d *encoderDict)
+	NextOp(rep [4]uint32) operation
+}
+
+// encoderDict provides the dictionary of the encoder. It includes an
+// additional buffer on top of the actual dictionary.
+type encoderDict struct {
+	buf      buffer
+	m        matcher
+	head     int64
+	capacity int
+	// preallocated array
+	data [maxMatchLen]byte
+}
+
+// newEncoderDict creates the encoder dictionary. The argument bufSize
+// defines the size of the additional buffer.
+func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
+	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
+		return nil, errors.New(
+			"lzma: dictionary capacity out of range")
+	}
+	if bufSize < 1 {
+		return nil, errors.New(
+			"lzma: buffer size must be larger than zero")
+	}
+	d = &encoderDict{
+		buf:      *newBuffer(dictCap + bufSize),
+		capacity: dictCap,
+		m:        m,
+	}
+	m.SetDict(d)
+	return d, nil
+}
+
+// Discard discards n bytes.
Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 0000000..5edad63 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma differ diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 0000000..d786a97 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. 
+const shortDists = 8
+
+// The minimum is somewhat arbitrary but the maximum is limited by the
+// memory requirements of the hash table.
+const (
+	minTableExponent = 9
+	maxTableExponent = 20
+)
+
+// newRoller contains the function used to create an instance of the
+// hash.Roller.
+var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) }
+
+// hashTable stores the hash table including the rolling hash method.
+//
+// We implement chained hashing into a circular buffer. Each entry in
+// the circular buffer stores the delta distance to the next position with a
+// word that has the same hash value.
+type hashTable struct {
+	dict *encoderDict
+	// actual hash table
+	t []int64
+	// circular list data with the offset to the next word
+	data  []uint32
+	front int
+	// mask for computing the index for the hash table
+	mask uint64
+	// hash offset; initial value is -int64(wordLen)
+	hoff int64
+	// length of the hashed word
+	wordLen int
+	// hash roller for computing the hash values for the Write
+	// method
+	wr hash.Roller
+	// hash roller for computing arbitrary hashes
+	hr hash.Roller
+	// preallocated slices
+	p         [maxMatches]int64
+	distances [maxMatches + shortDists]int
+}
+
+// hashTableExponent derives the hash table exponent from the dictionary
+// capacity.
+func hashTableExponent(n uint32) int {
+	e := 30 - nlz32(n)
+	switch {
+	case e < minTableExponent:
+		e = minTableExponent
+	case e > maxTableExponent:
+		e = maxTableExponent
+	}
+	return e
+}
+
+// newHashTable creates a new hash table for words of length wordLen.
+func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
+	if !(0 < capacity) {
+		return nil, errors.New(
+			"newHashTable: capacity must be positive")
+	}
+	exp := hashTableExponent(uint32(capacity))
+	if !(1 <= wordLen && wordLen <= 4) {
+		return nil, errors.New("newHashTable: " +
+			"argument wordLen out of range")
+	}
+	n := 1 << uint(exp)
+	if n <= 0 {
+		panic("newHashTable: exponent is too large")
+	}
+	t = &hashTable{
+		t:       make([]int64, n),
+		data:    make([]uint32, capacity),
+		mask:    (uint64(1) << uint(exp)) - 1,
+		hoff:    -int64(wordLen),
+		wordLen: wordLen,
+		wr:      newRoller(wordLen),
+		hr:      newRoller(wordLen),
+	}
+	return t, nil
+}
+
+func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }
+
+// buffered returns the number of bytes that are currently hashed.
+func (t *hashTable) buffered() int {
+	n := t.hoff + 1
+	switch {
+	case n <= 0:
+		return 0
+	case n >= int64(len(t.data)):
+		return len(t.data)
+	}
+	return int(n)
+}
+
+// addIndex adds n to an index ensuring that it stays inside the
+// circular buffer for the hash chain.
+func (t *hashTable) addIndex(i, n int) int {
+	i += n - len(t.data)
+	if i < 0 {
+		i += len(t.data)
+	}
+	return i
+}
+
+// putDelta puts the delta instance at the current front of the circular
+// chain buffer.
+func (t *hashTable) putDelta(delta uint32) {
+	t.data[t.front] = delta
+	t.front = t.addIndex(t.front, 1)
+}
+
+// putEntry puts a new entry into the hash table. If there is already a
+// value stored it is moved into the circular chain buffer.
+func (t *hashTable) putEntry(h uint64, pos int64) {
+	if pos < 0 {
+		return
+	}
+	i := h & t.mask
+	old := t.t[i] - 1
+	t.t[i] = pos + 1
+	var delta int64
+	if old >= 0 {
+		delta = pos - old
+		if delta > 1<<32-1 || delta > int64(t.buffered()) {
+			delta = 0
+		}
+	}
+	t.putDelta(uint32(delta))
+}
+
+// WriteByte converts a single byte into a hash and puts it into the hash
+// table.
+func (t *hashTable) WriteByte(b byte) error {
+	h := t.wr.RollByte(b)
+	t.hoff++
+	t.putEntry(h, t.hoff)
+	return nil
+}
+
+// Write hashes the bytes provided and stores the resulting entries in
+// the hash table. The method will never return an error.
+func (t *hashTable) Write(p []byte) (n int, err error) {
+	for _, b := range p {
+		// WriteByte doesn't generate an error.
+		t.WriteByte(b)
+	}
+	return len(p), nil
+}
+
+// getMatches returns the matches for a specific hash. The function
+// returns the number of positions found.
+//
+// TODO: Make a getDistances because that is what we are actually
+// interested in.
+func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
+	if t.hoff < 0 || len(positions) == 0 {
+		return 0
+	}
+	buffered := t.buffered()
+	tailPos := t.hoff + 1 - int64(buffered)
+	rear := t.front - buffered
+	if rear >= 0 {
+		rear -= len(t.data)
+	}
+	// get the slot for the hash
+	pos := t.t[h&t.mask] - 1
+	delta := pos - tailPos
+	for {
+		if delta < 0 {
+			return n
+		}
+		positions[n] = tailPos + delta
+		n++
+		if n >= len(positions) {
+			return n
+		}
+		i := rear + int(delta)
+		if i < 0 {
+			i += len(t.data)
+		}
+		u := t.data[i]
+		if u == 0 {
+			return n
+		}
+		delta -= int64(u)
+	}
+}
+
+// hash computes the rolling hash for the word stored in p. For correct
+// results its length must be equal to t.wordLen.
+func (t *hashTable) hash(p []byte) uint64 {
+	var h uint64
+	for _, b := range p {
+		h = t.hr.RollByte(b)
+	}
+	return h
+}
+
+// Matches fills the positions slice with potential matches. The
+// function returns the number of positions filled into positions. The
+// byte slice p must have the word length of the hash table.
+func (t *hashTable) Matches(p []byte, positions []int64) int {
+	if len(p) != t.wordLen {
+		panic(fmt.Errorf(
+			"byte slice must have length %d", t.wordLen))
+	}
+	h := t.hash(p)
+	return t.getMatches(h, positions)
+}
+
+// NextOp identifies the next operation using the hash table.
+//
+// TODO: Use all repetitions to find matches.
+func (t *hashTable) NextOp(rep [4]uint32) operation {
+	// get positions
+	data := t.dict.data[:maxMatchLen]
+	n, _ := t.dict.buf.Peek(data)
+	data = data[:n]
+	var p []int64
+	if n < t.wordLen {
+		p = t.p[:0]
+	} else {
+		p = t.p[:maxMatches]
+		n = t.Matches(data[:t.wordLen], p)
+		p = p[:n]
+	}
+
+	// convert positions into potential distances
+	head := t.dict.head
+	dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
+	for _, pos := range p {
+		dis := int(head - pos)
+		if dis > shortDists {
+			dists = append(dists, dis)
+		}
+	}
+
+	// check distances
+	var m match
+	dictLen := t.dict.DictLen()
+	for _, dist := range dists {
+		if dist > dictLen {
+			continue
+		}
+
+		// Here comes a trick. We are only interested in matches
+		// that are longer than the matches we have found
+		// before. So before we test the whole byte sequence at
+		// the given distance, we test the one byte that would
+		// make the match longer. If it doesn't match, we don't
+		// need to care about this distance any longer.
+		i := t.dict.buf.rear - dist + m.n
+		if i < 0 {
+			i += len(t.dict.buf.data)
+		}
+		if t.dict.buf.data[i] != data[m.n] {
+			// We can't get a longer match. Jump to the next
+			// distance.
+			continue
+		}
+
+		n := t.dict.buf.matchLen(dist, data)
+		switch n {
+		case 0:
+			continue
+		case 1:
+			if uint32(dist-minDistance) != rep[0] {
+				continue
+			}
+		}
+		if n > m.n {
+			m = match{int64(dist), n}
+			if n == len(data) {
+				// No better match will be found.
+ break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 0000000..bc70896 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// uint32LE reads an uint32 integer from a byte slice +func uint32LE(b []byte) uint32 { + x := uint32(b[3]) << 24 + x |= uint32(b[2]) << 16 + x |= uint32(b[1]) << 8 + x |= uint32(b[0]) + return x +} + +// uint64LE converts the uint64 value stored as little endian to an uint64 +// value. +func uint64LE(b []byte) uint64 { + x := uint64(b[7]) << 56 + x |= uint64(b[6]) << 48 + x |= uint64(b[5]) << 40 + x |= uint64(b[4]) << 32 + x |= uint64(b[3]) << 24 + x |= uint64(b[2]) << 16 + x |= uint64(b[1]) << 8 + x |= uint64(b[0]) + return x +} + +// putUint32LE puts an uint32 integer into a byte slice that must have at least +// a length of 4 bytes. +func putUint32LE(b []byte, x uint32) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) +} + +// putUint64LE puts the uint64 value into the byte slice as little endian +// value. The byte slice b must have at least place for 8 bytes. +func putUint64LE(b []byte, x uint64) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) +} + +// noHeaderSize defines the value of the length field in the LZMA header. +const noHeaderSize uint64 = 1<<64 - 1 + +// HeaderLen provides the length of the LZMA file header. +const HeaderLen = 13 + +// header represents the header of an LZMA file. +type header struct { + properties Properties + dictCap int + // uncompressed size; negative value if no size is given + size int64 +} + +// marshalBinary marshals the header. +func (h *header) marshalBinary() (data []byte, err error) { + if err = h.properties.verify(); err != nil { + return nil, err + } + if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + return nil, fmt.Errorf("lzma: DictCap %d out of range", + h.dictCap) + } + + data = make([]byte, 13) + + // property byte + data[0] = h.properties.Code() + + // dictionary capacity + putUint32LE(data[1:5], uint32(h.dictCap)) + + // uncompressed size + var s uint64 + if h.size > 0 { + s = uint64(h.size) + } else { + s = noHeaderSize + } + putUint64LE(data[5:], s) + + return data, nil +} + +// unmarshalBinary unmarshals the header. +func (h *header) unmarshalBinary(data []byte) error { + if len(data) != HeaderLen { + return errors.New("lzma.unmarshalBinary: data has wrong length") + } + + // properties + var err error + if h.properties, err = PropertiesForCode(data[0]); err != nil { + return err + } + + // dictionary capacity + h.dictCap = int(uint32LE(data[1:])) + if h.dictCap < 0 { + return errors.New( + "LZMA header: dictionary capacity exceeds maximum " + + "integer") + } + + // uncompressed size + s := uint64LE(data[5:]) + if s == noHeaderSize { + h.size = -1 + } else { + h.size = int64(s) + if h.size < 0 { + return errors.New( + "LZMA header: uncompressed size " + + "out of int64 range") + } + } + + return nil +} + +// validDictCap checks whether the dictionary capacity is correct. This +// is used to weed out wrong file headers. 
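+//
+// Editorial note: the accepted capacities are 2^n and 2^n+2^(n-1) for
+// n >= 10, as well as MaxDictCap itself, e.g.
+//
+//	validDictCap(1536) // true: 1<<10 + 1<<9
+//	validDictCap(1537) // false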
+func validDictCap(dictcap int) bool {
+	if int64(dictcap) == MaxDictCap {
+		return true
+	}
+	for n := uint(10); n < 32; n++ {
+		if dictcap == 1<<n {
+			return true
+		}
+		if dictcap == 1<<n+1<<(n-1) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary capacities of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
+func ValidHeader(data []byte) bool {
+	var h header
+	if err := h.unmarshalBinary(data); err != nil {
+		return false
+	}
+	if !validDictCap(h.dictCap) {
+		return false
+	}
+	return h.size < 0 || h.size <= 1<<38
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go
new file mode 100644
index 0000000..ac6a71a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go
@@ -0,0 +1,398 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+const (
+	// maximum size of compressed data in a chunk
+	maxCompressed = 1 << 16
+	// maximum size of uncompressed data in a chunk
+	maxUncompressed = 1 << 21
+)
+
+// chunkType represents the type of an LZMA2 chunk. Note that this
+// value is an internal representation and not an actual encoding of a
+// LZMA2 chunk header.
+type chunkType byte
+
+// Possible values for the chunk type.
+const (
+	// end of stream
+	cEOS chunkType = iota
+	// uncompressed; reset dictionary
+	cUD
+	// uncompressed; no reset of dictionary
+	cU
+	// LZMA compressed; no reset
+	cL
+	// LZMA compressed; reset state
+	cLR
+	// LZMA compressed; reset state; new property value
+	cLRN
+	// LZMA compressed; reset state; new property value; reset dictionary
+	cLRND
+)
+
+// chunkTypeStrings provide a string representation for the chunk types.
+var chunkTypeStrings = [...]string{
+	cEOS:  "EOS",
+	cU:    "U",
+	cUD:   "UD",
+	cL:    "L",
+	cLR:   "LR",
+	cLRN:  "LRN",
+	cLRND: "LRND",
+}
+
+// String returns a string representation of the chunk type.
+func (c chunkType) String() string {
+	if !(cEOS <= c && c <= cLRND) {
+		return "unknown"
+	}
+	return chunkTypeStrings[c]
+}
+
+// Actual encodings for the chunk types in the value. Note that the high
+// uncompressed size bits are stored in the header byte additionally.
+const (
+	hEOS  = 0
+	hUD   = 1
+	hU    = 2
+	hL    = 1 << 7
+	hLR   = 1<<7 | 1<<5
+	hLRN  = 1<<7 | 1<<6
+	hLRND = 1<<7 | 1<<6 | 1<<5
+)
+
+// errHeaderByte indicates an unsupported value for the chunk header
+// byte. This byte starts the variable-length chunk header.
+var errHeaderByte = errors.New("lzma: unsupported chunk header byte")
+
+// headerChunkType converts the header byte into a chunk type. It
+// ignores the uncompressed size bits in the chunk header byte.
+func headerChunkType(h byte) (c chunkType, err error) {
+	if h&hL == 0 {
+		// no compression
+		switch h {
+		case hEOS:
+			c = cEOS
+		case hUD:
+			c = cUD
+		case hU:
+			c = cU
+		default:
+			return 0, errHeaderByte
+		}
+		return
+	}
+	switch h & hLRND {
+	case hL:
+		c = cL
+	case hLR:
+		c = cLR
+	case hLRN:
+		c = cLRN
+	case hLRND:
+		c = cLRND
+	default:
+		return 0, errHeaderByte
+	}
+	return
+}
+
+// uncompressedHeaderLen provides the length of an uncompressed header
+const uncompressedHeaderLen = 3
+
+// headerLen returns the length of the LZMA2 header for a given chunk
+// type.
+func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. +func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. 
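+//
+// Editorial example: putUint16BE(p, 0x1234) stores p[0] = 0x12 and
+// p[1] = 0x34, so uint16BE(p) round-trips to 0x1234:
+//
+//	p := make([]byte, 2)
+//	putUint16BE(p, 0x1234)
+//	_ = uint16BE(p) // 0x1234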
+func putUint16BE(p []byte, x uint16) {
+	p[0] = byte(x >> 8)
+	p[1] = byte(x)
+}
+
+// chunkState is used to manage the state of the chunks.
+type chunkState byte
+
+// start and stop define the initial and terminating state of the chunk
+// state.
+const (
+	start chunkState = 'S'
+	stop             = 'T'
+)
+
+// errors for the chunk state handling
+var (
+	errChunkType = errors.New("lzma: unexpected chunk type")
+	errState     = errors.New("lzma: wrong chunk state")
+)
+
+// next transitions state based on chunk type input
+func (c *chunkState) next(ctype chunkType) error {
+	switch *c {
+	// start state
+	case 'S':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// normal LZMA mode
+	case 'L':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cU:
+			*c = 'U'
+		case cL, cLR, cLRN, cLRND:
+			break
+		default:
+			return errChunkType
+		}
+	// reset required
+	case 'R':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD, cU:
+			break
+		case cLRN, cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// uncompressed
+	case 'U':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cU:
+			break
+		case cL, cLR, cLRN, cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// terminal state
+	case 'T':
+		return errChunkType
+	default:
+		return errState
+	}
+	return nil
+}
+
+// defaultChunkType returns the default chunk type for each chunk state.
+func (c chunkState) defaultChunkType() chunkType {
+	switch c {
+	case 'S':
+		return cLRND
+	case 'L', 'U':
+		return cL
+	case 'R':
+		return cLRN
+	default:
+		// no error
+		return cEOS
+	}
+}
+
+// maxDictCap defines the maximum dictionary capacity supported by the
+// LZMA2 dictionary capacity encoding.
+const maxDictCap = 1<<32 - 1
+
+// maxDictCapCode defines the maximum dictionary capacity code.
+const maxDictCapCode = 40
+
+// The function decodes the dictionary capacity byte, but doesn't check
+// that the byte is in the correct range.
+func decodeDictCap(c byte) int64 {
+	return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
+}
+
+// DecodeDictCap decodes the encoded dictionary capacity. The function
+// returns an error if the code is out of range.
+func DecodeDictCap(c byte) (n int64, err error) {
+	if c >= maxDictCapCode {
+		if c == maxDictCapCode {
+			return maxDictCap, nil
+		}
+		return 0, errors.New("lzma: invalid dictionary size code")
+	}
+	return decodeDictCap(c), nil
+}
+
+// EncodeDictCap encodes a dictionary capacity. The function returns the
+// code for the capacity that is greater than or equal to n. If n
+// exceeds the maximum supported dictionary capacity, the maximum value
+// is returned.
+func EncodeDictCap(n int64) byte {
+	a, b := byte(0), byte(40)
+	for a < b {
+		c := a + (b-a)>>1
+		m := decodeDictCap(c)
+		if n <= m {
+			if n == m {
+				return c
+			}
+			b = c
+		} else {
+			a = c + 1
+		}
+	}
+	return a
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
new file mode 100644
index 0000000..e517730
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
@@ -0,0 +1,129 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// maxPosBits defines the number of bits of the position value that are
+// used to compute the posState value. The value is used to select the
+// tree codec for length encoding and decoding.
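+//
+// Editorial sketch (an assumption based on the general LZMA scheme,
+// not a quote of this package's code): the posState is taken from the
+// low bits of the stream position, so with maxPosBits = 4 it ranges
+// over 0..15:
+//
+//	posState := uint32(pos) & ((1 << maxPosBits) - 1)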
+const maxPosBits = 4 + +// minMatchLen and maxMatchLen give the minimum and maximum values for +// encoding and decoding length values. minMatchLen is also used as base +// for the encoded length values. +const ( + minMatchLen = 2 + maxMatchLen = minMatchLen + 16 + 256 - 1 +) + +// lengthCodec support the encoding of the length value. +type lengthCodec struct { + choice [2]prob + low [1 << maxPosBits]treeCodec + mid [1 << maxPosBits]treeCodec + high treeCodec +} + +// deepcopy initializes the lc value as deep copy of the source value. +func (lc *lengthCodec) deepcopy(src *lengthCodec) { + if lc == src { + return + } + lc.choice = src.choice + for i := range lc.low { + lc.low[i].deepcopy(&src.low[i]) + } + for i := range lc.mid { + lc.mid[i].deepcopy(&src.mid[i]) + } + lc.high.deepcopy(&src.high) +} + +// init initializes a new length codec. +func (lc *lengthCodec) init() { + for i := range lc.choice { + lc.choice[i] = probInit + } + for i := range lc.low { + lc.low[i] = makeTreeCodec(3) + } + for i := range lc.mid { + lc.mid[i] = makeTreeCodec(3) + } + lc.high = makeTreeCodec(8) +} + +// lBits gives the number of bits used for the encoding of the l value +// provided to the range encoder. +func lBits(l uint32) int { + switch { + case l < 8: + return 4 + case l < 16: + return 5 + default: + return 10 + } +} + +// Encode encodes the length offset. The length offset l can be compute by +// subtracting minMatchLen (2) from the actual length. +// +// l = length - minMatchLen +// +func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, +) (err error) { + if l > maxMatchLen-minMatchLen { + return errors.New("lengthCodec.Encode: l out of range") + } + if l < 8 { + if err = lc.choice[0].Encode(e, 0); err != nil { + return + } + return lc.low[posState].Encode(e, l) + } + if err = lc.choice[0].Encode(e, 1); err != nil { + return + } + if l < 16 { + if err = lc.choice[1].Encode(e, 0); err != nil { + return + } + return lc.mid[posState].Encode(e, l-8) + } + if err = lc.choice[1].Encode(e, 1); err != nil { + return + } + if err = lc.high.Encode(e, l-16); err != nil { + return + } + return nil +} + +// Decode reads the length offset. Add minMatchLen to compute the actual length +// to the length offset l. +func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, +) (l uint32, err error) { + var b uint32 + if b, err = lc.choice[0].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.low[posState].Decode(d) + return + } + if b, err = lc.choice[1].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.mid[posState].Decode(d) + l += 8 + return + } + l, err = lc.high.Decode(d) + l += 16 + return +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go new file mode 100644 index 0000000..c949d6e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -0,0 +1,132 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// literalCodec supports the encoding of literal. It provides 768 probability +// values per literal state. The upper 512 probabilities are used with the +// context of a match bit. +type literalCodec struct { + probs []prob +} + +// deepcopy initializes literal codec c as a deep copy of the source. 
+func (c *literalCodec) deepcopy(src *literalCodec) {
+	if c == src {
+		return
+	}
+	c.probs = make([]prob, len(src.probs))
+	copy(c.probs, src.probs)
+}
+
+// init initializes the literal codec.
+func (c *literalCodec) init(lc, lp int) {
+	switch {
+	case !(minLC <= lc && lc <= maxLC):
+		panic("lc out of range")
+	case !(minLP <= lp && lp <= maxLP):
+		panic("lp out of range")
+	}
+	c.probs = make([]prob, 0x300<<uint(lc+lp))
+	for i := range c.probs {
+		c.probs[i] = probInit
+	}
+}
+
+// Encode encodes the byte s using the range encoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, s byte,
+	state uint32, match byte, litState uint32,
+) (err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	r := uint32(s)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			bit := (r >> 7) & 1
+			r <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			if err = probs[i].Encode(e, bit); err != nil {
+				return
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit := (r >> 7) & 1
+		r <<= 1
+		if err = probs[symbol].Encode(e, bit); err != nil {
+			return
+		}
+		symbol = (symbol << 1) | bit
+	}
+	return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+	state uint32, match byte, litState uint32,
+) (s byte, err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			bit, err := d.DecodeBit(&probs[i])
+			if err != nil {
+				return 0, err
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit, err := d.DecodeBit(&probs[symbol])
+		if err != nil {
+			return 0, err
+		}
+		symbol = (symbol << 1) | bit
+	}
+	s = byte(symbol - 0x100)
+	return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+	minLC = 0
+	maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+	minLP = 0
+	maxLP = 4
+)
+
+// minState and maxState define the range for the state values of the
+// operation codec.
+const (
+	minState = 0
+	maxState = 11
+)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 0000000..4a244eb
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+	HashTable4 MatchAlgorithm = iota
+	BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+	HashTable4: "HashTable4",
+	BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the Matcher.
+func (a MatchAlgorithm) String() string {
+	if s, ok := maStrings[a]; ok {
+		return s
+	}
+	return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+	"lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
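+// An illustrative sketch of the intended use:
+//
+//	a := BinaryTree
+//	if err := a.verify(); err != nil {
+//		a = HashTable4 // fall back to the zero-value matcher
+//	}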
+func (a MatchAlgorithm) verify() error { + if _, ok := maStrings[a]; !ok { + return errUnsupportedMatchAlgorithm + } + return nil +} + +func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { + switch a { + case HashTable4: + return newHashTable(dictCap, 4) + case BinaryTree: + return newBinTree(dictCap) + } + return nil, errUnsupportedMatchAlgorithm +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go new file mode 100644 index 0000000..733bb99 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -0,0 +1,80 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// verify checks whether the match is valid. If that is not the case an +// error is returned. +func (m match) verify() error { + if !(minDistance <= m.distance && m.distance <= maxDistance) { + return errors.New("distance out of range") + } + if !(1 <= m.n && m.n <= maxMatchLen) { + return errors.New("length out of range") + } + return nil +} + +// l return the l-value for the match, which is the difference of length +// n and 2. +func (m match) l() uint32 { + return uint32(m.n - minMatchLen) +} + +// dist returns the dist value for the match, which is one less of the +// distance stored in the match. +func (m match) dist() uint32 { + return uint32(m.distance - minDistance) +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 0000000..24d50ec --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. 
The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 0000000..23418e2 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. +const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 0000000..6361c5e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,248 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. 
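+//
+// As a sketch of the bit-encoding step (see EncodeBit below): the
+// interval [low, low+nrange) is split at bound = (nrange>>probbits)*p;
+// encoding a 0 keeps the lower part, while encoding a 1 adds bound to
+// low, and the resulting carry can ripple into bytes that were already
+// shifted out, which is what cache and cacheLen compensate for.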
+type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. +func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// init initializes the range decoder, by reading from the byte reader. +func (d *rangeDecoder) init() error { + d.nrange = 0xffffffff + d.code = 0 + + b, err := d.br.ReadByte() + if err != nil { + return err + } + if b != 0 { + return errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return err + } + } + + if d.code >= d.nrange { + return errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return nil +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. 
+func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. +func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 0000000..2ef3dca --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. 
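+//
+// A hypothetical usage sketch (the file name is a placeholder):
+//
+//	f, err := os.Open("data.lzma")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	r, err := lzma.NewReader(bufio.NewReader(f))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err = io.Copy(os.Stdout, r); err != nil {
+//		log.Fatal(err)
+//	}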
+type Reader struct {
+	lzma io.Reader
+	h    header
+	d    *decoder
+}
+
+// NewReader creates a new reader for an LZMA stream using the classic
+// format. NewReader reads and checks the header of the LZMA stream.
+func NewReader(lzma io.Reader) (r *Reader, err error) {
+	return ReaderConfig{}.NewReader(lzma)
+}
+
+// NewReader creates a new reader for an LZMA stream in the classic
+// format. The function reads and verifies the header of the LZMA
+// stream.
+func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	data := make([]byte, HeaderLen)
+	if _, err := io.ReadFull(lzma, data); err != nil {
+		if err == io.EOF {
+			return nil, errors.New("lzma: unexpected EOF")
+		}
+		return nil, err
+	}
+	r = &Reader{lzma: lzma}
+	if err = r.h.unmarshalBinary(data); err != nil {
+		return nil, err
+	}
+	if r.h.dictCap < MinDictCap {
+		return nil, errors.New("lzma: dictionary capacity too small")
+	}
+	dictCap := r.h.dictCap
+	if c.DictCap > dictCap {
+		dictCap = c.DictCap
+	}
+
+	state := newState(r.h.properties)
+	dict, err := newDecoderDict(dictCap)
+	if err != nil {
+		return nil, err
+	}
+	r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size)
+	if err != nil {
+		return nil, err
+	}
+	return r, nil
+}
+
+// EOSMarker indicates that an EOS marker has been encountered.
+func (r *Reader) EOSMarker() bool {
+	return r.d.eosMarker
+}
+
+// Read returns uncompressed data.
+func (r *Reader) Read(p []byte) (n int, err error) {
+	return r.d.Read(p)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
new file mode 100644
index 0000000..a55cfaa
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
@@ -0,0 +1,232 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+
+	"github.com/ulikunitz/xz/internal/xlog"
+)
+
+// Reader2Config stores the parameters for the LZMA2 reader.
+type Reader2Config struct {
+	DictCap int
+}
+
+// fill converts the zero values of the configuration to the default values.
+func (c *Reader2Config) fill() {
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+}
+
+// Verify checks the reader configuration for errors. Zero configuration values
+// will be replaced by default values.
+func (c *Reader2Config) Verify() error {
+	c.fill()
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	return nil
+}
+
+// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
+// first chunk should have a dictionary reset and the first compressed
+// chunk a properties reset. The chunk sequence need not be terminated by
+// an end-of-stream chunk.
+type Reader2 struct {
+	r   io.Reader
+	err error
+
+	dict        *decoderDict
+	ur          *uncompressedReader
+	decoder     *decoder
+	chunkReader io.Reader
+
+	cstate chunkState
+	ctype  chunkType
+}
+
+// NewReader2 creates a reader for an LZMA2 chunk sequence.
+func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
+	return Reader2Config{}.NewReader2(lzma2)
+}
+
+// NewReader2 creates an LZMA2 reader using the given configuration.
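+// A hypothetical sketch, with the dictionary capacity as an arbitrary
+// example value and src any io.Reader delivering LZMA2 chunks:
+//
+//	cfg := Reader2Config{DictCap: 16 * 1024 * 1024}
+//	r2, err := cfg.NewReader2(src)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err = io.Copy(os.Stdout, r2); err != nil {
+//		log.Fatal(err)
+//	}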
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. +func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. 
+func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 0000000..5023510 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,151 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// initState initializes the state. +func initState(s *state, p Properties) { + *s = state{Properties: p} + s.Reset() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. 
+func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 0000000..504b3d7 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// treeCodec encodes or decodes values with a fixed bit size. It is using a +// tree of probability value. The root of the tree is the most-significant bit. +type treeCodec struct { + probTree +} + +// makeTreeCodec makes a tree codec. The bits value must be inside the range +// [1,32]. +func makeTreeCodec(bits int) treeCodec { + return treeCodec{makeProbTree(bits)} +} + +// deepcopy initializes tc as a deep copy of the source. +func (tc *treeCodec) deepcopy(src *treeCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// Encode uses the range encoder to encode a fixed-bit-size value. +func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { + m := uint32(1) + for i := int(tc.bits) - 1; i >= 0; i-- { + b := (v >> uint(i)) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may +// be caused by the range decoder. +func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := 0; j < int(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + } + return m - (1 << uint(tc.bits)), nil +} + +// treeReverseCodec is another tree codec, where the least-significant bit is +// the start of the probability tree. +type treeReverseCodec struct { + probTree +} + +// deepcopy initializes the treeReverseCodec as a deep copy of the +// source. +func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must +// be in the range [1,32]. +func makeTreeReverseCodec(bits int) treeReverseCodec { + return treeReverseCodec{makeProbTree(bits)} +} + +// Encode uses range encoder to encode a fixed-bit-size value. The range +// encoder may cause errors. +func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { + m := uint32(1) + for i := uint(0); i < uint(tc.bits); i++ { + b := (v >> i) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors +// returned by the range decoder will be returned. 
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := uint(0); j < uint(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+		v |= b << j
+	}
+	return v, nil
+}
+
+// probTree stores enough probability values to be used by the treeEncode and
+// treeDecode methods of the range coder types.
+type probTree struct {
+	probs []prob
+	bits  byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+	if t == src {
+		return
+	}
+	t.probs = make([]prob, len(src.probs))
+	copy(t.probs, src.probs)
+	t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+	if !(1 <= bits && bits <= 32) {
+		panic("bits outside of range [1,32]")
+	}
+	t := probTree{
+		bits:  byte(bits),
+		probs: make([]prob, 1<<uint(bits)),
+	}
+	return t
+}
+
+// Bits provides the number of bits for the values to be encoded or
+// decoded.
+func (t *probTree) Bits() int {
+	return int(t.bits)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bufio"
+	"errors"
+	"io"
+)
+
+// WriterConfig defines the configuration parameters for a Writer.
+type WriterConfig struct {
+	// The properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+	// SizeInHeader indicates that the header will contain the size.
+	SizeInHeader bool
+	// Size of the data to be encoded. A positive value will imply
+	// SizeInHeader.
+	Size int64
+	// EOSMarker requests that an EOS marker is written.
+	EOSMarker bool
+}
+
+// fill converts zero-value fields to their explicit default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.Size > 0 {
+		c.SizeInHeader = true
+	}
+	if !c.SizeInHeader {
+		c.EOSMarker = true
+	}
+}
+
+// Verify checks WriterConfig for errors. Verify will replace zero
+// values with default values.
+func (c *WriterConfig) Verify() error {
+	if c == nil {
+		return errors.New("lzma: WriterConfig is nil")
+	}
+	c.fill()
+	var err error
+	if c.Properties == nil {
+		return errors.New("lzma: WriterConfig has no Properties set")
+	}
+	if err = c.Properties.verify(); err != nil {
+		return err
+	}
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	if !(maxMatchLen <= c.BufSize) {
+		return errors.New("lzma: lookahead buffer size too small")
+	}
+	if c.SizeInHeader {
+		if c.Size < 0 {
+			return errors.New("lzma: negative size not supported")
+		}
+	} else if !c.EOSMarker {
+		return errors.New("lzma: EOS marker is required")
+	}
+	if err = c.Matcher.verify(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// header returns the header structure for this configuration.
+func (c *WriterConfig) header() header {
+	h := header{
+		properties: *c.Properties,
+		dictCap:    c.DictCap,
+		size:       -1,
+	}
+	if c.SizeInHeader {
+		h.size = c.Size
+	}
+	return h
+}
+
+// Writer writes an LZMA stream in the classic format.
+type Writer struct {
+	h   header
+	bw  io.ByteWriter
+	buf *bufio.Writer
+	e   *encoder
+}
+
+// NewWriter creates a new LZMA writer for the classic format. The
+// method will write the header to the underlying stream.
+func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{h: c.header()}
+
+	var ok bool
+	w.bw, ok = lzma.(io.ByteWriter)
+	if !ok {
+		w.buf = bufio.NewWriter(lzma)
+		w.bw = w.buf
+	}
+	state := newState(w.h.properties)
+	m, err := c.Matcher.new(w.h.dictCap)
+	if err != nil {
+		return nil, err
+	}
+	dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
+	if err != nil {
+		return nil, err
+	}
+	var flags encoderFlags
+	if c.EOSMarker {
+		flags = eosMarker
+	}
+	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
+		return nil, err
+	}
+
+	if err = w.writeHeader(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// NewWriter creates a new LZMA writer using the classic format. The
+// function writes the header to the underlying stream.
+func NewWriter(lzma io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(lzma)
+}
+
+// writeHeader writes the LZMA header into the stream.
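+// In the classic .lzma format the marshaled header occupies HeaderLen
+// (13) bytes: one properties code byte, a little-endian uint32
+// dictionary capacity, and a little-endian uint64 uncompressed size,
+// where all size bits set signals an unknown size.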
+func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. +func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 0000000..7c1afe1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. +func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. 
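+//
+// A hypothetical usage sketch (data is a placeholder byte slice):
+//
+//	var buf bytes.Buffer
+//	w2, err := NewWriter2(&buf)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err = w2.Write(data); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err = w2.Close(); err != nil {
+//		log.Fatal(err)
+//	}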
+type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. +func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. 
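+// Per chunkHeader.MarshalBinary above, the emitted header consists of
+// the control byte (chunk type plus the high bits of uncompressed size
+// minus one), two big-endian bytes of uncompressed size minus one, two
+// big-endian bytes of compressed size minus one and, for property
+// resets, a final properties byte.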
+func (w *Writer2) writeCompressedChunk() error {
+	if w.ctype == cU || w.ctype == cUD {
+		panic("chunk type uncompressed")
+	}
+
+	u := w.encoder.Compressed()
+	if u <= 0 {
+		return errors.New("writeCompressedChunk: empty chunk")
+	}
+	if u > maxUncompressed {
+		panic("overrun of uncompressed data limit")
+	}
+	c := w.buf.Len()
+	if c <= 0 {
+		panic("no compressed data")
+	}
+	if c > maxCompressed {
+		panic("overrun of compressed data limit")
+	}
+	header := chunkHeader{
+		ctype:        w.ctype,
+		uncompressed: uint32(u - 1),
+		compressed:   uint16(c - 1),
+		props:        w.encoder.state.Properties,
+	}
+	hdata, err := header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.w.Write(hdata); err != nil {
+		return err
+	}
+	_, err = io.Copy(w.w, &w.buf)
+	return err
+}
+
+// writeChunk writes a single chunk to the underlying writer.
+func (w *Writer2) writeChunk() error {
+	u := int(uncompressedHeaderLen + w.encoder.Compressed())
+	c := headerLen(w.ctype) + w.buf.Len()
+	if u < c {
+		return w.writeUncompressedChunk()
+	}
+	return w.writeCompressedChunk()
+}
+
+// flushChunk terminates the current chunk. The encoder will be reset
+// to support the next chunk.
+func (w *Writer2) flushChunk() error {
+	if w.written() == 0 {
+		return nil
+	}
+	var err error
+	if err = w.encoder.Close(); err != nil {
+		return err
+	}
+	if err = w.writeChunk(); err != nil {
+		return err
+	}
+	w.buf.Reset()
+	w.lbw.N = maxCompressed
+	if err = w.encoder.Reopen(&w.lbw); err != nil {
+		return err
+	}
+	if err = w.cstate.next(w.ctype); err != nil {
+		return err
+	}
+	w.ctype = w.cstate.defaultChunkType()
+	w.start = cloneState(w.encoder.state)
+	return nil
+}
+
+// Flush writes all buffered data out to the underlying stream. This
+// could result in multiple chunks being created.
+func (w *Writer2) Flush() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	for w.written() > 0 {
+		if err := w.flushChunk(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close terminates the LZMA2 stream with an EOS chunk.
+func (w *Writer2) Close() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	// write zero byte EOS chunk
+	_, err := w.w.Write([]byte{0})
+	if err != nil {
+		return err
+	}
+	w.cstate = stop
+	return nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 0000000..69cf5f7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// LZMA filter constants.
+const (
+	lzmaFilterID  = 0x21
+	lzmaFilterLen = 3
+)
+
+// lzmaFilter declares the LZMA2 filter information stored in an xz
+// block header.
+type lzmaFilter struct {
+	dictCap int64
+}
+
+// String returns a representation of the LZMA filter.
+func (f lzmaFilter) String() string {
+	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
+}
+
+// id returns the ID for the LZMA2 filter.
+func (f lzmaFilter) id() uint64 { return lzmaFilterID }
+
+// MarshalBinary converts the lzmaFilter into its encoded representation.
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
+	c := lzma.EncodeDictCap(f.dictCap)
+	return []byte{lzmaFilterID, 1, c}, nil
+}
+
+// UnmarshalBinary unmarshals the given data representation of the LZMA2
+// filter.
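+//
+// Per MarshalBinary above, a valid encoding is exactly three bytes,
+// for example []byte{0x21, 1, c}, where c is a dictionary capacity
+// code accepted by lzma.DecodeDictCap.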
+func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. +func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100644 index 0000000..a8c612c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 0000000..0634c6b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,373 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// fill replaces all zero values with their default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. 
+func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. +type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// errIndex indicates an error with the xz file index. +var errIndex = errors.New("xz: error in xz file index") + +// readTail reads the index body and the xz footer. 
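+// Per the xz file-format specification, the footerLen (12) bytes read
+// here are expected to hold a CRC32 over the following six bytes, a
+// 4-byte backward size, the 2-byte stream flags and the magic bytes
+// "YZ"; f.UnmarshalBinary decodes that layout.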
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if len(index) != len(r.index) { + return fmt.Errorf("xz: index length is %d; want %d", + len(index), len(r.index)) + } + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader + err error +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + br.r = io.TeeReader(fr, br.hash) + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// errBlockSize indicates that the size of the block in the block header +// is wrong. +var errBlockSize = errors.New("xz: wrong uncompressed size for block") + +// Read reads data from the block. 
+func (br *blockReader) Read(p []byte) (n int, err error) {
+	n, err = br.r.Read(p)
+	br.n += int64(n)
+
+	u := br.header.uncompressedSize
+	if u >= 0 && br.uncompressedSize() > u {
+		return n, errors.New("xz: wrong uncompressed size for block")
+	}
+	c := br.header.compressedSize
+	if c >= 0 && br.compressedSize() > c {
+		return n, errors.New("xz: wrong compressed size for block")
+	}
+	if err != io.EOF {
+		return n, err
+	}
+	if br.uncompressedSize() < u || br.compressedSize() < c {
+		return n, io.ErrUnexpectedEOF
+	}
+
+	s := br.hash.Size()
+	k := padLen(br.lxz.n)
+	q := make([]byte, k+s, k+2*s)
+	if _, err = io.ReadFull(br.lxz.r, q); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return n, err
+	}
+	if !allZeros(q[:k]) {
+		return n, errors.New("xz: non-zero block padding")
+	}
+	checkSum := q[k:]
+	computedSum := br.hash.Sum(checkSum[s:])
+	if !bytes.Equal(checkSum, computedSum) {
+		return n, errors.New("xz: checksum error for block")
+	}
+	return n, io.EOF
+}
+
+func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader,
+	err error) {
+
+	if err = verifyFilters(f); err != nil {
+		return nil, err
+	}
+
+	fr = r
+	for i := len(f) - 1; i >= 0; i-- {
+		fr, err = f[i].reader(fr, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fr, nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go
new file mode 100644
index 0000000..c126f70
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/writer.go
@@ -0,0 +1,386 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"hash"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// WriterConfig describes the parameters for an xz writer.
+type WriterConfig struct {
+	Properties *lzma.Properties
+	DictCap    int
+	BufSize    int
+	BlockSize  int64
+	// checksum method: CRC32, CRC64 or SHA256
+	CheckSum byte
+	// match algorithm
+	Matcher lzma.MatchAlgorithm
+}
+
+// fill replaces zero values with default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.BlockSize == 0 {
+		c.BlockSize = maxInt64
+	}
+	if c.CheckSum == 0 {
+		c.CheckSum = CRC64
+	}
+}
+
+// Verify checks the configuration for errors. Zero values will be
+// replaced by default values.
+func (c *WriterConfig) Verify() error {
+	if c == nil {
+		return errors.New("xz: writer configuration is nil")
+	}
+	c.fill()
+	lc := lzma.Writer2Config{
+		Properties: c.Properties,
+		DictCap:    c.DictCap,
+		BufSize:    c.BufSize,
+		Matcher:    c.Matcher,
+	}
+	if err := lc.Verify(); err != nil {
+		return err
+	}
+	if c.BlockSize <= 0 {
+		return errors.New("xz: block size out of range")
+	}
+	if err := verifyFlags(c.CheckSum); err != nil {
+		return err
+	}
+	return nil
+}
+
+// filters creates the filter list for the given parameters.
+func (c *WriterConfig) filters() []filter {
+	return []filter{&lzmaFilter{int64(c.DictCap)}}
+}
+
+// maxInt64 defines the maximum 64-bit signed integer.
+const maxInt64 = 1<<63 - 1
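Since fill and Verify replace zero values with defaults, a zero WriterConfig is usable as-is and only explicitly set fields deviate. A hedged sketch of a customized configuration, consumed by the NewWriter constructor that follows (the CRC32 constant is assumed to be exported by this package, as the CheckSum comment above suggests):

```go
package main

import (
	"bytes"
	"log"

	"github.com/ulikunitz/xz"
)

func main() {
	var buf bytes.Buffer

	// Zero fields (Properties, BufSize, ...) are filled with defaults
	// by Verify; only the fields set here deviate from them.
	w, err := xz.WriterConfig{
		DictCap:   1 << 20,  // 1 MiB LZMA2 dictionary
		BlockSize: 1 << 16,  // start a new block every 64 KiB of input
		CheckSum:  xz.CRC32, // instead of the CRC64 default
	}.NewWriter(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello, xz")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // writes index and footer
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}
```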
+// verifyFilters checks the filter list for the length and the right
+// sequence of filters.
+func verifyFilters(f []filter) error {
+	if len(f) == 0 {
+		return errors.New("xz: no filters")
+	}
+	if len(f) > 4 {
+		return errors.New("xz: more than four filters")
+	}
+	for _, g := range f[:len(f)-1] {
+		if g.last() {
+			return errors.New("xz: last filter is not last")
+		}
+	}
+	if !f[len(f)-1].last() {
+		return errors.New("xz: wrong last filter")
+	}
+	return nil
+}
+
+// newFilterWriteCloser converts a filter list into a WriteCloser that
+// can be used by a blockWriter.
+func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) {
+	if err = verifyFilters(f); err != nil {
+		return nil, err
+	}
+	fw = nopWriteCloser(w)
+	for i := len(f) - 1; i >= 0; i-- {
+		fw, err = f[i].writeCloser(fw, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fw, nil
+}
+
+// nopWCloser implements a WriteCloser with a Close method not doing
+// anything.
+type nopWCloser struct {
+	io.Writer
+}
+
+// Close returns nil and doesn't do anything else.
+func (c nopWCloser) Close() error {
+	return nil
+}
+
+// nopWriteCloser converts the Writer into a WriteCloser with a Close
+// function that does nothing beside returning nil.
+func nopWriteCloser(w io.Writer) io.WriteCloser {
+	return nopWCloser{w}
+}
+
+// Writer compresses data written to it. It is an io.WriteCloser.
+type Writer struct {
+	WriterConfig
+
+	xz      io.Writer
+	bw      *blockWriter
+	newHash func() hash.Hash
+	h       header
+	index   []record
+	closed  bool
+}
+
+// newBlockWriter creates a new block writer and writes the header out.
+func (w *Writer) newBlockWriter() error {
+	var err error
+	w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash())
+	if err != nil {
+		return err
+	}
+	if err = w.bw.writeHeader(w.xz); err != nil {
+		return err
+	}
+	return nil
+}
+
+// closeBlockWriter closes a block writer and records the sizes in the
+// index.
+func (w *Writer) closeBlockWriter() error {
+	var err error
+	if err = w.bw.Close(); err != nil {
+		return err
+	}
+	w.index = append(w.index, w.bw.record())
+	return nil
+}
+
+// NewWriter creates a new xz writer using default parameters.
+func NewWriter(xz io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(xz)
+}
+
+// NewWriter creates a new Writer using the given configuration parameters.
+func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{
+		WriterConfig: c,
+		xz:           xz,
+		h:            header{c.CheckSum},
+		index:        make([]record, 0, 4),
+	}
+	if w.newHash, err = newHashFunc(c.CheckSum); err != nil {
+		return nil, err
+	}
+	data, err := w.h.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+	if _, err = xz.Write(data); err != nil {
+		return nil, err
+	}
+	if err = w.newBlockWriter(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// Write compresses the uncompressed data provided.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.closed {
+		return 0, errClosed
+	}
+	for {
+		k, err := w.bw.Write(p[n:])
+		n += k
+		if err != errNoSpace {
+			return n, err
+		}
+		if err = w.closeBlockWriter(); err != nil {
+			return n, err
+		}
+		if err = w.newBlockWriter(); err != nil {
+			return n, err
+		}
+	}
+}
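Putting the two halves together, a round trip through an in-memory buffer might look like the following sketch (standard library plus the NewWriter/NewReader entry points above):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/ulikunitz/xz"
)

func main() {
	var buf bytes.Buffer

	w, err := xz.NewWriter(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(w, "The quick brown fox"); err != nil {
		log.Fatal(err)
	}
	// Close finalizes the last block, the index, and the footer; the
	// underlying buffer itself stays open.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	r, err := xz.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // The quick brown fox
}
```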
+// Close closes the writer and adds the footer to the Writer. Close
+// doesn't close the underlying writer.
+func (w *Writer) Close() error {
+	if w.closed {
+		return errClosed
+	}
+	w.closed = true
+	var err error
+	if err = w.closeBlockWriter(); err != nil {
+		return err
+	}
+
+	f := footer{flags: w.h.flags}
+	if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
+		return err
+	}
+	data, err := f.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.xz.Write(data); err != nil {
+		return err
+	}
+	return nil
+}
+
+// countingWriter is a writer that counts all data written to it.
+type countingWriter struct {
+	w io.Writer
+	n int64
+}
+
+// Write writes data to the countingWriter.
+func (cw *countingWriter) Write(p []byte) (n int, err error) {
+	n, err = cw.w.Write(p)
+	cw.n += int64(n)
+	if err == nil && cw.n < 0 {
+		return n, errors.New("xz: counter overflow")
+	}
+	return
+}
+
+// blockWriter writes a single block.
+type blockWriter struct {
+	cxz countingWriter
+	// mw combines io.WriteCloser w and the hash.
+	mw        io.Writer
+	w         io.WriteCloser
+	n         int64
+	blockSize int64
+	closed    bool
+	headerLen int
+
+	filters []filter
+	hash    hash.Hash
+}
+
+// newBlockWriter creates a new block writer.
+func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
+	bw = &blockWriter{
+		cxz:       countingWriter{w: xz},
+		blockSize: c.BlockSize,
+		filters:   c.filters(),
+		hash:      hash,
+	}
+	bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
+	if err != nil {
+		return nil, err
+	}
+	bw.mw = io.MultiWriter(bw.w, bw.hash)
+	return bw, nil
+}
+
+// writeHeader writes the header. If the function is called after Close,
+// the compressedSize and uncompressedSize fields will be filled.
+func (bw *blockWriter) writeHeader(w io.Writer) error {
+	h := blockHeader{
+		compressedSize:   -1,
+		uncompressedSize: -1,
+		filters:          bw.filters,
+	}
+	if bw.closed {
+		h.compressedSize = bw.compressedSize()
+		h.uncompressedSize = bw.uncompressedSize()
+	}
+	data, err := h.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	bw.headerLen = len(data)
+	return nil
+}
+
+// compressedSize returns the amount of data written to the underlying
+// stream.
+func (bw *blockWriter) compressedSize() int64 {
+	return bw.cxz.n
+}
+
+// uncompressedSize returns the number of bytes written to the
+// blockWriter.
+func (bw *blockWriter) uncompressedSize() int64 {
+	return bw.n
+}
+
+// unpaddedSize returns the sum of the header length, the compressed
+// size of the block and the hash size.
+func (bw *blockWriter) unpaddedSize() int64 {
+	if bw.headerLen <= 0 {
+		panic("xz: block header not written")
+	}
+	n := int64(bw.headerLen)
+	n += bw.compressedSize()
+	n += int64(bw.hash.Size())
+	return n
+}
+
+// record returns the record for the current block. Call Close before
+// calling this method.
+func (bw *blockWriter) record() record {
+	return record{bw.unpaddedSize(), bw.uncompressedSize()}
+}
+
+var errClosed = errors.New("xz: writer already closed")
+
+var errNoSpace = errors.New("xz: no space")
+
+// Write writes uncompressed data to the block writer.
+func (bw *blockWriter) Write(p []byte) (n int, err error) {
+	if bw.closed {
+		return 0, errClosed
+	}
+
+	t := bw.blockSize - bw.n
+	if int64(len(p)) > t {
+		err = errNoSpace
+		p = p[:t]
+	}
+
+	var werr error
+	n, werr = bw.mw.Write(p)
+	bw.n += int64(n)
+	if werr != nil {
+		return n, werr
+	}
+	return n, err
+}
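Both blockReader.Read earlier and the Close method below align each block to a four-byte boundary via padLen. The helper is defined elsewhere in this package; the following is an editor's sketch of the rule its call sites imply:

```go
// padLen returns the number of zero bytes needed to pad n up to the next
// multiple of four, per the xz block-padding rule (sketch only; the real
// definition lives elsewhere in this package).
func padLen(n int64) int {
	k := int(n % 4)
	if k > 0 {
		k = 4 - k
	}
	return k
}
```

+
+// Close closes the writer.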
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/zclconf/go-cty/LICENSE b/vendor/github.com/zclconf/go-cty/LICENSE new file mode 100644 index 0000000..d6503b5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2018 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty/cty/capsule.go b/vendor/github.com/zclconf/go-cty/cty/capsule.go new file mode 100644 index 0000000..4fce92a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/capsule.go @@ -0,0 +1,89 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName() string { + return t.Name +} + +func (t *capsuleType) GoString() string { + // To get a useful representation of our native type requires some + // shenanigans. + victimVal := reflect.Zero(t.GoType) + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) +} + +// Capsule creates a new Capsule type. +// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. A capsule-typed value supports no operations except equality, and +// equality is implemented by pointer identity of the encapsulated pointer. 
+//
+// The given name is used as the new type's "friendly name". This can be any
+// string in principle, but will usually be a short, all-lowercase name aimed
+// at users of the embedding language (i.e. not mentioning Go-specific details)
+// and will ideally not create ambiguity with any predefined cty type.
+//
+// Capsule types are never introduced by any standard cty operation, so a
+// calling application opts in to including them within its own type system
+// by creating them and introducing them via its own functions. At that point,
+// the application is responsible for dealing with any capsule-typed values
+// that might be returned.
+func Capsule(name string, nativeType reflect.Type) Type {
+	return Type{
+		&capsuleType{
+			Name:   name,
+			GoType: nativeType,
+		},
+	}
+}
+
+// IsCapsuleType returns true if this type is a capsule type, as created
+// by cty.Capsule.
+func (t Type) IsCapsuleType() bool {
+	_, ok := t.typeImpl.(*capsuleType)
+	return ok
+}
+
+// EncapsulatedType returns the encapsulated native type of a capsule type,
+// or panics if the receiver is not a Capsule type.
+//
+// Use IsCapsuleType to determine if this method is safe to call.
+func (t Type) EncapsulatedType() reflect.Type {
+	impl, ok := t.typeImpl.(*capsuleType)
+	if !ok {
+		panic("not a capsule type")
+	}
+	return impl.GoType
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/collection.go b/vendor/github.com/zclconf/go-cty/cty/collection.go
new file mode 100644
index 0000000..ab3919b
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/collection.go
@@ -0,0 +1,34 @@
+package cty
+
+import (
+	"errors"
+)
+
+type collectionTypeImpl interface {
+	ElementType() Type
+}
+
+// IsCollectionType returns true if the given type supports the operations
+// that are defined for all collection types.
+func (t Type) IsCollectionType() bool {
+	_, ok := t.typeImpl.(collectionTypeImpl)
+	return ok
+}
+
+// ElementType returns the element type of the receiver if it is a collection
+// type, or panics if it is not. Use IsCollectionType first to test whether
+// this method will succeed.
+func (t Type) ElementType() Type {
+	if ct, ok := t.typeImpl.(collectionTypeImpl); ok {
+		return ct.ElementType()
+	}
+	panic(errors.New("not a collection type"))
+}
+
+// ElementCallback is a callback type used for iterating over elements of
+// collections and attributes of objects.
+//
+// The types of key and value depend on what type is being iterated over.
+// Return true to stop iterating after the current element, or false to
+// continue iterating.
+type ElementCallback func(key Value, val Value) (stop bool)
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
new file mode 100644
index 0000000..d84f6ac
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
@@ -0,0 +1,165 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
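Stepping back to the capsule API above, a concrete illustration may help. This sketch assumes cty.CapsuleVal and Value.EncapsulatedValue, which live elsewhere in the cty package and do not appear in this diff:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/zclconf/go-cty/cty"
)

type handle struct{ ID int }

func main() {
	handleTy := cty.Capsule("handle", reflect.TypeOf(handle{}))

	// Capsule types compare by identity: a second call with the same
	// arguments yields a distinct, non-equal type.
	otherTy := cty.Capsule("handle", reflect.TypeOf(handle{}))
	fmt.Println(handleTy.Equals(otherTy)) // false

	// Capsule values carry a pointer to a native value (CapsuleVal and
	// EncapsulatedValue are assumed here, defined elsewhere in cty).
	v := cty.CapsuleVal(handleTy, &handle{ID: 7})
	fmt.Println(v.EncapsulatedValue().(*handle).ID) // 7
}
```

+
+// compareTypes implements a preference order for unification.
+//
+// The result of this method is not useful for anything other than unification
+// preferences, since it assumes that the caller will verify that any suggested
+// conversion is actually possible and it is thus able to make certain
+// optimistic assumptions.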
+func compareTypes(a cty.Type, b cty.Type) int { + + // DynamicPseudoType always has lowest preference, because anything can + // convert to it (it acts as a placeholder for "any type") and we want + // to optimistically assume that any dynamics will converge on matching + // their neighbors. + if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType { + if a != cty.DynamicPseudoType { + return -1 + } + if b != cty.DynamicPseudoType { + return 1 + } + return 0 + } + + if a.IsPrimitiveType() && b.IsPrimitiveType() { + // String is a supertype of all primitive types, because we can + // represent all primitive values as specially-formatted strings. + if a == cty.String || b == cty.String { + if a != cty.String { + return 1 + } + if b != cty.String { + return -1 + } + return 0 + } + } + + if a.IsListType() && b.IsListType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsSetType() && b.IsSetType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsMapType() && b.IsMapType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + + // From this point on we may have swapped the two items in order to + // simplify our cases. Therefore any non-zero return after this point + // must be multiplied by "swap" to potentially invert the return value + // if needed. + swap := 1 + switch { + case a.IsTupleType() && b.IsListType(): + fallthrough + case a.IsObjectType() && b.IsMapType(): + fallthrough + case a.IsSetType() && b.IsTupleType(): + fallthrough + case a.IsSetType() && b.IsListType(): + a, b = b, a + swap = -1 + } + + if b.IsSetType() && (a.IsTupleType() || a.IsListType()) { + // We'll just optimistically assume that the element types are + // unifyable/convertible, and let a second recursive pass + // figure out how to make that so. + return -1 * swap + } + + if a.IsListType() && b.IsTupleType() { + // We'll just optimistically assume that the tuple's element types + // can be unified into something compatible with the list's element + // type. + return -1 * swap + } + + if a.IsMapType() && b.IsObjectType() { + // We'll just optimistically assume that the object's attribute types + // can be unified into something compatible with the map's element + // type. + return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. 
+	if a.IsObjectType() && b.IsObjectType() {
+		atysA := a.AttributeTypes()
+		atysB := b.AttributeTypes()
+
+		if len(atysA) != len(atysB) {
+			return 0
+		}
+
+		hasASuper := false
+		hasBSuper := false
+		for k := range atysA {
+			if _, has := atysB[k]; !has {
+				return 0
+			}
+
+			cmp := compareTypes(atysA[k], atysB[k])
+			if cmp < 0 {
+				hasASuper = true
+			} else if cmp > 0 {
+				hasBSuper = true
+			}
+		}
+
+		switch {
+		case hasASuper && hasBSuper:
+			return 0
+		case hasASuper:
+			return -1 * swap
+		case hasBSuper:
+			return 1 * swap
+		default:
+			return 0
+		}
+	}
+	if a.IsTupleType() && b.IsTupleType() {
+		etysA := a.TupleElementTypes()
+		etysB := b.TupleElementTypes()
+
+		if len(etysA) != len(etysB) {
+			return 0
+		}
+
+		hasASuper := false
+		hasBSuper := false
+		for i := range etysA {
+			cmp := compareTypes(etysA[i], etysB[i])
+			if cmp < 0 {
+				hasASuper = true
+			} else if cmp > 0 {
+				hasBSuper = true
+			}
+		}
+
+		switch {
+		case hasASuper && hasBSuper:
+			return 0
+		case hasASuper:
+			return -1 * swap
+		case hasBSuper:
+			return 1 * swap
+		default:
+			return 0
+		}
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go
new file mode 100644
index 0000000..7bfcc08
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go
@@ -0,0 +1,120 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// conversion is an internal variant of Conversion that carries around
+// a cty.Path to be used in error responses.
+type conversion func(cty.Value, cty.Path) (cty.Value, error)
+
+func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion {
+	conv := getConversionKnown(in, out, unsafe)
+	if conv == nil {
+		return nil
+	}
+
+	// Wrap the conversion in some standard checks that we don't want to
+	// have to repeat in every conversion function.
+	return func(in cty.Value, path cty.Path) (cty.Value, error) {
+		if !in.IsKnown() {
+			return cty.UnknownVal(out), nil
+		}
+		if in.IsNull() {
+			// We'll pass through nulls, albeit type converted, and let
+			// the caller deal with whatever handling they want to do in
+			// case null values are considered valid in some applications.
+			return cty.NullVal(out), nil
+		}
+
+		return conv(in, path)
+	}
+}
+
+func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion {
+	switch {
+
+	case out == cty.DynamicPseudoType:
+		// Conversion *to* DynamicPseudoType means that the caller wishes
+		// to allow any type in this position, so we'll produce a do-nothing
+		// conversion that just passes through the value as-is.
+		return dynamicPassthrough
+
+	case unsafe && in == cty.DynamicPseudoType:
+		// Conversion *from* DynamicPseudoType means that we have a value
+		// whose type isn't yet known during type checking. For these we will
+		// assume that conversion will succeed and deal with any errors that
+		// result (which is why we can only do this when "unsafe" is set).
+		return dynamicFixup(out)
+
+	case in.IsPrimitiveType() && out.IsPrimitiveType():
+		conv := primitiveConversionsSafe[in][out]
+		if conv != nil {
+			return conv
+		}
+		if unsafe {
+			return primitiveConversionsUnsafe[in][out]
+		}
+		return nil
+
+	case out.IsListType() && (in.IsListType() || in.IsSetType()):
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		if inEty.Equals(outEty) {
+			// This indicates that we're converting from set to list with
+			// the same element type, so we don't need an element converter.
+			return conversionCollectionToList(outEty, nil)
+		}
+
+		convEty := getConversion(inEty, outEty, unsafe)
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToList(outEty, convEty)
+
+	case out.IsSetType() && (in.IsListType() || in.IsSetType()):
+		if in.IsListType() && !unsafe {
+			// Conversion from list to set is unsafe because it will lose
+			// information: the ordering will not be preserved, and any
+			// duplicate elements will be conflated.
+			return nil
+		}
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		convEty := getConversion(inEty, outEty, unsafe)
+		if inEty.Equals(outEty) {
+			// This indicates that we're converting from list to set with
+			// the same element type, so we don't need an element converter.
+			return conversionCollectionToSet(outEty, nil)
+		}
+
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToSet(outEty, convEty)
+
+	case out.IsListType() && in.IsTupleType():
+		outEty := out.ElementType()
+		return conversionTupleToList(in, outEty, unsafe)
+
+	case out.IsMapType() && in.IsObjectType():
+		outEty := out.ElementType()
+		return conversionObjectToMap(in, outEty, unsafe)
+
+	default:
+		return nil
+
+	}
+}
+
+// retConversion wraps a conversion (internal type) so it can be returned
+// as a Conversion (public type).
+func retConversion(conv conversion) Conversion {
+	if conv == nil {
+		return nil
+	}
+
+	return func(in cty.Value) (cty.Value, error) {
+		return conv(in, cty.Path(nil))
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
new file mode 100644
index 0000000..eace85d
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
@@ -0,0 +1,226 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// conversionCollectionToList returns a conversion that will apply the given
+// conversion to all of the elements of a collection (something that supports
+// ForEachElement and LengthInt) and then returns the result as a list.
+//
+// "conv" can be nil if the elements are expected to already be of the
+// correct type and just need to be re-wrapped into a list. (For example,
+// if we're converting from a set into a list of the same element type.)
+func conversionCollectionToList(ety cty.Type, conv conversion) conversion {
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, val.LengthInt())
+		i := int64(0)
+		path = append(path, nil)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			path[len(path)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		if len(elems) == 0 {
+			return cty.ListValEmpty(ety), nil
+		}
+
+		return cty.ListVal(elems), nil
+	}
+}
+
+// conversionCollectionToSet returns a conversion that will apply the given
+// conversion to all of the elements of a collection (something that supports
+// ForEachElement and LengthInt) and then returns the result as a set.
+//
+// "conv" can be nil if the elements are expected to already be of the
+// correct type and just need to be re-wrapped into a set. (For example,
+// if we're converting from a list into a set of the same element type.)
+func conversionCollectionToSet(ety cty.Type, conv conversion) conversion {
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, val.LengthInt())
+		i := int64(0)
+		path = append(path, nil)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			path[len(path)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		if len(elems) == 0 {
+			return cty.SetValEmpty(ety), nil
+		}
+
+		return cty.SetVal(elems), nil
+	}
+}
+
+// conversionTupleToList returns a conversion that will take a value of the
+// given tuple type and return a list of the given element type.
+//
+// Will panic if the given tupleType isn't actually a tuple type.
+func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
+	tupleEtys := tupleType.TupleElementTypes()
+
+	if len(tupleEtys) == 0 {
+		// Empty tuple short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.ListValEmpty(listEty), nil
+		}
+	}
+
+	if listEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		listEty, _ = unify(tupleEtys, unsafe)
+		if listEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make([]conversion, len(tupleEtys))
+	for i, tupleEty := range tupleEtys {
+		if tupleEty.Equals(listEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+		if elemConvs[i] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, len(elemConvs))
+		path = append(path, nil)
+		i := int64(0)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			path[len(path)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		return cty.ListVal(elems), nil
+	}
+}
+
+// conversionObjectToMap returns a conversion that will take a value of the
+// given object type and return a map of the given element type.
+//
+// Will panic if the given objectType isn't actually an object type.
+func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion {
+	objectAtys := objectType.AttributeTypes()
+
+	if len(objectAtys) == 0 {
+		// Empty object short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.MapValEmpty(mapEty), nil
+		}
+	}
+
+	if mapEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		objectAtysList := make([]cty.Type, 0, len(objectAtys))
+		for _, aty := range objectAtys {
+			objectAtysList = append(objectAtysList, aty)
+		}
+		mapEty, _ = unify(objectAtysList, unsafe)
+		if mapEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make(map[string]conversion, len(objectAtys))
+	for name, objectAty := range objectAtys {
+		if objectAty.Equals(mapEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[name] = getConversion(objectAty, mapEty, unsafe)
+		if elemConvs[name] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make(map[string]cty.Value, len(elemConvs))
+		path = append(path, nil)
+		it := val.ElementIterator()
+		for it.Next() {
+			name, val := it.Element()
+			var err error
+
+			path[len(path)-1] = cty.IndexStep{
+				Key: name,
+			}
+
+			conv := elemConvs[name.AsString()]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems[name.AsString()] = val
+		}
+
+		return cty.MapVal(elems), nil
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
new file mode 100644
index 0000000..4d19cf6
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
@@ -0,0 +1,33 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// dynamicFixup deals with just-in-time conversions of values that were
+// input-typed as cty.DynamicPseudoType during analysis, ensuring that
+// we end up with the desired output type once the value is known, or
+// failing with an error if that is not possible.
+//
+// This is in the spirit of the cty philosophy of optimistically assuming that
+// DynamicPseudoType values will become the intended value eventually, and
+// dealing with any inconsistencies during final evaluation.
+func dynamicFixup(wantType cty.Type) conversion {
+	return func(in cty.Value, path cty.Path) (cty.Value, error) {
+		ret, err := Convert(in, wantType)
+		if err != nil {
+			// Re-wrap this error so that the returned path is relative
+			// to the caller's original value, rather than relative to our
+			// conversion value here.
+			return cty.NilVal, path.NewError(err)
+		}
+		return ret, nil
+	}
+}
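Combined with the public Convert entry point defined later in this package, the object-to-map conversion above behaves as in this short sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	obj := cty.ObjectVal(map[string]cty.Value{
		"a": cty.NumberIntVal(1),
		"b": cty.NumberIntVal(2),
	})

	// All attributes are numbers, so no element conversion is needed and
	// the object converts cleanly to a map of numbers.
	m, err := convert.Convert(obj, cty.Map(cty.Number))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.LengthInt()) // 2
}
```

+
+// dynamicPassthrough is an identity conversion that is used when the
+// target type is DynamicPseudoType, indicating that the caller doesn't care
+// which type is returned.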
+func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 0000000..e563ee3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,50 @@ +package convert + +import ( + "math/big" + + "github.com/zclconf/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + f, _, err := big.ParseFloat(val.AsString(), 10, 512, big.ToNearestEven) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return cty.NumberVal(f), nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + }, + }, +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/doc.go b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go new file mode 100644 index 0000000..2037299 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go @@ -0,0 +1,15 @@ +// Package convert contains some routines for converting between cty types. +// The intent of providing this package is to encourage applications using +// cty to have consistent type conversion behavior for maximal interoperability +// when Values pass from one application to another. +// +// The conversions are categorized into two categories. "Safe" conversions are +// ones that are guaranteed to succeed if given a non-null value of the +// appropriate source type. "Unsafe" conversions, on the other hand, are valid +// for only a subset of input values, and thus may fail with an error when +// called for values outside of that valid subset. +// +// The functions whose names end in Unsafe support all of the conversions that +// are supported by the corresponding functions whose names do not have that +// suffix, and then additional unsafe conversions as well. +package convert diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go new file mode 100644 index 0000000..55f44ae --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go @@ -0,0 +1,83 @@ +package convert + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// This file contains the public interface of this package, which is intended +// to be a small, convenient interface designed for easy integration into +// a hypothetical language type checker and interpreter. + +// Conversion is a named function type representing a conversion from a +// value of one type to a value of another type. 
+//
+// The source type for a conversion is always the source type given to
+// the function that returned the Conversion, but there is no way to recover
+// that from a Conversion value itself. If a Conversion is given a value
+// that is not of its expected type (with the exception of DynamicPseudoType,
+// which is always supported) then the function may panic or produce undefined
+// results.
+type Conversion func(in cty.Value) (out cty.Value, err error)
+
+// GetConversion returns a Conversion between the given in and out Types if
+// a safe one is available, or returns nil otherwise.
+func GetConversion(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, false))
+}
+
+// GetConversionUnsafe returns a Conversion between the given in and out Types
+// if either a safe or unsafe one is available, or returns nil otherwise.
+func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, true))
+}
+
+// Convert returns the result of converting the given value to the given type
+// if a safe or unsafe conversion is available, or returns an error if such a
+// conversion is impossible.
+//
+// This is a convenience wrapper around calling GetConversionUnsafe and then
+// immediately passing the given value to the resulting function.
+func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
+	if in.Type().Equals(want) {
+		return in, nil
+	}
+
+	conv := GetConversionUnsafe(in.Type(), want)
+	if conv == nil {
+		return cty.NilVal, fmt.Errorf("incorrect type; %s required", want.FriendlyName())
+	}
+	return conv(in)
+}
+
+// Unify attempts to find the most general type that can be converted from
+// all of the given types. If this is possible, that type is returned along
+// with a slice of necessary conversions for some of the given types.
+//
+// If no common supertype can be found, this function returns cty.NilType and
+// a nil slice.
+//
+// If a common supertype *can* be found, the returned slice will always be
+// non-nil and will contain a non-nil conversion for each given type that
+// needs to be converted, with indices corresponding to the input slice.
+// Any given type that does *not* need conversion (because it is already of
+// the appropriate type) will have a nil Conversion.
+//
+// cty.DynamicPseudoType is, as usual, a special case. If the given type list
+// contains a mixture of dynamic and non-dynamic types, the dynamic types are
+// disregarded for type selection and a conversion is returned for them that
+// will attempt a late conversion of the given value to the target type,
+// failing with a conversion error if the eventual concrete type is not
+// compatible. If *all* given types are DynamicPseudoType, or in the
+// degenerate case of an empty slice of types, the returned type is itself
+// cty.DynamicPseudoType and no conversions are attempted.
+func Unify(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, false)
+}
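A brief sketch of these public entry points in action (only functions documented above are used):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Unsafe conversion: succeeds only for strings that parse as numbers.
	n, err := convert.Convert(cty.StringVal("12.5"), cty.Number)
	fmt.Println(n.AsBigFloat(), err) // 12.5 <nil>

	// Unification prefers String, since every primitive value can be
	// safely rendered as a string.
	ty, convs := convert.Unify([]cty.Type{cty.Number, cty.String})
	fmt.Println(ty.Equals(cty.String)) // true
	fmt.Println(convs[0] != nil)       // true: Number must be converted
	fmt.Println(convs[1] == nil)       // true: String already matches
}
```

+
+// UnifyUnsafe is the same as Unify except that it may return unsafe
+// conversions in situations where a safe conversion isn't also available.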
+func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, true) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go new file mode 100644 index 0000000..b776910 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go @@ -0,0 +1,69 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// sortTypes produces an ordering of the given types that serves as a +// preference order for the result of unification of the given types. +// The return value is a slice of indices into the given slice, and will +// thus always be the same length as the given slice. +// +// The goal is that the most general of the given types will appear first +// in the ordering. If there are uncomparable pairs of types in the list +// then they will appear in an undefined order, and the unification pass +// will presumably then fail. +func sortTypes(tys []cty.Type) []int { + l := len(tys) + + // First we build a graph whose edges represent "more general than", + // which we will then do a topological sort of. + edges := make([][]int, l) + for i := 0; i < (l - 1); i++ { + for j := i + 1; j < l; j++ { + cmp := compareTypes(tys[i], tys[j]) + switch { + case cmp < 0: + edges[i] = append(edges[i], j) + case cmp > 0: + edges[j] = append(edges[j], i) + } + } + } + + // Compute the in-degree of each node + inDegree := make([]int, l) + for _, outs := range edges { + for _, j := range outs { + inDegree[j]++ + } + } + + // The array backing our result will double as our queue for visiting + // the nodes, with the queue slice moving along this array until it + // is empty and positioned at the end of the array. Thus our visiting + // order is also our result order. + result := make([]int, l) + queue := result[0:0] + + // Initialize the queue with any item of in-degree 0, preserving + // their relative order. + for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go new file mode 100644 index 0000000..bd6736b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go @@ -0,0 +1,66 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. 
We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. + conversions := make([]Conversion, len(types)) +Preferences: + for _, wantTypeIdx := range prefOrder { + wantType := types[wantTypeIdx] + for i, tryType := range types { + if i == wantTypeIdx { + // Don't need to convert our wanted type to itself + conversions[i] = nil + continue + } + + if tryType.Equals(wantType) { + conversions[i] = nil + continue + } + + if unsafe { + conversions[i] = GetConversionUnsafe(tryType, wantType) + } else { + conversions[i] = GetConversion(tryType, wantType) + } + + if conversions[i] == nil { + // wantType is not a suitable unification type, so we'll + // try the next one in our preference order. + continue Preferences + } + } + + return wantType, conversions + } + + // TODO: For structural types, try to invent a new type that they + // can all be unified to, by unifying their respective attributes. + + // If we fall out here, no unification is possible + return cty.NilType, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/doc.go b/vendor/github.com/zclconf/go-cty/cty/doc.go new file mode 100644 index 0000000..d31f054 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/doc.go @@ -0,0 +1,18 @@ +// Package cty (pronounced see-tie) provides some infrastructure for a type +// system that might be useful for applications that need to represent +// configuration values provided by the user whose types are not known +// at compile time, particularly if the calling application also allows +// such values to be used in expressions. +// +// The type system consists of primitive types Number, String and Bool, as +// well as List and Map collection types and Object types that can have +// arbitrarily-typed sets of attributes. +// +// A set of operations is defined on these types, which is accessible via +// the wrapper struct Value, which annotates the raw, internal representation +// of a value with its corresponding type. +// +// This package is oriented towards being a building block for configuration +// languages used to bootstrap an application. It is not optimized for use +// in tight loops where CPU time or memory pressure are a concern. +package cty diff --git a/vendor/github.com/zclconf/go-cty/cty/element_iterator.go b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go new file mode 100644 index 0000000..0bf84c7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go @@ -0,0 +1,191 @@ +package cty + +import ( + "sort" + + "github.com/zclconf/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... 
+// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. + atys := val.ty.AttributeTypes() + keys := make([]string, 0, len(atys)) + for key := range atys { + keys = append(keys, key) + } + sort.Strings(keys) + + return &objectElementIterator{ + atys: atys, + vals: val.v.(map[string]interface{}), + attrNames: keys, + idx: -1, + } + default: + panic("attempt to iterate on non-collection, non-tuple type") + } +} + +type listElementIterator struct { + ety Type + vals []interface{} + idx int +} + +func (it *listElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.ety, + v: it.vals[i], + } +} + +func (it *listElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type mapElementIterator struct { + ety Type + vals map[string]interface{} + keys []string + idx int +} + +func (it *mapElementIterator) Element() (Value, Value) { + key := it.keys[it.idx] + return StringVal(key), Value{ + ty: it.ety, + v: it.vals[key], + } +} + +func (it *mapElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.keys) +} + +type setElementIterator struct { + ety Type + setIt *set.Iterator +} + +func (it *setElementIterator) Element() (Value, Value) { + val := Value{ + ty: it.ety, + v: it.setIt.Value(), + } + return val, val +} + +func (it *setElementIterator) Next() bool { + return it.setIt.Next() +} + +type tupleElementIterator struct { + etys []Type + vals []interface{} + idx int +} + +func (it *tupleElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.etys[i], + v: it.vals[i], + } +} + +func (it *tupleElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type objectElementIterator struct { + atys map[string]Type + vals map[string]interface{} + attrNames []string + idx int +} + +func (it *objectElementIterator) Element() (Value, Value) { + key := it.attrNames[it.idx] + return StringVal(key), Value{ + ty: it.atys[key], + v: it.vals[key], + } +} + +func (it *objectElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.attrNames) +} diff --git 
a/vendor/github.com/zclconf/go-cty/cty/error.go b/vendor/github.com/zclconf/go-cty/cty/error.go
new file mode 100644
index 0000000..dd139f7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/error.go
@@ -0,0 +1,55 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// PathError is a specialization of error that represents where in a
+// potentially-deep data structure an error occurred, using a Path.
+type PathError struct {
+	error
+	Path Path
+}
+
+func errorf(path Path, f string, args ...interface{}) error {
+	// We need to copy the Path because often our caller builds it by
+	// continually mutating the same underlying buffer.
+	sPath := make(Path, len(path))
+	copy(sPath, path)
+	return PathError{
+		error: fmt.Errorf(f, args...),
+		Path:  sPath,
+	}
+}
+
+// NewErrorf creates a new PathError for the current path by passing the
+// given format and arguments to fmt.Errorf and then wrapping the result
+// similarly to NewError.
+func (p Path) NewErrorf(f string, args ...interface{}) error {
+	return errorf(p, f, args...)
+}
+
+// NewError creates a new PathError for the current path, wrapping the given
+// error.
+func (p Path) NewError(err error) error {
+	// if we're being asked to wrap an existing PathError then our new
+	// PathError will be the concatenation of the two paths, ensuring
+	// that we still get a single flat PathError that's thus easier for
+	// callers to deal with.
+	perr, wrappingPath := err.(PathError)
+	pathLen := len(p)
+	if wrappingPath {
+		pathLen = pathLen + len(perr.Path)
+	}
+
+	sPath := make(Path, pathLen)
+	copy(sPath, p)
+	if wrappingPath {
+		copy(sPath[len(p):], perr.Path)
+	}
+
+	return PathError{
+		error: err,
+		Path:  sPath,
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/argument.go b/vendor/github.com/zclconf/go-cty/cty/function/argument.go
new file mode 100644
index 0000000..bfd3015
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/argument.go
@@ -0,0 +1,50 @@
+package function
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Parameter represents a parameter to a function.
+type Parameter struct {
+	// Name is an optional name for the argument. This package ignores this
+	// value, but callers may use it for documentation, etc.
+	Name string
+
+	// A type that any argument for this parameter must conform to.
+	// cty.DynamicPseudoType can be used, either at top-level or nested
+	// in a parameterized type, to indicate that any type should be
+	// permitted, to allow the definition of type-generic functions.
+	Type cty.Type
+
+	// If AllowNull is set then null values may be passed into this
+	// argument's slot in both the type-check function and the implementation
+	// function. If not set, such values are rejected by the built-in
+	// checking rules.
+	AllowNull bool
+
+	// If AllowUnknown is set then unknown values may be passed into this
+	// argument's slot in the implementation function. If not set, any
+	// unknown values will cause the function to immediately return
+	// an unknown value without calling the implementation function, thus
+	// freeing the function implementer from dealing with this case.
+	AllowUnknown bool
+
+	// If AllowDynamicType is set then DynamicVal may be passed into this
+	// argument's slot in the implementation function. If not set, any
+	// dynamic values will cause the function to immediately return
+	// DynamicVal without calling the implementation function, thus
+	// freeing the function implementer from dealing with this case.
+	//
+	// Note that DynamicVal is also unknown, so in order to receive dynamic
+	// *values* it is also necessary to set AllowUnknown.
+	//
+	// However, it is valid to set AllowDynamicType without AllowUnknown, in
+	// which case a dynamic value may be passed to the type checking function
+	// but will not make it to the *implementation* function. Instead, an
+	// unknown value of the type returned by the type-check function will be
+	// returned. This is suggested for functions that have a static return
+	// type since it allows the return value to be typed even if the input
+	// values are not, thus improving the type-check accuracy of derived
+	// values.
+	AllowDynamicType bool
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
new file mode 100644
index 0000000..393b311
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
@@ -0,0 +1,6 @@
+// Package function builds on the functionality of cty by modeling functions
+// that operate on cty Values.
+//
+// Functions are, at their core, Go anonymous functions. However, this package
+// wraps utility functions around them for parameter type checking, etc.
+package function
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/error.go b/vendor/github.com/zclconf/go-cty/cty/function/error.go
new file mode 100644
index 0000000..2b56779
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/error.go
@@ -0,0 +1,50 @@
+package function
+
+import (
+	"fmt"
+	"runtime/debug"
+)
+
+// ArgError represents an error with one of the arguments in a call. The
+// attribute Index represents the zero-based index of the argument in question.
+//
+// Its error *may* be a cty.PathError, in which case the error actually
+// pertains to a nested value within the data structure passed as the argument.
+type ArgError struct {
+	error
+	Index int
+}
+
+// NewArgErrorf creates a new ArgError for the argument at the given index,
+// with a message built from the given format and arguments.
+func NewArgErrorf(i int, f string, args ...interface{}) error {
+	return ArgError{
+		error: fmt.Errorf(f, args...),
+		Index: i,
+	}
+}
+
+// NewArgError creates a new ArgError for the argument at the given index,
+// wrapping the given error.
+func NewArgError(i int, err error) error {
+	return ArgError{
+		error: err,
+		Index: i,
+	}
+}
+
+// PanicError indicates that a panic occurred while executing either a
+// function's type or implementation function. This is captured and wrapped
+// into a normal error so that callers (expected to be language runtimes)
+// are freed from having to deal with panics in buggy functions.
+type PanicError struct {
+	Value interface{}
+	Stack []byte
+}
+
+func errorForPanic(val interface{}) error {
+	return PanicError{
+		Value: val,
+		Stack: debug.Stack(),
+	}
+}
+
+func (e PanicError) Error() string {
+	return fmt.Sprintf("panic in function implementation: %s\n%s", e.Value, e.Stack)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go
new file mode 100644
index 0000000..162f7bf
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go
@@ -0,0 +1,291 @@
+package function
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Function represents a function. This is the main type in this package.
+type Function struct {
+	spec *Spec
+}
+
+// Spec is the specification of a function, used to instantiate
+// a new Function.
+type Spec struct {
+	// Params is a description of the positional parameters for the function.
+ // The standard checking logic rejects any calls that do not provide + // arguments conforming to this definition, freeing the function + // implementer from dealing with such inconsistencies. + Params []Parameter + + // VarParam is an optional specification of additional "varargs" the + // function accepts. If this is non-nil then callers may provide an + // arbitrary number of additional arguments (after those matching with + // the fixed parameters in Params) that conform to the given specification, + // which will appear as additional values in the slices of values + // provided to the type and implementation functions. + VarParam *Parameter + + // Type is the TypeFunc that decides the return type of the function + // given its arguments, which may be Unknown. See the documentation + // of TypeFunc for more information. + // + // Use StaticReturnType if the function's return type does not vary + // depending on its arguments. + Type TypeFunc + + // Impl is the ImplFunc that implements the function's behavior. + // + // Functions are expected to behave as pure functions, and not create + // any visible side-effects. + // + // If a TypeFunc is also provided, the value returned from Impl *must* + // conform to the type it returns, or a call to the function will panic. + Impl ImplFunc +} + +// New creates a new function with the given specification. +// +// After passing a Spec to this function, the caller must no longer read from +// or mutate it. +func New(spec *Spec) Function { + f := Function{ + spec: spec, + } + return f +} + +// TypeFunc is a callback type for determining the return type of a function +// given its arguments. +// +// Any of the values passed to this function may be unknown, even if the +// parameters are not configured to accept unknowns. +// +// If any of the given values are *not* unknown, the TypeFunc may use the +// values for pre-validation and for choosing the return type. For example, +// a hypothetical JSON-unmarshalling function could return +// cty.DynamicPseudoType if the given JSON string is unknown, but return +// a concrete type based on the JSON structure if the JSON string is already +// known. +type TypeFunc func(args []cty.Value) (cty.Type, error) + +// ImplFunc is a callback type for the main implementation of a function. +// +// "args" are the values for the arguments, and this slice will always be at +// least as long as the argument definition slice for the function. +// +// "retType" is the type returned from the Type callback, included as a +// convenience to avoid the need to re-compute the return type for generic +// functions whose return type is a function of the arguments. +type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) + +// StaticReturnType returns a TypeFunc that always returns the given type. +// +// This is provided as a convenience for defining a function whose return +// type does not depend on the argument types. +func StaticReturnType(ty cty.Type) TypeFunc { + return func([]cty.Value) (cty.Type, error) { + return ty, nil + } +} + +// ReturnType returns the return type of a function given a set of candidate +// argument types, or returns an error if the given types are unacceptable. +// +// If the caller already knows values for at least some of the arguments +// it can be better to call ReturnTypeForValues, since certain functions may +// determine their return types from their values and return DynamicVal if +// the values are unknown. 
+func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) {
+ vals := make([]cty.Value, len(argTypes))
+ for i, ty := range argTypes {
+ vals[i] = cty.UnknownVal(ty)
+ }
+ return f.ReturnTypeForValues(vals)
+}
+
+// ReturnTypeForValues is similar to ReturnType but can be used if the caller
+// already knows the values of some or all of the arguments, in which case
+// the function may be able to determine a more definite result if its
+// return type depends on the argument *values*.
+//
+// For any arguments whose values are not known, pass an Unknown value of
+// the appropriate type.
+func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) {
+ var posArgs []cty.Value
+ var varArgs []cty.Value
+
+ if f.spec.VarParam == nil {
+ if len(args) != len(f.spec.Params) {
+ return cty.Type{}, fmt.Errorf(
+ "wrong number of arguments (%d required; %d given)",
+ len(f.spec.Params), len(args),
+ )
+ }
+
+ posArgs = args
+ varArgs = nil
+ } else {
+ if len(args) < len(f.spec.Params) {
+ return cty.Type{}, fmt.Errorf(
+ "wrong number of arguments (at least %d required; %d given)",
+ len(f.spec.Params), len(args),
+ )
+ }
+
+ posArgs = args[0:len(f.spec.Params)]
+ varArgs = args[len(f.spec.Params):]
+ }
+
+ for i, spec := range f.spec.Params {
+ val := posArgs[i]
+
+ if val.IsNull() && !spec.AllowNull {
+ return cty.Type{}, NewArgErrorf(i, "must not be null")
+ }
+
+ // AllowUnknown is ignored for type-checking, since we expect to be
+ // able to type check with unknown values. We *do* still need to deal
+ // with DynamicPseudoType here though, since the Type function might
+ // not be ready to deal with that.
+
+ if val.Type() == cty.DynamicPseudoType {
+ if !spec.AllowDynamicType {
+ return cty.DynamicPseudoType, nil
+ }
+ } else if errs := val.Type().TestConformance(spec.Type); errs != nil {
+ // For now we'll just return the first error in the set, since
+ // we don't have a good way to return the whole list here.
+ // Would be good to do something better at some point...
+ return cty.Type{}, NewArgError(i, errs[0])
+ }
+ }
+
+ if varArgs != nil {
+ spec := f.spec.VarParam
+ for i, val := range varArgs {
+ realI := i + len(posArgs)
+
+ if val.IsNull() && !spec.AllowNull {
+ return cty.Type{}, NewArgErrorf(realI, "must not be null")
+ }
+
+ if val.Type() == cty.DynamicPseudoType {
+ if !spec.AllowDynamicType {
+ return cty.DynamicPseudoType, nil
+ }
+ } else if errs := val.Type().TestConformance(spec.Type); errs != nil {
+ // For now we'll just return the first error in the set, since
+ // we don't have a good way to return the whole list here.
+ // Would be good to do something better at some point...
+ return cty.Type{}, NewArgError(realI, errs[0])
+ }
+ }
+ }
+
+ // Intercept any panics from the function and return them as normal errors,
+ // so a calling language runtime doesn't need to deal with panics.
+ defer func() {
+ if r := recover(); r != nil {
+ ty = cty.NilType
+ err = errorForPanic(r)
+ }
+ }()
+
+ return f.spec.Type(args)
+}
+
+// Call actually calls the function with the given arguments, which must
+// conform to the function's parameter specification or an error will be
+// returned.
+func (f Function) Call(args []cty.Value) (val cty.Value, err error) {
+ expectedType, err := f.ReturnTypeForValues(args)
+ if err != nil {
+ return cty.NilVal, err
+ }
+
+ // Type checking already dealt with most situations relating to our
+ // parameter specification, but we still need to deal with unknown
+ // values.
+ posArgs := args[:len(f.spec.Params)]
+ varArgs := args[len(f.spec.Params):]
+
+ for i, spec := range f.spec.Params {
+ val := posArgs[i]
+
+ if !val.IsKnown() && !spec.AllowUnknown {
+ return cty.UnknownVal(expectedType), nil
+ }
+ }
+
+ if f.spec.VarParam != nil {
+ spec := f.spec.VarParam
+ for _, val := range varArgs {
+ if !val.IsKnown() && !spec.AllowUnknown {
+ return cty.UnknownVal(expectedType), nil
+ }
+ }
+ }
+
+ var retVal cty.Value
+ {
+ // Intercept any panics from the function and return them as normal errors,
+ // so a calling language runtime doesn't need to deal with panics.
+ defer func() {
+ if r := recover(); r != nil {
+ val = cty.NilVal
+ err = errorForPanic(r)
+ }
+ }()
+
+ retVal, err = f.spec.Impl(args, expectedType)
+ if err != nil {
+ return cty.NilVal, err
+ }
+ }
+
+ // Returned value must conform to what the Type function expected, to
+ // protect callers from having to deal with inconsistencies.
+ if errs := retVal.Type().TestConformance(expectedType); errs != nil {
+ panic(fmt.Errorf(
+ "returned value %#v does not conform to expected return type %#v: %s",
+ retVal, expectedType, errs[0],
+ ))
+ }
+
+ return retVal, nil
+}
+
+// ProxyFunc is the type returned by the method Function.Proxy.
+type ProxyFunc func(args ...cty.Value) (cty.Value, error)
+
+// Proxy returns a function that can be called with cty.Value arguments
+// to run the function. This is provided as a convenience for when using
+// a function directly within Go code.
+func (f Function) Proxy() ProxyFunc {
+ return func(args ...cty.Value) (cty.Value, error) {
+ return f.Call(args)
+ }
+}
+
+// Params returns information about the function's fixed positional parameters.
+// This does not include information about any variadic arguments accepted;
+// for that, call VarParam.
+func (f Function) Params() []Parameter {
+ ret := make([]Parameter, len(f.spec.Params))
+ copy(ret, f.spec.Params)
+ return ret
+}
+
+// VarParam returns information about the variadic arguments the function
+// expects, or nil if the function is not variadic.
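+//
+// As a small sketch (f here is a hypothetical Function value), a caller
+// could enumerate a function's signature like this:
+//
+//     for _, p := range f.Params() {
+//         fmt.Printf("parameter %q has type %s\n", p.Name, p.Type.FriendlyName())
+//     }
+//     if vp := f.VarParam(); vp != nil {
+//         fmt.Printf("variadic parameter %q has type %s\n", vp.Name, vp.Type.FriendlyName())
+//     }
+//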
+func (f Function) VarParam() *Parameter { + if f.spec.VarParam == nil { + return nil + } + + ret := *f.spec.VarParam + return &ret +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go new file mode 100644 index 0000000..a473d0e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go @@ -0,0 +1,73 @@ +package stdlib + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var NotFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "val", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Not(), nil + }, +}) + +var AndFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Bool, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].And(args[1]), nil + }, +}) + +var OrFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Bool, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Or(args[1]), nil + }, +}) + +// Not returns the logical complement of the given boolean value. +func Not(num cty.Value) (cty.Value, error) { + return NotFunc.Call([]cty.Value{num}) +} + +// And returns true if and only if both of the given boolean values are true. +func And(a, b cty.Value) (cty.Value, error) { + return AndFunc.Call([]cty.Value{a, b}) +} + +// Or returns true if either of the given boolean values are true. +func Or(a, b cty.Value) (cty.Value, error) { + return OrFunc.Call([]cty.Value{a, b}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go new file mode 100644 index 0000000..a132e0c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go @@ -0,0 +1,112 @@ +package stdlib + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// Bytes is a capsule type that can be used with the binary functions to +// support applications that need to support raw buffers in addition to +// UTF-8 strings. +var Bytes = cty.Capsule("bytes", reflect.TypeOf([]byte(nil))) + +// BytesVal creates a new Bytes value from the given buffer, which must be +// non-nil or this function will panic. +// +// Once a byte slice has been wrapped in a Bytes capsule, its underlying array +// must be considered immutable. +func BytesVal(buf []byte) cty.Value { + if buf == nil { + panic("can't make Bytes value from nil slice") + } + + return cty.CapsuleVal(Bytes, &buf) +} + +// BytesLen is a Function that returns the length of the buffer encapsulated +// in a Bytes value. 
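+//
+// For example, the following sketch (using only names defined in this file)
+// would yield cty.NumberIntVal(5):
+//
+//     BytesLen(BytesVal([]byte("hello")))
+//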
+var BytesLenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "buf", + Type: Bytes, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bufPtr := args[0].EncapsulatedValue().(*[]byte) + return cty.NumberIntVal(int64(len(*bufPtr))), nil + }, +}) + +// BytesSlice is a Function that returns a slice of the given Bytes value. +var BytesSliceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "buf", + Type: Bytes, + AllowDynamicType: true, + }, + { + Name: "offset", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "length", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(Bytes), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bufPtr := args[0].EncapsulatedValue().(*[]byte) + + var offset, length int + + var err error + err = gocty.FromCtyValue(args[1], &offset) + if err != nil { + return cty.NilVal, err + } + err = gocty.FromCtyValue(args[2], &length) + if err != nil { + return cty.NilVal, err + } + + if offset < 0 || length < 0 { + return cty.NilVal, fmt.Errorf("offset and length must be non-negative") + } + + if offset > len(*bufPtr) { + return cty.NilVal, fmt.Errorf( + "offset %d is greater than total buffer length %d", + offset, len(*bufPtr), + ) + } + + end := offset + length + + if end > len(*bufPtr) { + return cty.NilVal, fmt.Errorf( + "offset %d + length %d is greater than total buffer length %d", + offset, length, len(*bufPtr), + ) + } + + return BytesVal((*bufPtr)[offset:end]), nil + }, +}) + +func BytesLen(buf cty.Value) (cty.Value, error) { + return BytesLenFunc.Call([]cty.Value{buf}) +} + +func BytesSlice(buf cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return BytesSliceFunc.Call([]cty.Value{buf, offset, length}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go new file mode 100644 index 0000000..967ba03 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go @@ -0,0 +1,140 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +var HasIndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + { + Name: "key", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + return cty.Bool, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].HasIndex(args[1]), nil + }, +}) + +var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + }, + { + Name: "key", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + key := args[1] + keyTy := key.Type() + switch { + case collTy.IsTupleType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, 
fmt.Errorf("key for tuple must be number") + } + if !key.IsKnown() { + return cty.DynamicPseudoType, nil + } + var idx int + err := gocty.FromCtyValue(key, &idx) + if err != nil { + return cty.NilType, fmt.Errorf("invalid key for tuple: %s", err) + } + + etys := collTy.TupleElementTypes() + + if idx >= len(etys) || idx < 0 { + return cty.NilType, fmt.Errorf("key must be between 0 and %d inclusive", len(etys)) + } + + return etys[idx], nil + + case collTy.IsListType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for list must be number") + } + + return collTy.ElementType(), nil + + case collTy.IsMapType(): + if keyTy != cty.String && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for map must be string") + } + + return collTy.ElementType(), nil + + default: + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + has, err := HasIndex(args[0], args[1]) + if err != nil { + return cty.NilVal, err + } + if has.False() { // safe because collection and key are guaranteed known here + return cty.NilVal, fmt.Errorf("invalid index") + } + + return args[0].Index(args[1]), nil + }, +}) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + return cty.Number, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Length(), nil + }, +}) + +// HasIndex determines whether the given collection can be indexed with the +// given key. +func HasIndex(collection cty.Value, key cty.Value) (cty.Value, error) { + return HasIndexFunc.Call([]cty.Value{collection, key}) +} + +// Index returns an element from the given collection using the given key, +// or returns an error if there is no element for the given key. +func Index(collection cty.Value, key cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{collection, key}) +} + +// Length returns the number of elements in the given collection. 
+func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go new file mode 100644 index 0000000..5070a5a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go @@ -0,0 +1,93 @@ +package stdlib + +import ( + "encoding/csv" + "fmt" + "io" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var CSVDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + r := strings.NewReader(str.AsString()) + cr := csv.NewReader(r) + headers, err := cr.Read() + if err == io.EOF { + return cty.DynamicPseudoType, fmt.Errorf("missing header line") + } + if err != nil { + return cty.DynamicPseudoType, err + } + + atys := make(map[string]cty.Type, len(headers)) + for _, name := range headers { + if _, exists := atys[name]; exists { + return cty.DynamicPseudoType, fmt.Errorf("duplicate column name %q", name) + } + atys[name] = cty.String + } + return cty.List(cty.Object(atys)), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ety := retType.ElementType() + atys := ety.AttributeTypes() + str := args[0] + r := strings.NewReader(str.AsString()) + cr := csv.NewReader(r) + cr.FieldsPerRecord = len(atys) + + // Read the header row first, since that'll tell us which indices + // map to which attribute names. + headers, err := cr.Read() + if err != nil { + return cty.DynamicVal, err + } + + var rows []cty.Value + for { + cols, err := cr.Read() + if err == io.EOF { + break + } + if err != nil { + return cty.DynamicVal, err + } + + vals := make(map[string]cty.Value, len(cols)) + for i, str := range cols { + name := headers[i] + vals[name] = cty.StringVal(str) + } + rows = append(rows, cty.ObjectVal(vals)) + } + + if len(rows) == 0 { + return cty.ListValEmpty(ety), nil + } + return cty.ListVal(rows), nil + }, +}) + +// CSVDecode parses the given CSV (RFC 4180) string and, if it is valid, +// returns a list of objects representing the rows. +// +// The result is always a list of some object type. The first row of the +// input is used to determine the object attributes, and subsequent rows +// determine the values of those attributes. +func CSVDecode(str cty.Value) (cty.Value, error) { + return CSVDecodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go new file mode 100644 index 0000000..cfb613e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go @@ -0,0 +1,13 @@ +// Package stdlib is a collection of cty functions that are expected to be +// generally useful, and are thus factored out into this shared library in +// the hope that cty-using applications will have consistent behavior when +// using these functions. +// +// See the parent package "function" for more information on the purpose +// and usage of cty functions. +// +// This package contains both Go functions, which provide convenient access +// to call the functions from Go code, and the Function objects themselves. +// The latter follow the naming scheme of appending "Func" to the end of +// the function name. 
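+//
+// For example (an illustrative sketch), the following two calls are
+// equivalent ways to invoke the logical-not function:
+//
+//     v, err := Not(cty.True)
+//     v, err = NotFunc.Call([]cty.Value{cty.True})
+//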
+package stdlib diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go new file mode 100644 index 0000000..fb24f20 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go @@ -0,0 +1,496 @@ +package stdlib + +import ( + "bytes" + "fmt" + "math/big" + "strings" + + "github.com/apparentlymart/go-textseg/textseg" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +//go:generate ragel -Z format_fsm.rl +//go:generate gofmt -w format_fsm.go + +var FormatFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + for _, arg := range args[1:] { + if !arg.IsWhollyKnown() { + // We require all nested values to be known because the only + // thing we can do for a collection/structural type is print + // it as JSON and that requires it to be wholly known. + return cty.UnknownVal(cty.String), nil + } + } + str, err := formatFSM(args[0].AsString(), args[1:]) + return cty.StringVal(str), err + }, +}) + +var FormatListFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowUnknown: true, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + fmtVal := args[0] + args = args[1:] + + if len(args) == 0 { + // With no arguments, this function is equivalent to Format, but + // returning a single-element list result. + result, err := Format(fmtVal, args...) + return cty.ListVal([]cty.Value{result}), err + } + + fmtStr := fmtVal.AsString() + + // Each of our arguments will be dealt with either as an iterator + // or as a single value. Iterators are used for sequence-type values + // (lists, sets, tuples) while everything else is treated as a + // single value. The sequences we iterate over are required to be + // all the same length. + iterLen := -1 + lenChooser := -1 + iterators := make([]cty.ElementIterator, len(args)) + singleVals := make([]cty.Value, len(args)) + for i, arg := range args { + argTy := arg.Type() + switch { + case (argTy.IsListType() || argTy.IsSetType() || argTy.IsTupleType()) && !arg.IsNull(): + thisLen := arg.LengthInt() + if iterLen == -1 { + iterLen = thisLen + lenChooser = i + } else { + if thisLen != iterLen { + return cty.NullVal(cty.List(cty.String)), function.NewArgErrorf( + i+1, + "argument %d has length %d, which is inconsistent with argument %d of length %d", + i+1, thisLen, + lenChooser+1, iterLen, + ) + } + } + iterators[i] = arg.ElementIterator() + default: + singleVals[i] = arg + } + } + + if iterLen == 0 { + // If our sequences are all empty then our result must be empty. + return cty.ListValEmpty(cty.String), nil + } + + if iterLen == -1 { + // If we didn't encounter any iterables at all then we're going + // to just do one iteration with items from singleVals. 
+ iterLen = 1
+ }
+
+ ret := make([]cty.Value, 0, iterLen)
+ fmtArgs := make([]cty.Value, len(iterators))
+ Results:
+ for iterIdx := 0; iterIdx < iterLen; iterIdx++ {
+
+ // Construct our arguments for a single format call
+ for i := range fmtArgs {
+ switch {
+ case iterators[i] != nil:
+ iterator := iterators[i]
+ iterator.Next()
+ _, val := iterator.Element()
+ fmtArgs[i] = val
+ default:
+ fmtArgs[i] = singleVals[i]
+ }
+
+ // If any of the arguments to this call would be unknown then
+ // this particular result is unknown, but we'll keep going
+ // to see if any other iterations can produce known values.
+ if !fmtArgs[i].IsWhollyKnown() {
+ // We require all nested values to be known because the only
+ // thing we can do for a collection/structural type is print
+ // it as JSON and that requires it to be wholly known.
+ ret = append(ret, cty.UnknownVal(cty.String))
+ continue Results
+ }
+ }
+
+ str, err := formatFSM(fmtStr, fmtArgs)
+ if err != nil {
+ return cty.NullVal(cty.List(cty.String)), fmt.Errorf(
+ "error on format iteration %d: %s", iterIdx, err,
+ )
+ }
+
+ ret = append(ret, cty.StringVal(str))
+ }
+
+ return cty.ListVal(ret), nil
+ },
+})
+
+// Format produces a string representation of zero or more values using a
+// format string similar to the "printf" function in C.
+//
+// It supports the following "verbs":
+//
+// %% Literal percent sign, consuming no value
+// %v A default formatting of the value based on type, as described below.
+// %#v JSON serialization of the value
+// %t Converts to boolean and then produces "true" or "false"
+// %b Converts to number, requires integer, produces binary representation
+// %d Converts to number, requires integer, produces decimal representation
+// %o Converts to number, requires integer, produces octal representation
+// %x Converts to number, requires integer, produces hexadecimal representation
+// with lowercase letters
+// %X Like %x but with uppercase letters
+// %e Converts to number, produces scientific notation like -1.234456e+78
+// %E Like %e but with an uppercase "E" representing the exponent
+// %f Converts to number, produces decimal representation with fractional
+// part but no exponent, like 123.456
+// %g %e for large exponents or %f otherwise
+// %G %E for large exponents or %f otherwise
+// %s Converts to string and produces the string's characters
+// %q Converts to string and produces JSON-quoted string representation,
+// like %v.
+//
+// The default format selections made by %v are:
+//
+// string %s
+// number %g
+// bool %t
+// other %#v
+//
+// Null values produce the literal keyword "null" for %v and %#v, and produce
+// an error otherwise.
+//
+// Width is specified by an optional decimal number immediately preceding the
+// verb letter. If absent, the width is whatever is necessary to represent the
+// value. Precision is specified after the (optional) width by a period
+// followed by a decimal number. If no period is present, a default precision
+// is used. A period with no following number is invalid.
+// For example:
+//
+// %f default width, default precision
+// %9f width 9, default precision
+// %.2f default width, precision 2
+// %9.2f width 9, precision 2
+//
+// Width and precision are measured in Unicode characters (grapheme clusters).
+//
+// For most values, width is the minimum number of characters to output,
+// padding the formatted form with spaces if necessary.
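+//
+// For example (an illustrative sketch), a width of 5 applied via "%5s" to
+// the string "ab" produces "   ab", while "%-5s" produces "ab   ".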
+// +// For strings, precision limits the length of the input to be formatted (not +// the size of the output), truncating if necessary. +// +// For numbers, width sets the minimum width of the field and precision sets +// the number of places after the decimal, if appropriate, except that for +// %g/%G precision sets the total number of significant digits. +// +// The following additional symbols can be used immediately after the percent +// introducer as flags: +// +// (a space) leave a space where the sign would be if number is positive +// + Include a sign for a number even if it is positive (numeric only) +// - Pad with spaces on the left rather than the right +// 0 Pad with zeros rather than spaces. +// +// Flag characters are ignored for verbs that do not support them. +// +// By default, % sequences consume successive arguments starting with the first. +// Introducing a [n] sequence immediately before the verb letter, where n is a +// decimal integer, explicitly chooses a particular value argument by its +// one-based index. Subsequent calls without an explicit index will then +// proceed with n+1, n+2, etc. +// +// An error is produced if the format string calls for an impossible conversion +// or accesses more values than are given. An error is produced also for +// an unsupported format verb. +func Format(format cty.Value, vals ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, 0, len(vals)+1) + args = append(args, format) + args = append(args, vals...) + return FormatFunc.Call(args) +} + +// FormatList applies the same formatting behavior as Format, but accepts +// a mixture of list and non-list values as arguments. Any list arguments +// passed must have the same length, which dictates the length of the +// resulting list. +// +// Any non-list arguments are used repeatedly for each iteration over the +// list arguments. The list arguments are iterated in order by key, so +// corresponding items are formatted together. +func FormatList(format cty.Value, vals ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, 0, len(vals)+1) + args = append(args, format) + args = append(args, vals...) + return FormatListFunc.Call(args) +} + +type formatVerb struct { + Raw string + Offset int + + ArgNum int + Mode rune + + Zero bool + Sharp bool + Plus bool + Minus bool + Space bool + + HasPrec bool + Prec int + + HasWidth bool + Width int +} + +// formatAppend is called by formatFSM (generated by format_fsm.rl) for each +// formatting sequence that is encountered. +func formatAppend(verb *formatVerb, buf *bytes.Buffer, args []cty.Value) error { + argIdx := verb.ArgNum - 1 + if argIdx >= len(args) { + return fmt.Errorf( + "not enough arguments for %q at %d: need index %d but have %d total", + verb.Raw, verb.Offset, + verb.ArgNum, len(args), + ) + } + arg := args[argIdx] + + if verb.Mode != 'v' && arg.IsNull() { + return fmt.Errorf("unsupported value for %q at %d: null value cannot be formatted", verb.Raw, verb.Offset) + } + + // Normalize to make some things easier for downstream formatters + if !verb.HasWidth { + verb.Width = -1 + } + if !verb.HasPrec { + verb.Prec = -1 + } + + // For our first pass we'll ensure the verb is supported and then fan + // out to other functions based on what conversion is needed. 
+ switch verb.Mode { + + case 'v': + return formatAppendAsIs(verb, buf, arg) + + case 't': + return formatAppendBool(verb, buf, arg) + + case 'b', 'd', 'o', 'x', 'X', 'e', 'E', 'f', 'g', 'G': + return formatAppendNumber(verb, buf, arg) + + case 's', 'q': + return formatAppendString(verb, buf, arg) + + default: + return fmt.Errorf("unsupported format verb %q in %q at offset %d", verb.Mode, verb.Raw, verb.Offset) + } +} + +func formatAppendAsIs(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + + if !verb.Sharp && !arg.IsNull() { + // Unless the caller overrode it with the sharp flag, we'll try some + // specialized formats before we fall back on JSON. + switch arg.Type() { + case cty.String: + fmted := arg.AsString() + fmted = formatPadWidth(verb, fmted) + buf.WriteString(fmted) + return nil + case cty.Number: + bf := arg.AsBigFloat() + fmted := bf.Text('g', -1) + fmted = formatPadWidth(verb, fmted) + buf.WriteString(fmted) + return nil + } + } + + jb, err := json.Marshal(arg, arg.Type()) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + fmted := formatPadWidth(verb, string(jb)) + buf.WriteString(fmted) + + return nil +} + +func formatAppendBool(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.Bool) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + if arg.True() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + return nil +} + +func formatAppendNumber(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.Number) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + switch verb.Mode { + case 'b', 'd', 'o', 'x', 'X': + return formatAppendInteger(verb, buf, arg) + default: + bf := arg.AsBigFloat() + + // For floats our format syntax is a subset of Go's, so it's + // safe for us to just lean on the existing Go implementation. + fmtstr := formatStripIndexSegment(verb.Raw) + fmted := fmt.Sprintf(fmtstr, bf) + buf.WriteString(fmted) + return nil + } +} + +func formatAppendInteger(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + bf := arg.AsBigFloat() + bi, acc := bf.Int(nil) + if acc != big.Exact { + return fmt.Errorf("unsupported value for %q at %d: an integer is required", verb.Raw, verb.Offset) + } + + // For integers our format syntax is a subset of Go's, so it's + // safe for us to just lean on the existing Go implementation. + fmtstr := formatStripIndexSegment(verb.Raw) + fmted := fmt.Sprintf(fmtstr, bi) + buf.WriteString(fmted) + return nil +} + +func formatAppendString(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.String) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + // We _cannot_ directly use the Go fmt.Sprintf implementation for strings + // because it measures widths and precisions in runes rather than grapheme + // clusters. 
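+ // For example, "e" followed by a combining acute accent (U+0301) is two
+ // runes but a single grapheme cluster, so a precision of 1 should keep
+ // the whole cluster rather than truncating between the two runes.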
+ + str := arg.AsString() + if verb.Prec > 0 { + strB := []byte(str) + pos := 0 + wanted := verb.Prec + for i := 0; i < wanted; i++ { + next := strB[pos:] + if len(next) == 0 { + // ran out of characters before we hit our max width + break + } + d, _, _ := textseg.ScanGraphemeClusters(strB[pos:], true) + pos += d + } + str = str[:pos] + } + + switch verb.Mode { + case 's': + fmted := formatPadWidth(verb, str) + buf.WriteString(fmted) + case 'q': + jb, err := json.Marshal(cty.StringVal(str), cty.String) + if err != nil { + // Should never happen, since we know this is a known, non-null string + panic(fmt.Errorf("failed to marshal %#v as JSON: %s", arg, err)) + } + fmted := formatPadWidth(verb, string(jb)) + buf.WriteString(fmted) + default: + // Should never happen because formatAppend should've already validated + panic(fmt.Errorf("invalid string formatting mode %q", verb.Mode)) + } + return nil +} + +func formatPadWidth(verb *formatVerb, fmted string) string { + if verb.Width < 0 { + return fmted + } + + // Safe to ignore errors because ScanGraphemeClusters cannot produce errors + givenLen, _ := textseg.TokenCount([]byte(fmted), textseg.ScanGraphemeClusters) + wantLen := verb.Width + if givenLen >= wantLen { + return fmted + } + + padLen := wantLen - givenLen + padChar := " " + if verb.Zero { + padChar = "0" + } + pads := strings.Repeat(padChar, padLen) + + if verb.Minus { + return fmted + pads + } + return pads + fmted +} + +// formatStripIndexSegment strips out any [nnn] segment present in a verb +// string so that we can pass it through to Go's fmt.Sprintf with a single +// argument. This is used in cases where we're just leaning on Go's formatter +// because it's a superset of ours. +func formatStripIndexSegment(rawVerb string) string { + // We assume the string has already been validated here, since we should + // only be using this function with strings that were accepted by our + // scanner in formatFSM. + start := strings.Index(rawVerb, "[") + end := strings.Index(rawVerb, "]") + if start == -1 || end == -1 { + return rawVerb + } + + return rawVerb[:start] + rawVerb[end+1:] +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go new file mode 100644 index 0000000..86876ba --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go @@ -0,0 +1,358 @@ +// line 1 "format_fsm.rl" +// This file is generated from format_fsm.rl. DO NOT EDIT. 
+ +// line 5 "format_fsm.rl" + +package stdlib + +import ( + "bytes" + "fmt" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" +) + +// line 20 "format_fsm.go" +var _formatfsm_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 2, 1, 4, + 1, 5, 1, 6, 1, 7, 1, 8, + 1, 9, 1, 10, 1, 11, 1, 14, + 1, 16, 1, 17, 1, 18, 2, 3, + 4, 2, 12, 10, 2, 12, 16, 2, + 12, 18, 2, 13, 14, 2, 15, 10, + 2, 15, 18, +} + +var _formatfsm_key_offsets []byte = []byte{ + 0, 0, 14, 27, 34, 36, 39, 43, + 51, +} + +var _formatfsm_trans_keys []byte = []byte{ + 32, 35, 37, 43, 45, 46, 48, 91, + 49, 57, 65, 90, 97, 122, 32, 35, + 43, 45, 46, 48, 91, 49, 57, 65, + 90, 97, 122, 91, 48, 57, 65, 90, + 97, 122, 49, 57, 93, 48, 57, 65, + 90, 97, 122, 46, 91, 48, 57, 65, + 90, 97, 122, 37, +} + +var _formatfsm_single_lengths []byte = []byte{ + 0, 8, 7, 1, 0, 1, 0, 2, + 1, +} + +var _formatfsm_range_lengths []byte = []byte{ + 0, 3, 3, 3, 1, 1, 2, 3, + 0, +} + +var _formatfsm_index_offsets []byte = []byte{ + 0, 0, 12, 23, 28, 30, 33, 36, + 42, +} + +var _formatfsm_indicies []byte = []byte{ + 1, 2, 3, 4, 5, 6, 7, 10, + 8, 9, 9, 0, 1, 2, 4, 5, + 6, 7, 10, 8, 9, 9, 0, 13, + 11, 12, 12, 0, 14, 0, 15, 14, + 0, 9, 9, 0, 16, 19, 17, 18, + 18, 0, 20, 3, +} + +var _formatfsm_trans_targs []byte = []byte{ + 0, 2, 2, 8, 2, 2, 3, 2, + 7, 8, 4, 3, 8, 4, 5, 6, + 3, 7, 8, 4, 1, +} + +var _formatfsm_trans_actions []byte = []byte{ + 7, 17, 9, 3, 15, 13, 25, 11, + 43, 29, 19, 27, 49, 46, 21, 0, + 37, 23, 40, 34, 1, +} + +var _formatfsm_eof_actions []byte = []byte{ + 0, 31, 31, 31, 31, 31, 31, 31, + 5, +} + +const formatfsm_start int = 8 +const formatfsm_first_final int = 8 +const formatfsm_error int = 0 + +const formatfsm_en_main int = 8 + +// line 19 "format_fsm.rl" + +func formatFSM(format string, a []cty.Value) (string, error) { + var buf bytes.Buffer + data := format + nextArg := 1 // arg numbers are 1-based + var verb formatVerb + + // line 153 "format_fsm.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + cs := 0 // current state (will be initialized by ragel-generated code) + ts := 0 + te := 0 + eof := pe + + // Keep Go compiler happy even if generated code doesn't use these + _ = ts + _ = te + _ = eof + + // line 121 "format_fsm.go" + { + cs = formatfsm_start + } + + // line 126 "format_fsm.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _keys = int(_formatfsm_key_offsets[cs]) + _trans = int(_formatfsm_index_offsets[cs]) + + _klen = int(_formatfsm_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _formatfsm_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _formatfsm_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_formatfsm_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _formatfsm_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _formatfsm_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans 
= int(_formatfsm_indicies[_trans]) + cs = int(_formatfsm_trans_targs[_trans]) + + if _formatfsm_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_formatfsm_trans_actions[_trans]) + _nacts = uint(_formatfsm_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _formatfsm_actions[_acts-1] { + case 0: + // line 29 "format_fsm.rl" + + verb = formatVerb{ + ArgNum: nextArg, + Prec: -1, + Width: -1, + } + ts = p + + case 1: + // line 38 "format_fsm.rl" + + buf.WriteByte(data[p]) + + case 4: + // line 49 "format_fsm.rl" + + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + + case 5: + // line 56 "format_fsm.rl" + + verb.Sharp = true + + case 6: + // line 59 "format_fsm.rl" + + verb.Zero = true + + case 7: + // line 62 "format_fsm.rl" + + verb.Minus = true + + case 8: + // line 65 "format_fsm.rl" + + verb.Plus = true + + case 9: + // line 68 "format_fsm.rl" + + verb.Space = true + + case 10: + // line 72 "format_fsm.rl" + + verb.ArgNum = 0 + + case 11: + // line 75 "format_fsm.rl" + + verb.ArgNum = (10 * verb.ArgNum) + (int(data[p]) - '0') + + case 12: + // line 79 "format_fsm.rl" + + verb.HasWidth = true + + case 13: + // line 82 "format_fsm.rl" + + verb.Width = 0 + + case 14: + // line 85 "format_fsm.rl" + + verb.Width = (10 * verb.Width) + (int(data[p]) - '0') + + case 15: + // line 89 "format_fsm.rl" + + verb.HasPrec = true + + case 16: + // line 92 "format_fsm.rl" + + verb.Prec = 0 + + case 17: + // line 95 "format_fsm.rl" + + verb.Prec = (10 * verb.Prec) + (int(data[p]) - '0') + + case 18: + // line 99 "format_fsm.rl" + + verb.Mode = rune(data[p]) + te = p + 1 + verb.Raw = data[ts:te] + verb.Offset = ts + + err := formatAppend(&verb, &buf, a) + if err != nil { + return buf.String(), err + } + nextArg = verb.ArgNum + 1 + + // line 324 "format_fsm.go" + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _formatfsm_eof_actions[cs] + __nacts := uint(_formatfsm_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _formatfsm_actions[__acts-1] { + case 2: + // line 42 "format_fsm.rl" + + case 3: + // line 45 "format_fsm.rl" + + return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p) + + case 4: + // line 49 "format_fsm.rl" + + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + + // line 363 "format_fsm.go" + } + } + } + + _out: + { + } + } + + // line 171 "format_fsm.rl" + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // flag it anyway rather than just losing data from the end. 
+ if cs < formatfsm_first_final {
+ return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p)
+ }
+
+ return buf.String(), nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl
new file mode 100644
index 0000000..85d43bb
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl
@@ -0,0 +1,182 @@
+// This file is generated from format_fsm.rl. DO NOT EDIT.
+%%{
+ # (except you are actually in format_fsm.rl here, so edit away!)
+ machine formatfsm;
+}%%
+
+package stdlib
+
+import (
+ "bytes"
+ "fmt"
+ "unicode/utf8"
+
+ "github.com/zclconf/go-cty/cty"
+)
+
+%%{
+ write data;
+}%%
+
+func formatFSM(format string, a []cty.Value) (string, error) {
+ var buf bytes.Buffer
+ data := format
+ nextArg := 1 // arg numbers are 1-based
+ var verb formatVerb
+
+ %%{
+
+ action begin {
+ verb = formatVerb{
+ ArgNum: nextArg,
+ Prec: -1,
+ Width: -1,
+ }
+ ts = p
+ }
+
+ action emit {
+ buf.WriteByte(fc);
+ }
+
+ action finish_ok {
+ }
+
+ action finish_err {
+ return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p)
+ }
+
+ action err_char {
+ // We'll try to slurp a whole UTF-8 sequence here, to give the user
+ // better feedback.
+ r, _ := utf8.DecodeRuneInString(data[p:])
+ return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p)
+ }
+
+ action flag_sharp {
+ verb.Sharp = true
+ }
+ action flag_zero {
+ verb.Zero = true
+ }
+ action flag_minus {
+ verb.Minus = true
+ }
+ action flag_plus {
+ verb.Plus = true
+ }
+ action flag_space {
+ verb.Space = true
+ }
+
+ action argidx_reset {
+ verb.ArgNum = 0
+ }
+ action argidx_num {
+ verb.ArgNum = (10 * verb.ArgNum) + (int(fc) - '0')
+ }
+
+ action has_width {
+ verb.HasWidth = true
+ }
+ action width_reset {
+ verb.Width = 0
+ }
+ action width_num {
+ verb.Width = (10 * verb.Width) + (int(fc) - '0')
+ }
+
+ action has_prec {
+ verb.HasPrec = true
+ }
+ action prec_reset {
+ verb.Prec = 0
+ }
+ action prec_num {
+ verb.Prec = (10 * verb.Prec) + (int(fc) - '0')
+ }
+
+ action mode {
+ verb.Mode = rune(fc)
+ te = p+1
+ verb.Raw = data[ts:te]
+ verb.Offset = ts
+
+ err := formatAppend(&verb, &buf, a)
+ if err != nil {
+ return buf.String(), err
+ }
+ nextArg = verb.ArgNum + 1
+ }
+
+ # a number that isn't zero and doesn't have a leading zero
+ num = [1-9] [0-9]*;
+
+ flags = (
+ '0' @flag_zero |
+ '#' @flag_sharp |
+ '-' @flag_minus |
+ '+' @flag_plus |
+ ' ' @flag_space
+ )*;
+
+ argidx = ((
+ '[' (num $argidx_num) ']'
+ ) >argidx_reset)?;
+
+ width = (
+ ( num $width_num ) >width_reset %has_width
+ )?;
+
+ precision = (
+ ('.' ( digit* $prec_num )) >prec_reset %has_prec
+ )?;
+
+ # We accept any letter here, but will be more picky in formatAppend
+ mode = ('a'..'z' | 'A'..'Z') @mode;
+
+ fmt_verb = (
+ '%' @begin
+ flags
+ width
+ precision
+ argidx
+ mode
+ );
+
+ main := (
+ [^%] @emit |
+ '%%' @emit |
+ fmt_verb
+ )* @/finish_err %/finish_ok $!err_char;
+
+ }%%
+
+ // Ragel state
+ p := 0 // "Pointer" into data
+ pe := len(data) // End-of-data "pointer"
+ cs := 0 // current state (will be initialized by ragel-generated code)
+ ts := 0
+ te := 0
+ eof := pe
+
+ // Keep Go compiler happy even if generated code doesn't use these
+ _ = ts
+ _ = te
+ _ = eof
+
+ %%{
+ write init;
+ write exec;
+ }%%
+
+ // If we fall out here without being in a final state then we've
+ // encountered something that the scanner can't match, which should
+ // be impossible (the scanner matches all bytes _somehow_) but we'll
+ // flag it anyway rather than just losing data from the end.
+ if cs < formatfsm_first_final {
+ return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p)
+ }
+
+ return buf.String(), nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go
new file mode 100644
index 0000000..6b31f26
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go
@@ -0,0 +1,107 @@
+package stdlib
+
+import (
+ "fmt"
+
+ "github.com/zclconf/go-cty/cty"
+ "github.com/zclconf/go-cty/cty/convert"
+ "github.com/zclconf/go-cty/cty/function"
+)
+
+var EqualFunc = function.New(&function.Spec{
+ Params: []function.Parameter{
+ {
+ Name: "a",
+ Type: cty.DynamicPseudoType,
+ AllowUnknown: true,
+ AllowDynamicType: true,
+ AllowNull: true,
+ },
+ {
+ Name: "b",
+ Type: cty.DynamicPseudoType,
+ AllowUnknown: true,
+ AllowDynamicType: true,
+ AllowNull: true,
+ },
+ },
+ Type: function.StaticReturnType(cty.Bool),
+ Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+ return args[0].Equals(args[1]), nil
+ },
+})
+
+var NotEqualFunc = function.New(&function.Spec{
+ Params: []function.Parameter{
+ {
+ Name: "a",
+ Type: cty.DynamicPseudoType,
+ AllowUnknown: true,
+ AllowDynamicType: true,
+ AllowNull: true,
+ },
+ {
+ Name: "b",
+ Type: cty.DynamicPseudoType,
+ AllowUnknown: true,
+ AllowDynamicType: true,
+ AllowNull: true,
+ },
+ },
+ Type: function.StaticReturnType(cty.Bool),
+ Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+ return args[0].Equals(args[1]).Not(), nil
+ },
+})
+
+var CoalesceFunc = function.New(&function.Spec{
+ Params: []function.Parameter{},
+ VarParam: &function.Parameter{
+ Name: "vals",
+ Type: cty.DynamicPseudoType,
+ AllowUnknown: true,
+ AllowDynamicType: true,
+ AllowNull: true,
+ },
+ Type: func(args []cty.Value) (ret cty.Type, err error) {
+ argTypes := make([]cty.Type, len(args))
+ for i, val := range args {
+ argTypes[i] = val.Type()
+ }
+ retType, _ := convert.UnifyUnsafe(argTypes)
+ if retType == cty.NilType {
+ return cty.NilType, fmt.Errorf("all arguments must have the same type")
+ }
+ return retType, nil
+ },
+ Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+ for _, argVal := range args {
+ if !argVal.IsKnown() {
+ return cty.UnknownVal(retType), nil
+ }
+ if argVal.IsNull() {
+ continue
+ }
+
+ return convert.Convert(argVal, retType)
+ }
+ return cty.NilVal, fmt.Errorf("no non-null arguments")
+ },
+})
+
+// Equal determines whether the two given values are equal, returning a
+// bool
value. +func Equal(a cty.Value, b cty.Value) (cty.Value, error) { + return EqualFunc.Call([]cty.Value{a, b}) +} + +// NotEqual is the opposite of Equal. +func NotEqual(a cty.Value, b cty.Value) (cty.Value, error) { + return NotEqualFunc.Call([]cty.Value{a, b}) +} + +// Coalesce returns the first of the given arguments that is not null. If +// all arguments are null, an error is produced. +func Coalesce(vals ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(vals) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go new file mode 100644 index 0000000..07901c6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go @@ -0,0 +1,72 @@ +package stdlib + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +var JSONEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "val", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + val := args[0] + if !val.IsWhollyKnown() { + // We can't serialize unknowns, so if the value is unknown or + // contains any _nested_ unknowns then our result must be + // unknown. + return cty.UnknownVal(retType), nil + } + + buf, err := json.Marshal(val, val.Type()) + if err != nil { + return cty.NilVal, err + } + + return cty.StringVal(string(buf)), nil + }, +}) + +var JSONDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + buf := []byte(str.AsString()) + return json.ImpliedType(buf) + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + buf := []byte(args[0].AsString()) + return json.Unmarshal(buf, retType) + }, +}) + +// JSONEncode returns a JSON serialization of the given value. +func JSONEncode(val cty.Value) (cty.Value, error) { + return JSONEncodeFunc.Call([]cty.Value{val}) +} + +// JSONDecode parses the given JSON string and, if it is valid, returns the +// value it represents. +// +// Note that applying JSONDecode to the result of JSONEncode may not produce +// an identically-typed result, since JSON encoding is lossy for cty Types. +// The resulting value will consist only of primitive types, object types, and +// tuple types. 
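+//
+// For example (an illustrative sketch), decoding the string {"a":true}
+// produces an object value equivalent to:
+//
+//     cty.ObjectVal(map[string]cty.Value{"a": cty.True})
+//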
+func JSONDecode(str cty.Value) (cty.Value, error) { + return JSONDecodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go new file mode 100644 index 0000000..bd9b2e5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go @@ -0,0 +1,428 @@ +package stdlib + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var AbsoluteFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Absolute(), nil + }, +}) + +var AddFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Add(args[1]), nil + }, +}) + +var SubtractFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Sub can panic if the input values are infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't subtract infinity from itself") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Subtract(args[1]), nil + }, +}) + +var MultiplyFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. 
+ defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't multiply zero by infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Multiply(args[1]), nil + }, +}) + +var DivideFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Quo can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't divide zero by zero or infinity by infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Divide(args[1]), nil + }, +}) + +var ModuloFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't use modulo with zero and infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Modulo(args[1]), nil + }, +}) + +var GreaterThanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].GreaterThan(args[1]), nil + }, +}) + +var GreaterThanOrEqualToFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].GreaterThanOrEqualTo(args[1]), nil + }, +}) + +var LessThanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].LessThan(args[1]), nil + }, +}) + +var LessThanOrEqualToFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].LessThanOrEqualTo(args[1]), nil + }, +}) + +var NegateFunc = 
function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Negate(), nil + }, +}) + +var MinFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "numbers", + Type: cty.Number, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, fmt.Errorf("must pass at least one number") + } + + min := cty.PositiveInfinity + for _, num := range args { + if num.LessThan(min).True() { + min = num + } + } + + return min, nil + }, +}) + +var MaxFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "numbers", + Type: cty.Number, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, fmt.Errorf("must pass at least one number") + } + + max := cty.NegativeInfinity + for _, num := range args { + if num.GreaterThan(max).True() { + max = num + } + } + + return max, nil + }, +}) + +var IntFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bf := args[0].AsBigFloat() + if bf.IsInt() { + return args[0], nil + } + bi, _ := bf.Int(nil) + bf = (&big.Float{}).SetInt(bi) + return cty.NumberVal(bf), nil + }, +}) + +// Absolute returns the magnitude of the given number, without its sign. +// That is, it turns negative values into positive values. +func Absolute(num cty.Value) (cty.Value, error) { + return AbsoluteFunc.Call([]cty.Value{num}) +} + +// Add returns the sum of the two given numbers. +func Add(a cty.Value, b cty.Value) (cty.Value, error) { + return AddFunc.Call([]cty.Value{a, b}) +} + +// Subtract returns the difference between the two given numbers. +func Subtract(a cty.Value, b cty.Value) (cty.Value, error) { + return SubtractFunc.Call([]cty.Value{a, b}) +} + +// Multiply returns the product of the two given numbers. +func Multiply(a cty.Value, b cty.Value) (cty.Value, error) { + return MultiplyFunc.Call([]cty.Value{a, b}) +} + +// Divide returns a divided by b, where both a and b are numbers. +func Divide(a cty.Value, b cty.Value) (cty.Value, error) { + return DivideFunc.Call([]cty.Value{a, b}) +} + +// Negate returns the given number multipled by -1. +func Negate(num cty.Value) (cty.Value, error) { + return NegateFunc.Call([]cty.Value{num}) +} + +// LessThan returns true if a is less than b. +func LessThan(a cty.Value, b cty.Value) (cty.Value, error) { + return LessThanFunc.Call([]cty.Value{a, b}) +} + +// LessThanOrEqualTo returns true if a is less than b. +func LessThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) { + return LessThanOrEqualToFunc.Call([]cty.Value{a, b}) +} + +// GreaterThan returns true if a is less than b. +func GreaterThan(a cty.Value, b cty.Value) (cty.Value, error) { + return GreaterThanFunc.Call([]cty.Value{a, b}) +} + +// GreaterThanOrEqualTo returns true if a is less than b. 
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go
new file mode 100644
index 0000000..e2c77c5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go
@@ -0,0 +1,130 @@
+package stdlib
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+var ConcatFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	VarParam: &function.Parameter{
+		Name: "seqs",
+		Type: cty.DynamicPseudoType,
+	},
+	Type: func(args []cty.Value) (ret cty.Type, err error) {
+		if len(args) == 0 {
+			return cty.NilType, fmt.Errorf("at least one argument is required")
+		}
+
+		if args[0].Type().IsListType() {
+			// Possibly we're going to return a list, if all of our other
+			// args are also lists and we can find a common element type.
+			tys := make([]cty.Type, len(args))
+			for i, val := range args {
+				ty := val.Type()
+				if !ty.IsListType() {
+					tys = nil
+					break
+				}
+				tys[i] = ty
+			}
+
+			if tys != nil {
+				commonType, _ := convert.UnifyUnsafe(tys)
+				if commonType != cty.NilType {
+					return commonType, nil
+				}
+			}
+		}
+
+		etys := make([]cty.Type, 0, len(args))
+		for i, val := range args {
+			ety := val.Type()
+			switch {
+			case ety.IsTupleType():
+				etys = append(etys, ety.TupleElementTypes()...)
+			case ety.IsListType():
+				if !val.IsKnown() {
+					// We need to know the list to count its elements to
+					// build our tuple type, so any concat of an unknown
+					// list can't be typed yet.
+					return cty.DynamicPseudoType, nil
+				}
+
+				l := val.LengthInt()
+				subEty := ety.ElementType()
+				for j := 0; j < l; j++ {
+					etys = append(etys, subEty)
+				}
+			default:
+				return cty.NilType, function.NewArgErrorf(
+					i, "all arguments must be lists or tuples; got %s",
+					ety.FriendlyName(),
+				)
+			}
+		}
+		return cty.Tuple(etys), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		switch {
+		case retType.IsListType():
+			// If retType is a list type then we know that all of the
+			// given values will be lists and that they will either be of
+			// retType or of something we can convert to retType.
+			vals := make([]cty.Value, 0, len(args))
+			for i, list := range args {
+				list, err = convert.Convert(list, retType)
+				if err != nil {
+					// Conversion might fail because we used UnifyUnsafe
+					// to choose our return type.
+					return cty.NilVal, function.NewArgError(i, err)
+				}
+
+				it := list.ElementIterator()
+				for it.Next() {
+					_, v := it.Element()
+					vals = append(vals, v)
+				}
+			}
+			if len(vals) == 0 {
+				return cty.ListValEmpty(retType.ElementType()), nil
+			}
+
+			return cty.ListVal(vals), nil
+		case retType.IsTupleType():
+			// If retType is a tuple type then we could have a mixture of
+			// lists and tuples but we know they all have known values
+			// (because our params don't AllowUnknown) and we know that
+			// concatenating them all together will produce a tuple of
+			// retType because of the work we did in the Type function above.
+			vals := make([]cty.Value, 0, len(args))
+
+			for _, seq := range args {
+				// Both lists and tuples support ElementIterator, so this is easy.
+				it := seq.ElementIterator()
+				for it.Next() {
+					_, v := it.Element()
+					vals = append(vals, v)
+				}
+			}
+
+			return cty.TupleVal(vals), nil
+		default:
+			// should never happen if Type is working correctly above
+			panic("unsupported return type")
+		}
+	},
+})
+
+// Concat takes one or more sequences (lists or tuples) and returns the single
+// sequence that results from concatenating them together in order.
+//
+// If all of the given sequences are lists of the same element type then the
+// result is a list of that type. Otherwise, the result is of a tuple type
+// constructed from the given sequence types.
+func Concat(seqs ...cty.Value) (cty.Value, error) {
+	return ConcatFunc.Call(seqs)
+}
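To make the Type logic above concrete, here is an illustrative sketch (not part of the patch; it assumes the same imports as the earlier sketch) of how Concat's result type depends on its inputs:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	xs := cty.ListVal([]cty.Value{cty.StringVal("a")})
	ys := cty.ListVal([]cty.Value{cty.StringVal("b")})

	// All arguments are lists of a unifiable element type: the result is a list.
	l, _ := stdlib.Concat(xs, ys)
	fmt.Println(l.Type().FriendlyName()) // list of string

	// Mixing in a tuple falls through to the tuple-typed path.
	t, _ := stdlib.Concat(xs, cty.TupleVal([]cty.Value{cty.True}))
	fmt.Println(t.Type().IsTupleType()) // true
}
```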
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
new file mode 100644
index 0000000..100078f
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
@@ -0,0 +1,195 @@
+package stdlib
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty/convert"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+var SetHasElementFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "elem",
+			Type:             cty.DynamicPseudoType,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return args[0].HasElement(args[1]), nil
+	},
+})
+
+var SetUnionFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Union(s2)
+	}),
+})
+
+var SetIntersectionFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Intersection(s2)
+	}),
+})
+
+var SetSubtractFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Subtract(s2)
+	}),
+})
+
+var SetSymmetricDifferenceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.SymmetricDifference(s2)
+	}),
+})
+
+// SetHasElement determines whether the given set contains the given value as an
+// element.
+func SetHasElement(set cty.Value, elem cty.Value) (cty.Value, error) {
+	return SetHasElementFunc.Call([]cty.Value{set, elem})
+}
+
+// SetUnion returns a new set containing all of the elements from the given
+// sets, which must have element types that can all be converted to some
+// common type using the standard type unification rules. If conversion
+// is not possible, an error is returned.
+//
+// The union operation is performed after type conversion, which may result
+// in some previously-distinct values being conflated.
+//
+// At least one set must be provided.
+func SetUnion(sets ...cty.Value) (cty.Value, error) {
+	return SetUnionFunc.Call(sets)
+}
+
+// SetIntersection returns a new set containing the elements that exist
+// in all of the given sets, which must have element types that can all be
+// converted to some common type using the standard type unification rules.
+// If conversion is not possible, an error is returned.
+//
+// The intersection operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
+//
+// At least one set must be provided.
+func SetIntersection(sets ...cty.Value) (cty.Value, error) {
+	return SetIntersectionFunc.Call(sets)
+}
+
+// SetSubtract returns a new set containing the elements from the
+// first set that are not present in the second set. The sets must have
+// element types that can both be converted to some common type using the
+// standard type unification rules. If conversion is not possible, an error
+// is returned.
+//
+// The subtract operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
+func SetSubtract(a, b cty.Value) (cty.Value, error) {
+	return SetSubtractFunc.Call([]cty.Value{a, b})
+}
+
+// SetSymmetricDifference returns a new set containing the elements that
+// appear in exactly one of the given sets. The sets must have
+// element types that can all be converted to some common type using the
+// standard type unification rules. If conversion is not possible, an error
+// is returned.
+//
+// The difference operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
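Before the wrapper itself, a hedged usage sketch (not vendored code) of the set operations; the results shown assume SetSymmetricDifferenceFunc performs a true symmetric difference, as its name and documentation describe:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	a := cty.SetVal([]cty.Value{cty.StringVal("x"), cty.StringVal("y")})
	b := cty.SetVal([]cty.Value{cty.StringVal("y"), cty.StringVal("z")})

	union, _ := stdlib.SetUnion(a, b)             // x, y, z
	sub, _ := stdlib.SetSubtract(a, b)            // x
	sym, _ := stdlib.SetSymmetricDifference(a, b) // x, z

	fmt.Println(union.LengthInt(), sub.LengthInt(), sym.LengthInt()) // 3 1 2
}
```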
+func SetSymmetricDifference(sets ...cty.Value) (cty.Value, error) { + return SetSymmetricDifferenceFunc.Call(sets) +} + +func setOperationReturnType(args []cty.Value) (ret cty.Type, err error) { + var etys []cty.Type + for _, arg := range args { + etys = append(etys, arg.Type().ElementType()) + } + newEty, _ := convert.UnifyUnsafe(etys) + if newEty == cty.NilType { + return cty.NilType, fmt.Errorf("given sets must all have compatible element types") + } + return cty.Set(newEty), nil +} + +func setOperationImpl(f func(s1, s2 cty.ValueSet) cty.ValueSet) function.ImplFunc { + return func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + first := args[0] + first, err = convert.Convert(first, retType) + if err != nil { + return cty.NilVal, function.NewArgError(0, err) + } + + set := first.AsValueSet() + for i, arg := range args[1:] { + arg, err := convert.Convert(arg, retType) + if err != nil { + return cty.NilVal, function.NewArgError(i+1, err) + } + + argSet := arg.AsValueSet() + set = f(set, argSet) + } + return cty.SetValFromValueSet(set), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go new file mode 100644 index 0000000..d7c89fa --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go @@ -0,0 +1,234 @@ +package stdlib + +import ( + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "github.com/apparentlymart/go-textseg/textseg" +) + +var UpperFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + out := strings.ToUpper(in) + return cty.StringVal(out), nil + }, +}) + +var LowerFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + out := strings.ToLower(in) + return cty.StringVal(out), nil + }, +}) + +var ReverseFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := []byte(args[0].AsString()) + out := make([]byte, len(in)) + pos := len(out) + + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + cluster := in[i : i+d] + pos -= len(cluster) + copy(out[pos:], cluster) + i += d + } + + return cty.StringVal(string(out)), nil + }, +}) + +var StrlenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + l := 0 + + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + l++ + i += d + } + + return cty.NumberIntVal(int64(l)), nil + }, +}) + +var SubstrFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: 
true,
+		},
+		{
+			Name:             "offset",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "length",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		in := []byte(args[0].AsString())
+		var offset, length int
+
+		var err error
+		err = gocty.FromCtyValue(args[1], &offset)
+		if err != nil {
+			return cty.NilVal, err
+		}
+		err = gocty.FromCtyValue(args[2], &length)
+		if err != nil {
+			return cty.NilVal, err
+		}
+
+		if offset < 0 {
+			totalLenNum, err := Strlen(args[0])
+			if err != nil {
+				// should never happen
+				panic("Strlen returned an error")
+			}
+
+			var totalLen int
+			err = gocty.FromCtyValue(totalLenNum, &totalLen)
+			if err != nil {
+				// should never happen
+				panic("Strlen returned a non-int number")
+			}
+
+			offset += totalLen
+		}
+
+		sub := in
+		pos := 0
+		var i int
+
+		// First we'll seek forward to our offset
+		if offset > 0 {
+			for i = 0; i < len(sub); {
+				d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true)
+				i += d
+				pos++
+				if pos == offset {
+					break
+				}
+				if i >= len(in) {
+					return cty.StringVal(""), nil
+				}
+			}
+
+			sub = sub[i:]
+		}
+
+		if length < 0 {
+			// Taking the remainder of the string is a fast path since
+			// we can just return the rest of the buffer verbatim.
+			return cty.StringVal(string(sub)), nil
+		}
+
+		// Otherwise we need to start seeking forward again until we
+		// reach the length we want.
+		pos = 0
+		for i = 0; i < len(sub); {
+			d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true)
+			i += d
+			pos++
+			if pos == length {
+				break
+			}
+		}
+
+		sub = sub[:i]
+
+		return cty.StringVal(string(sub)), nil
+	},
+})
+
+// Upper is a Function that converts a given string to uppercase.
+func Upper(str cty.Value) (cty.Value, error) {
+	return UpperFunc.Call([]cty.Value{str})
+}
+
+// Lower is a Function that converts a given string to lowercase.
+func Lower(str cty.Value) (cty.Value, error) {
+	return LowerFunc.Call([]cty.Value{str})
+}
+
+// Reverse is a Function that reverses the order of the characters in the
+// given string.
+//
+// As usual, "character" for the sake of this function is a grapheme cluster,
+// so combining diacritics (for example) will be considered together as a
+// single character.
+func Reverse(str cty.Value) (cty.Value, error) {
+	return ReverseFunc.Call([]cty.Value{str})
+}
+
+// Strlen is a Function that returns the length of the given string in
+// characters.
+//
+// As usual, "character" for the sake of this function is a grapheme cluster,
+// so combining diacritics (for example) will be considered together as a
+// single character.
+func Strlen(str cty.Value) (cty.Value, error) {
+	return StrlenFunc.Call([]cty.Value{str})
+}
+
+// Substr is a Function that extracts a sequence of characters from another
+// string and creates a new string.
+//
+// As usual, "character" for the sake of this function is a grapheme cluster,
+// so combining diacritics (for example) will be considered together as a
+// single character.
+//
+// The "offset" index may be negative, in which case it is relative to the
+// end of the given string.
+//
+// The "length" may be -1, in which case the remainder of the string after
+// the given offset will be returned.
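A short, illustrative sketch of the grapheme-cluster behavior documented above; this is not from the vendored source, and assumes "é" is treated as a single cluster even when composed of multiple code points:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	s := cty.StringVal("héllo")

	n, _ := stdlib.Strlen(s)  // counts grapheme clusters, not bytes
	r, _ := stdlib.Reverse(s) // keeps combining marks attached

	// A negative offset counts back from the end of the string.
	sub, _ := stdlib.Substr(s, cty.NumberIntVal(-3), cty.NumberIntVal(3))

	fmt.Println(n.AsBigFloat(), r.AsString(), sub.AsString()) // 5 olléh llo
}
```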
+func Substr(str cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return SubstrFunc.Call([]cty.Value{str, offset, length}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go new file mode 100644 index 0000000..3495550 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go @@ -0,0 +1,31 @@ +package function + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Unpredictable wraps a given function such that it retains the same arguments +// and type checking behavior but will return an unknown value when called. +// +// It is recommended that most functions be "pure", which is to say that they +// will always produce the same value given particular input. However, +// sometimes it is necessary to offer functions whose behavior depends on +// some external state, such as reading a file or determining the current time. +// In such cases, an unpredictable wrapper might be used to stand in for +// the function during some sort of prior "checking" phase in order to delay +// the actual effect until later. +// +// While Unpredictable can support a function that isn't pure in its +// implementation, it still expects a function to be pure in its type checking +// behavior, except for the special case of returning cty.DynamicPseudoType +// if it is not yet able to predict its return value based on current argument +// information. +func Unpredictable(f Function) Function { + newSpec := *f.spec // shallow copy + newSpec.Impl = unpredictableImpl + return New(&newSpec) +} + +func unpredictableImpl(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.UnknownVal(retType), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gob.go b/vendor/github.com/zclconf/go-cty/cty/gob.go new file mode 100644 index 0000000..3d73199 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gob.go @@ -0,0 +1,125 @@ +package cty + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" +) + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Values to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent values of capsule types in gob, +// because the types themselves cannot be represented. +func (val Value) GobEncode() ([]byte, error) { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gv := gobValue{ + Version: 0, + Ty: val.ty, + V: val.v, + } + + err := enc.Encode(gv) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Value: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementation of the gob.GobDecoder interface, which +// inverts the operation performed by GobEncode. See the documentation of +// GobEncode for considerations when using cty.Value instances with gob. +func (val *Value) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gv gobValue + err := dec.Decode(&gv) + if err != nil { + return fmt.Errorf("error decoding cty.Value: %s", err) + } + if gv.Version != 0 { + return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version) + } + + // big.Float seems to, for some reason, lose its "pointerness" when we + // round-trip it, so we'll fix that here. 
+	if bf, ok := gv.V.(big.Float); ok {
+		gv.V = &bf
+	}
+
+	val.ty = gv.Ty
+	val.v = gv.V
+
+	return nil
+}
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Types to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent capsule types in gob.
+func (t Type) GobEncode() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gt := gobType{
+		Version: 0,
+		Impl:    t.typeImpl,
+	}
+
+	err := enc.Encode(gt)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Type: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// reverses the encoding performed by GobEncode to allow types to be recovered
+// from gob buffers.
+func (t *Type) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gt gobType
+	err := dec.Decode(&gt)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Type: %s", err)
+	}
+	if gt.Version != 0 {
+		return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
+	}
+
+	t.typeImpl = gt.Impl
+
+	return nil
+}
+
+// Capsule types cannot currently be gob-encoded, because they rely on pointer
+// equality and we have no way to recover the original pointer on decode.
+func (t *capsuleType) GobEncode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName())
+}
+
+func (t *capsuleType) GobDecode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName())
+}
+
+type gobValue struct {
+	Version int
+	Ty      Type
+	V       interface{}
+}
+
+type gobType struct {
+	Version int
+	Impl    typeImpl
+}
+
+type gobCapsuleTypeImpl struct {
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
new file mode 100644
index 0000000..a5177d2
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
@@ -0,0 +1,7 @@
+// Package gocty deals with converting between cty Values and native go
+// values.
+//
+// It operates under a similar principle to the encoding/json and
+// encoding/xml packages in the standard library, using reflection to
+// populate native Go data structures from cty values and vice-versa.
+package gocty
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
new file mode 100644
index 0000000..94ffd2f
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
@@ -0,0 +1,43 @@
+package gocty
+
+import (
+	"math/big"
+	"reflect"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+var valueType = reflect.TypeOf(cty.Value{})
+var typeType = reflect.TypeOf(cty.Type{})
+
+var setType = reflect.TypeOf(set.Set{})
+
+var bigFloatType = reflect.TypeOf(big.Float{})
+var bigIntType = reflect.TypeOf(big.Int{})
+
+var emptyInterfaceType = reflect.TypeOf(interface{}(nil))
+
+var stringType = reflect.TypeOf("")
+
+// structTagIndices interrogates the fields of the given type (which must
+// be a struct type, or we'll panic) and returns a map from the cty
+// attribute names declared via struct tags to the indices of the
+// fields holding those tags.
+//
+// This function will panic if two fields within the struct are tagged with
+// the same cty attribute name.
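The gob support above can be exercised with a round-trip like the following sketch. This is not vendored code, and it assumes the package registers its internal type implementations with gob (as this file's design implies) so that a primitive value survives the trip:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	in := cty.StringVal("hello")

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil { // uses Value.GobEncode
		panic(err)
	}

	var out cty.Value
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil { // uses Value.GobDecode
		panic(err)
	}

	fmt.Println(out.RawEquals(in)) // true
}
```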
+func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go new file mode 100644 index 0000000..642501b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go @@ -0,0 +1,528 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/set" +) + +// ToCtyValue produces a cty.Value from a Go value. The result will conform +// to the given type, or an error will be returned if this is not possible. +// +// The target type serves as a hint to resolve ambiguities in the mapping. +// For example, the Go type set.Set tells us that the value is a set but +// does not describe the set's element type. This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. +func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. 
+ path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. 
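As an illustrative sketch of the dispatch above (not part of the patch), the public entry point maps Go values onto the requested cty type:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

func main() {
	// A Go slice converts to a cty list; the target type resolves ambiguity.
	v, err := gocty.ToCtyValue([]string{"a", "b"}, cty.List(cty.String))
	if err != nil {
		panic(err)
	}
	fmt.Println(v.LengthInt()) // 2

	// Pointers are unwrapped, and a nil pointer becomes a null value.
	var n *int
	nv, _ := gocty.ToCtyValue(n, cty.Number)
	fmt.Println(nv.IsNull()) // true
}
```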
+ path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, 
path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. 
+ path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/out.go b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go new file mode 100644 index 0000000..99b65a7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go @@ -0,0 +1,705 @@ +package gocty + +import ( + "math/big" + "reflect" + + "math" + + "github.com/zclconf/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + if val.True() { + target.Set(reflect.ValueOf(true)) + } else { + target.Set(reflect.ValueOf(false)) + } + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.Set(reflect.ValueOf(iv).Convert(target.Type())) + + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.Set(reflect.ValueOf(iv).Convert(target.Type())) + + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32: + fv, accuracy := bf.Float32() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(float64(fv), 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat32, math.MaxFloat32) + } + } + target.Set(reflect.ValueOf(fv)) + return nil + case reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. 
+ if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.Set(reflect.ValueOf(fv)) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.AssignableTo(target.Type()): + // Easy! + target.Set(reflect.ValueOf(bf).Elem()) + return nil + + case bigIntType.AssignableTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem()) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.String: + target.Set(reflect.ValueOf(val.AsString())) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var 
err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. 
This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
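A sketch of decoding in the other direction (not vendored code), showing both a successful decode into a Go slice and the cty-oriented error wording described above:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

func main() {
	var words []string
	list := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	if err := gocty.FromCtyValue(list, &words); err != nil { // target must be a pointer
		panic(err)
	}
	fmt.Println(words) // [a b]

	// Range errors are phrased in cty terms rather than Go terms.
	var b byte
	err := gocty.FromCtyValue(cty.NumberIntVal(300), &b)
	fmt.Println(err) // value must be a whole number, between 0 and 255 inclusive
}
```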
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go new file mode 100644 index 0000000..ce4c8f1 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
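An illustrative sketch (not from the vendored source) of deriving a cty object type from a tagged Go struct, using the `cty:"..."` field tags that structTagIndices reads; the struct name and fields here are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

// The cty struct tags name the object attribute for each field.
type server struct {
	Name  string `cty:"name"`
	Count int    `cty:"count"`
}

func main() {
	ty, err := gocty.ImpliedType(server{})
	if err != nil {
		panic(err)
	}

	want := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"count": cty.Number,
	})
	fmt.Println(ty.Equals(want)) // true
}
```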
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/helper.go b/vendor/github.com/zclconf/go-cty/cty/helper.go new file mode 100644 index 0000000..1b88e9f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
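typeCheck itself is unexported, but its short-circuit behavior is visible through public operations such as Value.Add; a sketch (example code, not vendored), assuming the arithmetic methods behave as documented:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        // An unknown operand short-circuits: the result is an unknown value
        // of the operation's return type.
        sum := cty.NumberIntVal(1).Add(cty.UnknownVal(cty.Number))
        fmt.Println(sum.IsKnown()) // false

        // A dynamic operand short-circuits to DynamicVal, which Add then
        // upgrades to an unknown number (see forceShortCircuitType below).
        dyn := cty.NumberIntVal(1).Add(cty.DynamicVal)
        fmt.Println(dyn.Type() == cty.Number) // true
    }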
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) {
+	hasDynamic := false
+	hasUnknown := false
+
+	for i, val := range values {
+		if val.ty == DynamicPseudoType {
+			hasDynamic = true
+			continue
+		}
+
+		if !val.Type().Equals(required) {
+			return nil, fmt.Errorf(
+				"type mismatch: want %s but value %d is %s",
+				required.FriendlyName(),
+				i, val.ty.FriendlyName(),
+			)
+		}
+
+		if val.v == unknown {
+			hasUnknown = true
+		}
+	}
+
+	if hasDynamic {
+		return &DynamicVal, nil
+	}
+
+	if hasUnknown {
+		ret := UnknownVal(ret)
+		return &ret, nil
+	}
+
+	return nil, nil
+}
+
+// mustTypeCheck is a wrapper around typeCheck that immediately panics if
+// any error is returned.
+func mustTypeCheck(required Type, ret Type, values ...Value) *Value {
+	shortCircuit, err := typeCheck(required, ret, values...)
+	if err != nil {
+		panic(err)
+	}
+	return shortCircuit
+}
+
+// forceShortCircuitType takes the return value from mustTypeCheck and
+// replaces it with an unknown of the given type if the original value was
+// DynamicVal.
+//
+// This is useful for operations that are specified to always return a
+// particular type, since then a dynamic result can safely be "upgraded" to
+// a strongly-typed unknown, which then allows subsequent operations to
+// be actually type-checked.
+//
+// It is safe to use this only if the operation in question is defined as
+// returning either a value of the given type or panicking, since we know
+// then that subsequent operations won't run if the operation panics.
+//
+// If the given short-circuit value is *not* DynamicVal then it must be
+// of the given type, or this function will panic.
+func forceShortCircuitType(shortCircuit *Value, ty Type) *Value {
+	if shortCircuit == nil {
+		return nil
+	}
+
+	if shortCircuit.ty == DynamicPseudoType {
+		ret := UnknownVal(ty)
+		return &ret
+	}
+
+	if !shortCircuit.ty.Equals(ty) {
+		panic("forceShortCircuitType got value of wrong type")
+	}
+
+	return shortCircuit
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json.go b/vendor/github.com/zclconf/go-cty/cty/json.go
new file mode 100644
index 0000000..c421a62
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json.go
@@ -0,0 +1,176 @@
+package cty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+// MarshalJSON is an implementation of json.Marshaler that allows Type
+// instances to be serialized as JSON.
+//
+// All standard types can be serialized, but capsule types cannot since there
+// is no way to automatically recover the original pointer and capsule types
+// compare by equality.
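For illustration only (not vendored code), a round trip through encoding/json shows the nested-array encoding this method produces:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        buf, err := json.Marshal(cty.List(cty.Object(map[string]cty.Type{
            "name": cty.String,
        })))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(buf)) // ["list",["object",{"name":"string"}]]
    }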
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/doc.go 
b/vendor/github.com/zclconf/go-cty/cty/json/doc.go new file mode 100644 index 0000000..8916513 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/doc.go @@ -0,0 +1,11 @@ +// Package json provides functions for serializing cty types and values in +// JSON format, and for decoding them again. +// +// Since the cty type system is a superset of the JSON type system, +// round-tripping through JSON is lossy unless type information is provided +// both at encoding time and decoding time. Callers of this package are +// therefore suggested to define their expected structure as a cty.Type +// and pass it in consistently both when encoding and when decoding, though +// default (type-lossy) behavior is provided for situations where the precise +// representation of the data is not significant. +package json diff --git a/vendor/github.com/zclconf/go-cty/cty/json/marshal.go b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go new file mode 100644 index 0000000..f7bea1a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go @@ -0,0 +1,189 @@ +package json + +import ( + "bytes" + "encoding/json" + "sort" + + "github.com/zclconf/go-cty/cty" +) + +func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, b) + } + + if val.IsNull() { + b.WriteString("null") + return nil + } + + if !val.IsKnown() { + return path.NewErrorf("value is not known") + } + + // The caller should've guaranteed that the given val is conformant with + // the given type t, so we'll proceed under that assumption here. 
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
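A brief sketch (example code, not part of the patch) of the wrapping this produces, using this package's own Marshal entry point:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        ctyjson "github.com/zclconf/go-cty/cty/json"
    )

    func main() {
        // Marshalling against cty.DynamicPseudoType triggers the wrapping
        // object, so the concrete type can be recovered when decoding.
        buf, err := ctyjson.Marshal(cty.StringVal("hi"), cty.DynamicPseudoType)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(buf)) // {"value":"hi","type":"string"}
    }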
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error {
+	typeJSON, err := MarshalType(val.Type())
+	if err != nil {
+		return path.NewErrorf("failed to serialize type: %s", err)
+	}
+	b.WriteString(`{"value":`)
+	// The value marshal can fail too (e.g. for unknown values), so its
+	// error must be propagated rather than silently discarded.
+	if err := marshal(val, val.Type(), path, b); err != nil {
+		return err
+	}
+	b.WriteString(`,"type":`)
+	b.Write(typeJSON)
+	b.WriteRune('}')
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/simple.go b/vendor/github.com/zclconf/go-cty/cty/json/simple.go
new file mode 100644
index 0000000..507c9cc
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/simple.go
@@ -0,0 +1,41 @@
+package json
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// SimpleJSONValue is a wrapper around cty.Value that adds implementations of
+// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic
+// encoding and decoding of values.
+//
+// The couplet Marshal and Unmarshal both take extra type information to
+// inform the encoding and decoding process so that all of the cty types
+// can be represented even though JSON's type system is a subset.
+//
+// SimpleJSONValue instead takes the approach of discarding the value's type
+// information and then deriving a new type from the stored structure when
+// decoding. This results in the same data being returned but not necessarily
+// with exactly the same type.
+//
+// For information on how types are inferred when decoding, see the
+// documentation of the function ImpliedType.
+type SimpleJSONValue struct {
+	cty.Value
+}
+
+// MarshalJSON is an implementation of json.Marshaler. See the documentation
+// of SimpleJSONValue for more information.
+func (v SimpleJSONValue) MarshalJSON() ([]byte, error) {
+	return Marshal(v.Value, v.Type())
+}
+
+// UnmarshalJSON is an implementation of json.Unmarshaler. See the
+// documentation of SimpleJSONValue for more information.
+func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error {
+	t, err := ImpliedType(buf)
+	if err != nil {
+		return err
+	}
+	v.Value, err = Unmarshal(buf, t)
+	return err
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type.go b/vendor/github.com/zclconf/go-cty/cty/json/type.go
new file mode 100644
index 0000000..9131c6c
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/type.go
@@ -0,0 +1,23 @@
+package json
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// MarshalType returns a JSON serialization of the given type.
+//
+// This is just a thin wrapper around t.MarshalJSON, for symmetry with
+// UnmarshalType.
+func MarshalType(t cty.Type) ([]byte, error) {
+	return t.MarshalJSON()
+}
+
+// UnmarshalType decodes a JSON serialization of the given type as produced
+// by either Type.MarshalJSON or MarshalType.
+//
+// This is a convenience wrapper around Type.UnmarshalJSON.
+func UnmarshalType(buf []byte) (cty.Type, error) {
+	var t cty.Type
+	err := t.UnmarshalJSON(buf)
+	return t, err
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
new file mode 100644
index 0000000..1a97306
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
@@ -0,0 +1,171 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// ImpliedType returns the cty Type implied by the structure of the given
+// JSON-compliant buffer. This function implements the default type mapping
+// behavior used when decoding arbitrary JSON without explicit cty Type
+// information.
+//
+// The rules are as follows:
+//
+// JSON strings, numbers and bools map to their equivalent primitive type in
+// cty.
+//
+// JSON objects map to cty object types, with the attributes defined by the
+// object keys and the types of their values.
+//
+// JSON arrays map to cty tuple types, with the elements defined by the
+// types of the array members.
+//
+// Any nulls are typed as DynamicPseudoType, so callers of this function
+// must be prepared to deal with this. Callers that do not wish to deal with
+// dynamic typing should not use this function and should instead describe
+// their required types explicitly with a cty.Type instance when decoding.
+//
+// Any JSON syntax errors will be returned as an error, and the type will
+// be the invalid value cty.NilType.
+func ImpliedType(buf []byte) (cty.Type, error) {
+	r := bytes.NewReader(buf)
+	dec := json.NewDecoder(r)
+	dec.UseNumber()
+
+	ty, err := impliedType(dec)
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	if dec.More() {
+		return cty.NilType, fmt.Errorf("extraneous data after JSON object")
+	}
+
+	return ty, nil
+}
+
+func impliedType(dec *json.Decoder) (cty.Type, error) {
+	tok, err := dec.Token()
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	return impliedTypeForTok(tok, dec)
+}
+
+func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) {
+	if tok == nil {
+		return cty.DynamicPseudoType, nil
+	}
+
+	switch ttok := tok.(type) {
+	case bool:
+		return cty.Bool, nil
+
+	case json.Number:
+		return cty.Number, nil
+
+	case string:
+		return cty.String, nil
+
+	case json.Delim:
+
+		switch rune(ttok) {
+		case '{':
+			return impliedObjectType(dec)
+		case '[':
+			return impliedTupleType(dec)
+		default:
+			return cty.NilType, fmt.Errorf("unexpected token %q", ttok)
+		}
+
+	default:
+		return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok)
+	}
+}
+
+func impliedObjectType(dec *json.Decoder) (cty.Type, error) {
+	// By the time we get in here, we've already consumed the { delimiter
+	// and so our next token should be the first object key.
+
+	var atys map[string]cty.Type
+
+	for {
+		// Read the object key first
+		tok, err := dec.Token()
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		if ttok, ok := tok.(json.Delim); ok {
+			if rune(ttok) != '}' {
+				return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok)
+			}
+			break
+		}
+
+		key, ok := tok.(string)
+		if !ok {
+			return cty.NilType, fmt.Errorf("expected string but found %T", tok)
+		}
+
+		// Now read the value
+		tok, err = dec.Token()
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		aty, err := impliedTypeForTok(tok, dec)
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		if atys == nil {
+			atys = make(map[string]cty.Type)
+		}
+		atys[key] = aty
+	}
+
+	if len(atys) == 0 {
+		return cty.EmptyObject, nil
+	}
+
+	return cty.Object(atys), nil
+}
+
+func impliedTupleType(dec *json.Decoder) (cty.Type, error) {
+	// By the time we get in here, we've already consumed the [ delimiter
+	// and so our next token should be the first value.
+ + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != ']' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go new file mode 100644 index 0000000..155f0b8 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, 
path.NewErrorf("failed to read list value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + 
vals := make(map[string]cty.Value) + + { + objPath := path // some errors report from the object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder 
{
+	r := bytes.NewReader(buf)
+	dec := json.NewDecoder(r)
+	dec.UseNumber()
+	return dec
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/value.go b/vendor/github.com/zclconf/go-cty/cty/json/value.go
new file mode 100644
index 0000000..f2f7dd5
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/value.go
@@ -0,0 +1,65 @@
+package json
+
+import (
+	"bytes"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// Marshal produces a JSON representation of the given value that can later
+// be decoded into a value of the given type.
+//
+// A type is specified separately to allow for the given type to include
+// cty.DynamicPseudoType to represent situations where any type is permitted
+// and so type information must be included to allow recovery of the stored
+// structure when decoding.
+//
+// The given type will also be used to attempt automatic conversions of any
+// non-conformant types in the given value, although this will not always
+// be possible. If the value cannot be made to be conformant then an error is
+// returned, which may be a cty.PathError.
+//
+// Capsule-typed values can be marshalled, but with some caveats. Since
+// capsule values are compared by pointer equality, it is impossible to recover
+// a value that will compare equal to the original value. Additionally,
+// it's not possible to JSON-serialize the capsule type itself, so it's not
+// valid to use capsule types within parts of the value that are conformed to
+// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as
+// the encapsulated type itself is serializable with the Marshal function
+// in encoding/json.
+func Marshal(val cty.Value, t cty.Type) ([]byte, error) {
+	errs := val.Type().TestConformance(t)
+	if errs != nil {
+		// Attempt a conversion
+		var err error
+		val, err = convert.Convert(val, t)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// From this point onward, val can be assumed to be conforming to t.
+
+	buf := &bytes.Buffer{}
+	var path cty.Path
+	err := marshal(val, t, path, buf)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// Unmarshal decodes a JSON representation of the given value into a cty Value
+// conforming to the given type.
+//
+// While decoding, type conversions will be done where possible to make
+// the result conformant even if the types given in JSON are not exactly
+// correct. If conversion isn't possible then an error is returned, which
+// may be a cty.PathError.
+func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) {
+	var path cty.Path
+	return unmarshal(buf, t, path)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/list_type.go b/vendor/github.com/zclconf/go-cty/cty/list_type.go
new file mode 100644
index 0000000..eadc865
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/list_type.go
@@ -0,0 +1,68 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// TypeList instances represent specific list types. Each distinct ElementType
+// creates a distinct, non-equal list type.
+type typeList struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// List creates a list type with the given element Type.
+//
+// List types are CollectionType implementations.
+func List(elem Type) Type {
+	return Type{
+		typeList{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a list whose element type is
+// equal to that of the receiver.
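An example (editor's code, not vendored) exercising Equals and the ListElementType helper defined below:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        a := cty.List(cty.String)
        fmt.Println(a.Equals(cty.List(cty.String))) // true: same element type
        fmt.Println(a.Equals(cty.List(cty.Bool)))   // false: element types differ

        // ListElementType folds the list-type test and the element lookup
        // into one conditional.
        if et := a.ListElementType(); et != nil {
            fmt.Println(et.Equals(cty.String)) // true
        }
    }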
+func (t typeList) Equals(other Type) bool {
+	ot, isList := other.typeImpl.(typeList)
+	if !isList {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeList) FriendlyName() string {
+	return "list of " + t.ElementTypeT.FriendlyName()
+}
+
+func (t typeList) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeList) GoString() string {
+	return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT)
+}
+
+// IsListType returns true if the given type is a list type, regardless of its
+// element type.
+func (t Type) IsListType() bool {
+	_, ok := t.typeImpl.(typeList)
+	return ok
+}
+
+// ListElementType is a convenience method that checks if the given type is
+// a list type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.ListElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) ListElementType() *Type {
+	if lt, ok := t.typeImpl.(typeList); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/map_type.go b/vendor/github.com/zclconf/go-cty/cty/map_type.go
new file mode 100644
index 0000000..ae9abae
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/map_type.go
@@ -0,0 +1,68 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// TypeMap instances represent specific map types. Each distinct ElementType
+// creates a distinct, non-equal map type.
+type typeMap struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Map creates a map type with the given element Type.
+//
+// Map types are CollectionType implementations.
+func Map(elem Type) Type {
+	return Type{
+		typeMap{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a map whose element type is
+// equal to that of the receiver.
+func (t typeMap) Equals(other Type) bool {
+	ot, isMap := other.typeImpl.(typeMap)
+	if !isMap {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeMap) FriendlyName() string {
+	return "map of " + t.ElementTypeT.FriendlyName()
+}
+
+func (t typeMap) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeMap) GoString() string {
+	return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
+}
+
+// IsMapType returns true if the given type is a map type, regardless of its
+// element type.
+func (t Type) IsMapType() bool {
+	_, ok := t.typeImpl.(typeMap)
+	return ok
+}
+
+// MapElementType is a convenience method that checks if the given type is
+// a map type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.MapElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) MapElementType() *Type {
+	if lt, ok := t.typeImpl.(typeMap); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/null.go b/vendor/github.com/zclconf/go-cty/cty/null.go
new file mode 100644
index 0000000..d58d028
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/null.go
@@ -0,0 +1,14 @@
+package cty
+
+// NullVal returns a null value of the given type. A null can be created of any
+// type, but operations on such values will always panic. Calling applications
+// are encouraged to use nulls only sparingly, particularly when user-provided
+// expressions are to be evaluated, since the presence of nulls creates a
+// much higher chance of evaluation errors that can't be caught by a type
+// checker.
+func NullVal(t Type) Value {
+	return Value{
+		ty: t,
+		v:  nil,
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/object_type.go b/vendor/github.com/zclconf/go-cty/cty/object_type.go
new file mode 100644
index 0000000..2540883
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/object_type.go
@@ -0,0 +1,135 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeObject struct {
+	typeImplSigil
+	AttrTypes map[string]Type
+}
+
+// Object creates an object type with the given attribute types.
+//
+// After a map is passed to this function the caller must no longer access it,
+// since ownership is transferred to this library.
+func Object(attrTypes map[string]Type) Type {
+	attrTypesNorm := make(map[string]Type, len(attrTypes))
+	for k, v := range attrTypes {
+		attrTypesNorm[NormalizeString(k)] = v
+	}
+
+	return Type{
+		typeObject{
+			AttrTypes: attrTypesNorm,
+		},
+	}
+}
+
+func (t typeObject) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeObject); ok {
+		if len(t.AttrTypes) != len(ot.AttrTypes) {
+			// Fast path: if we don't have the same number of attributes
+			// then we can't possibly be equal. This also avoids the need
+			// to test attributes in both directions below, since we know
+			// there can't be extras in "other".
+			return false
+		}
+
+		for attr, ty := range t.AttrTypes {
+			oty, ok := ot.AttrTypes[attr]
+			if !ok {
+				return false
+			}
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeObject) FriendlyName() string {
+	// There isn't really a friendly way to write an object type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// an object type FriendlyName in its entirety. For example, a caller
+	// could produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "object"
+}
+
+func (t typeObject) GoString() string {
+	if len(t.AttrTypes) == 0 {
+		return "cty.EmptyObject"
+	}
+	return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes)
+}
+
+// EmptyObject is a shorthand for Object(map[string]Type{}), to more
+// easily talk about the empty object type.
+var EmptyObject Type
+
+// EmptyObjectVal is the only possible non-null, non-unknown value of type
+// EmptyObject.
+var EmptyObjectVal Value
+
+func init() {
+	EmptyObject = Object(map[string]Type{})
+	EmptyObjectVal = Value{
+		ty: EmptyObject,
+		v:  map[string]interface{}{},
+	}
+}
+
+// IsObjectType returns true if the given type is an object type, regardless
+// of its element type.
+func (t Type) IsObjectType() bool {
+	_, ok := t.typeImpl.(typeObject)
+	return ok
+}
+
+// HasAttribute returns true if the receiver has an attribute with the given
+// name, regardless of its type. Will panic if the receiver isn't an object
+// type; use IsObjectType to determine whether this operation will succeed.
+func (t Type) HasAttribute(name string) bool {
+	name = NormalizeString(name)
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		_, hasAttr := ot.AttrTypes[name]
+		return hasAttr
+	}
+	panic("HasAttribute on non-object Type")
+}
+
+// AttributeType returns the type of the attribute with the given name. Will
+// panic if the receiver is not an object type (use IsObjectType to confirm)
+// or if the object type has no such attribute (use HasAttribute to confirm).
+func (t Type) AttributeType(name string) Type {
+	name = NormalizeString(name)
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		aty, hasAttr := ot.AttrTypes[name]
+		if !hasAttr {
+			panic("no such attribute")
+		}
+		return aty
+	}
+	panic("AttributeType on non-object Type")
+}
+
+// AttributeTypes returns a map from attribute names to their associated
+// types. Will panic if the receiver is not an object type (use IsObjectType
+// to confirm).
+//
+// The returned map is part of the internal state of the type, and is provided
+// for read access only. It is forbidden for any caller to modify the returned
+// map. For many purposes the attribute-related methods of Value are more
+// appropriate and more convenient to use.
+func (t Type) AttributeTypes() map[string]Type {
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		return ot.AttrTypes
+	}
+	panic("AttributeTypes on non-object Type")
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go
new file mode 100644
index 0000000..84a9de0
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/path.go
@@ -0,0 +1,186 @@
+package cty
+
+import (
+	"errors"
+	"fmt"
+)
+
+// A Path is a sequence of operations to locate a nested value within a
+// data structure.
+//
+// The empty Path represents the given item. Any PathSteps within represent
+// taking a single step down into a data structure.
+//
+// Path has some convenience methods for gradually constructing a path,
+// but callers can also feel free to just produce a slice of PathStep manually
+// and convert to this type, which may be more appropriate in environments
+// where memory pressure is a concern.
+type Path []PathStep
+
+// PathStep represents a single step down into a data structure, as part
+// of a Path. PathStep is a closed interface, meaning that the only
+// permitted implementations are those within this package.
+type PathStep interface {
+	pathStepSigil() pathStepImpl
+	Apply(Value) (Value, error)
+}
+
+// embed pathStepImpl into a struct to declare it a PathStep implementation
+type pathStepImpl struct{}
+
+func (p pathStepImpl) pathStepSigil() pathStepImpl {
+	return p
+}
+
+// Index returns a new Path that is the receiver with an IndexStep appended
+// to the end.
+//
+// This is provided as a convenient way to construct paths, but each call
+// will create garbage so it should not be used where memory pressure is a
+// concern.
+func (p Path) Index(v Value) Path {
+	ret := make(Path, len(p)+1)
+	copy(ret, p)
+	ret[len(p)] = IndexStep{
+		Key: v,
+	}
+	return ret
+}
+
+// GetAttr returns a new Path that is the receiver with a GetAttrStep appended
+// to the end.
+//
+// This is provided as a convenient way to construct paths, but each call
+// will create garbage so it should not be used where memory pressure is a
+// concern.
+func (p Path) GetAttr(name string) Path {
+	ret := make(Path, len(p)+1)
+	copy(ret, p)
+	ret[len(p)] = GetAttrStep{
+		Name: name,
+	}
+	return ret
+}
+
+// Apply applies each of the steps in turn to successive values starting with
+// the given value, and returns the result. If any step returns an error,
+// the whole operation returns an error.
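A short usage sketch (not part of the vendored file) applying a hand-built path to a value:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        val := cty.ObjectVal(map[string]cty.Value{
            "names": cty.ListVal([]cty.Value{
                cty.StringVal("a"),
                cty.StringVal("b"),
            }),
        })

        // Walk to .names[1] with a path built via the convenience methods.
        path := cty.Path{}.GetAttr("names").Index(cty.NumberIntVal(1))
        got, err := path.Apply(val)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%#v\n", got) // cty.StringVal("b")
    }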
+func (p Path) Apply(val Value) (Value, error) {
+	var err error
+	for i, step := range p {
+		val, err = step.Apply(val)
+		if err != nil {
+			return NilVal, fmt.Errorf("at step %d: %s", i, err)
+		}
+	}
+	return val, nil
+}
+
+// LastStep applies the given path up to the last step and then returns
+// the resulting value and the final step.
+//
+// This is useful when dealing with assignment operations, since in that
+// case the *value* of the last step is not important (and may not, in fact,
+// be present at all) and we care only about its location.
+//
+// Since LastStep applies all steps except the last, it will return errors
+// for those steps in the same way as Apply does.
+//
+// If the path has *no* steps then the returned PathStep will be nil,
+// representing that any operation should be applied directly to the
+// given value.
+func (p Path) LastStep(val Value) (Value, PathStep, error) {
+	var err error
+
+	if len(p) == 0 {
+		return val, nil, nil
+	}
+
+	journey := p[:len(p)-1]
+	val, err = journey.Apply(val)
+	if err != nil {
+		return NilVal, nil, err
+	}
+	return val, p[len(p)-1], nil
+}
+
+// Copy makes a shallow copy of the receiver. Often when paths are passed to
+// caller code they come with the constraint that they are valid only until
+// the caller returns, due to how they are constructed internally. Callers
+// can use Copy to conveniently produce a copy of the value that _they_ control
+// the validity of.
+func (p Path) Copy() Path {
+	ret := make(Path, len(p))
+	copy(ret, p)
+	return ret
+}
+
+// IndexStep is a PathStep implementation representing applying the index
+// operation to a value, which must be of either a list, map, or set type.
+//
+// When describing a path through a *type* rather than a concrete value,
+// the Key may be an unknown value, indicating that the step applies to
+// *any* key of the given type.
+//
+// When indexing into a set, the Key is actually the element being accessed
+// itself, since in sets elements are their own identity.
+type IndexStep struct {
+	pathStepImpl
+	Key Value
+}
+
+// Apply returns the value resulting from indexing the given value with
+// our key value.
+func (s IndexStep) Apply(val Value) (Value, error) {
+	switch s.Key.Type() {
+	case Number:
+		if !val.Type().IsListType() {
+			return NilVal, errors.New("not a list type")
+		}
+	case String:
+		if !val.Type().IsMapType() {
+			return NilVal, errors.New("not a map type")
+		}
+	default:
+		return NilVal, errors.New("key value not number or string")
+	}
+
+	has := val.HasIndex(s.Key)
+	if !has.IsKnown() {
+		return UnknownVal(val.Type().ElementType()), nil
+	}
+	if !has.True() {
+		return NilVal, errors.New("value does not have given index key")
+	}
+
+	return val.Index(s.Key), nil
+}
+
+func (s IndexStep) GoString() string {
+	return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key)
+}
+
+// GetAttrStep is a PathStep implementation representing retrieving an
+// attribute from a value, which must be of an object type.
+type GetAttrStep struct {
+	pathStepImpl
+	Name string
+}
+
+// Apply returns the value of our named attribute from the given value, which
+// must be of an object type that has a value of that name.
+func (s GetAttrStep) Apply(val Value) (Value, error) {
+	if !val.Type().IsObjectType() {
+		return NilVal, errors.New("not an object type")
+	}
+
+	if !val.Type().HasAttribute(s.Name) {
+		return NilVal, fmt.Errorf("object has no attribute %q", s.Name)
+	}
+
+	return val.GetAttr(s.Name), nil
+}
+
+func (s GetAttrStep) GoString() string {
+	return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go
new file mode 100644
index 0000000..b8682dd
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go
@@ -0,0 +1,122 @@
+package cty
+
+import "math/big"
+
+// primitiveType is the hidden implementation of the various primitive types
+// that are exposed as variables in this package.
+type primitiveType struct {
+	typeImplSigil
+	Kind primitiveTypeKind
+}
+
+type primitiveTypeKind byte
+
+const (
+	primitiveTypeBool   primitiveTypeKind = 'B'
+	primitiveTypeNumber primitiveTypeKind = 'N'
+	primitiveTypeString primitiveTypeKind = 'S'
+)
+
+func (t primitiveType) Equals(other Type) bool {
+	if otherP, ok := other.typeImpl.(primitiveType); ok {
+		return otherP.Kind == t.Kind
+	}
+	return false
+}
+
+func (t primitiveType) FriendlyName() string {
+	switch t.Kind {
+	case primitiveTypeBool:
+		return "bool"
+	case primitiveTypeNumber:
+		return "number"
+	case primitiveTypeString:
+		return "string"
+	default:
+		// should never happen
+		panic("invalid primitive type")
+	}
+}
+
+func (t primitiveType) GoString() string {
+	switch t.Kind {
+	case primitiveTypeBool:
+		return "cty.Bool"
+	case primitiveTypeNumber:
+		return "cty.Number"
+	case primitiveTypeString:
+		return "cty.String"
+	default:
+		// should never happen
+		panic("invalid primitive type")
+	}
+}
+
+// Number is the numeric type. Number values are arbitrary-precision
+// decimal numbers, which can then be converted into Go's various numeric
+// types only if they are in the appropriate range.
+var Number Type
+
+// String is the string type. String values are sequences of unicode codepoints
+// encoded internally as UTF-8.
+var String Type
+
+// Bool is the boolean type. The two values of this type are True and False.
+var Bool Type
+
+// True is the truthy value of type Bool
+var True Value
+
+// False is the falsey value of type Bool
+var False Value
+
+// Zero is a number value representing exactly zero.
+var Zero Value
+
+// PositiveInfinity is a Number value representing positive infinity
+var PositiveInfinity Value
+
+// NegativeInfinity is a Number value representing negative infinity
+var NegativeInfinity Value
+
+func init() {
+	Number = Type{
+		primitiveType{Kind: primitiveTypeNumber},
+	}
+	String = Type{
+		primitiveType{Kind: primitiveTypeString},
+	}
+	Bool = Type{
+		primitiveType{Kind: primitiveTypeBool},
+	}
+	True = Value{
+		ty: Bool,
+		v:  true,
+	}
+	False = Value{
+		ty: Bool,
+		v:  false,
+	}
+	Zero = Value{
+		ty: Number,
+		v:  big.NewFloat(0),
+	}
+	PositiveInfinity = Value{
+		ty: Number,
+		v:  (&big.Float{}).SetInf(false),
+	}
+	NegativeInfinity = Value{
+		ty: Number,
+		v:  (&big.Float{}).SetInf(true),
+	}
+}
+
+// IsPrimitiveType returns true if and only if the receiver is a primitive
+// type, which means it's either number, string, or bool. Any two primitive
+// types can be safely compared for equality using the standard == operator
+// without panic, which is not a guarantee that holds for all types. Primitive
+// types can therefore also be used in switch statements.
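A small sketch of the switch-friendly comparability described above (example code only):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    // describe relies on the ==-comparability of primitive types noted above.
    func describe(t cty.Type) string {
        switch t {
        case cty.Number:
            return "a number"
        case cty.String:
            return "a string"
        case cty.Bool:
            return "a bool"
        default:
            return "not a primitive type"
        }
    }

    func main() {
        fmt.Println(describe(cty.Bool))             // a bool
        fmt.Println(describe(cty.List(cty.String))) // not a primitive type
    }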
+func (t Type) IsPrimitiveType() bool { + _, ok := t.typeImpl.(primitiveType) + return ok +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/gob.go b/vendor/github.com/zclconf/go-cty/cty/set/gob.go new file mode 100644 index 0000000..da2978f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/gob.go @@ -0,0 +1,76 @@ +package set + +import ( + "bytes" + "encoding/gob" + "fmt" +) + +// GobEncode is an implementation of the interface gob.GobEncoder, allowing +// sets to be included in structures encoded via gob. +// +// The set rules are included in the serialized value, so the caller must +// register its concrete rules type with gob.Register before using a +// set in a gob, and possibly also implement GobEncode/GobDecode to customize +// how any parameters are persisted. +// +// The set elements are also included, so if they are of non-primitive types +// they too must be registered with gob. +// +// If the produced gob values will persist for a long time, the caller must +// ensure compatibility of the rules implementation. In particular, if the +// definition of element equivalence changes between encoding and decoding +// then two distinct stored elements may be considered equivalent on decoding, +// causing the recovered set to have fewer elements than when it was stored. +func (s Set) GobEncode() ([]byte, error) { + gs := gobSet{ + Version: 0, + Rules: s.rules, + Values: s.Values(), + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + err := enc.Encode(gs) + if err != nil { + return nil, fmt.Errorf("error encoding set.Set: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is the opposite of GobEncode. See GobEncode for information +// on the requirements for and caveats of including set values in gobs. +func (s *Set) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gs gobSet + err := dec.Decode(&gs) + if err != nil { + return fmt.Errorf("error decoding set.Set: %s", err) + } + if gs.Version != 0 { + return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version) + } + + victim := NewSetFromSlice(gs.Rules, gs.Values) + s.vals = victim.vals + s.rules = victim.rules + return nil +} + +type gobSet struct { + Version int + Rules Rules + + // The bucket-based representation is for efficient in-memory access, but + // for serialization it's enough to just retain the values themselves, + // which we can re-bucket using the rules (which may have changed!) when + // we re-inflate. 
+ Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/iterator.go b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go new file mode 100644 index 0000000..f15498e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go @@ -0,0 +1,36 @@ +package set + +type Iterator struct { + bucketIds []int + vals map[int][]interface{} + bucketIdx int + valIdx int +} + +func (it *Iterator) Value() interface{} { + return it.currentBucket()[it.valIdx] +} + +func (it *Iterator) Next() bool { + if it.bucketIdx == -1 { + // init + if len(it.bucketIds) == 0 { + return false + } + + it.valIdx = 0 + it.bucketIdx = 0 + return true + } + + it.valIdx++ + if it.valIdx >= len(it.currentBucket()) { + it.valIdx = 0 + it.bucketIdx++ + } + return it.bucketIdx < len(it.bucketIds) +} + +func (it *Iterator) currentBucket() []interface{} { + return it.vals[it.bucketIds[it.bucketIdx]] +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/ops.go b/vendor/github.com/zclconf/go-cty/cty/set/ops.go new file mode 100644 index 0000000..726e707 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/ops.go @@ -0,0 +1,199 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s Set) Copy() Set { + ret := NewSet(s.rules) + for k, v := range s.vals { + ret.vals[k] = v + } + return ret +} + +// Iterator returns an iterator over values in the set, in an undefined order +// that callers should not depend on. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. 
+ bucketIds := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIds = append(bucketIds, id) + } + sort.Ints(bucketIds) + + return &Iterator{ + bucketIds: bucketIds, + vals: s.vals, + bucketIdx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all of the values in the set in no particular +// order. This is just a wrapper around EachValue that accumulates the results +// in a slice for caller convenience. +// +// The returned slice will be nil if there are no values in the set. +func (s Set) Values() []interface{} { + var ret []interface{} + s.EachValue(func(v interface{}) { + ret = append(ret, v) + }) + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. +func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/rules.go b/vendor/github.com/zclconf/go-cty/cty/set/rules.go new file mode 100644 index 0000000..7200184 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/rules.go @@ -0,0 +1,25 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. 
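+	//
+	// Editorial sketch (hypothetical, not part of this package): a Rules
+	// implementation for plain int elements could simply return the value
+	// itself as its hash:
+	//
+	//	func (r intRules) Hash(v interface{}) int { return v.(int) }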
+	Hash(interface{}) int
+
+	// Equivalent returns true if and only if the two values are considered
+	// equivalent for the sake of set membership. Two values that are
+	// equivalent cannot exist in the set at the same time, and if two
+	// equivalent values are added it is undefined which one will be
+	// returned when enumerating all of the set members.
+	//
+	// Two values that are equivalent *must* result in the same hash value,
+	// though it is *not* required that two values with the same hash value
+	// be equivalent.
+	Equivalent(interface{}, interface{}) bool
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/set.go b/vendor/github.com/zclconf/go-cty/cty/set/set.go
new file mode 100644
index 0000000..b4fb316
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set/set.go
@@ -0,0 +1,62 @@
+package set
+
+import (
+	"fmt"
+)
+
+// Set is an implementation of the concept of a set: a collection where all
+// values are conceptually either in or out of the set, but the members are
+// not ordered.
+//
+// This type primarily exists to be the internal type of sets in cty, but
+// it is considered to be at the same level of abstraction as Go's built-in
+// slice and map collection types, and so should make no cty-specific
+// assumptions.
+//
+// Set operations are not thread safe. It is the caller's responsibility to
+// provide mutex guarantees where necessary.
+//
+// Set operations are not optimized to minimize memory pressure. Mutating
+// a set will generally create garbage and so should perhaps be avoided in
+// tight loops where memory pressure is a concern.
+type Set struct {
+	vals  map[int][]interface{}
+	rules Rules
+}
+
+// NewSet returns an empty set with the membership rules given.
+func NewSet(rules Rules) Set {
+	return Set{
+		vals:  map[int][]interface{}{},
+		rules: rules,
+	}
+}
+
+// NewSetFromSlice returns a new set with the given rules, populated with
+// the values from the given slice.
+func NewSetFromSlice(rules Rules, vals []interface{}) Set {
+	s := NewSet(rules)
+	for _, v := range vals {
+		s.Add(v)
+	}
+	return s
+}
+
+func sameRules(s1 Set, s2 Set) bool {
+	return s1.rules == s2.rules
+}
+
+func mustHaveSameRules(s1 Set, s2 Set) {
+	if !sameRules(s1, s2) {
+		panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules))
+	}
+}
+
+// HasRules returns true if and only if the receiving set has the given rules
+// instance as its rules.
+func (s Set) HasRules(rules Rules) bool {
+	return s.rules == rules
+}
+
+// Rules returns the receiving set's rules instance.
+func (s Set) Rules() Rules {
+	return s.rules
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_helper.go b/vendor/github.com/zclconf/go-cty/cty/set_helper.go
new file mode 100644
index 0000000..a88ddaf
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_helper.go
@@ -0,0 +1,126 @@
+package cty
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// ValueSet is to cty.Set what []cty.Value is to cty.List and
+// map[string]cty.Value is to cty.Map. It's provided to allow callers a
+// convenient interface for manipulating sets before wrapping them in cty.Set
+// values using cty.SetValFromValueSet.
+//
+// Unlike value slices and value maps, ValueSet instances have a single
+// homogeneous element type because that is a requirement of the underlying
+// set implementation, which uses the element type to select a suitable
+// hashing function.
+//
+// Set mutations are not concurrency-safe.
+type ValueSet struct {
+	// ValueSet is just a thin wrapper around a set.Set with our value-oriented
+	// "rules" applied.
We do this so that the caller can work in terms of + // cty.Value objects even though the set internals use the raw values. + s set.Set +} + +// NewValueSet creates and returns a new ValueSet with the given element type. +func NewValueSet(ety Type) ValueSet { + return newValueSet(set.NewSet(setRules{Type: ety})) +} + +func newValueSet(s set.Set) ValueSet { + return ValueSet{ + s: s, + } +} + +// ElementType returns the element type for the receiving ValueSet. +func (s ValueSet) ElementType() Type { + return s.s.Rules().(setRules).Type +} + +// Add inserts the given value into the receiving set. +func (s ValueSet) Add(v Value) { + s.requireElementType(v) + s.s.Add(v.v) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s ValueSet) Remove(v Value) { + s.requireElementType(v) + s.s.Remove(v.v) +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s ValueSet) Has(v Value) bool { + s.requireElementType(v) + return s.s.Has(v.v) +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s ValueSet) Copy() ValueSet { + return newValueSet(s.s.Copy()) +} + +// Length returns the number of values in the set. +func (s ValueSet) Length() int { + return s.s.Length() +} + +// Values returns a slice of all of the values in the set in no particular +// order. +func (s ValueSet) Values() []Value { + l := s.s.Length() + if l == 0 { + return nil + } + ret := make([]Value, 0, l) + ety := s.ElementType() + for it := s.s.Iterator(); it.Next(); { + ret = append(ret, Value{ + ty: ety, + v: it.Value(), + }) + } + return ret +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same element type, +// or else this function will panic. +func (s ValueSet) Union(other ValueSet) ValueSet { + return newValueSet(s.s.Union(other.s)) +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Intersection(other ValueSet) ValueSet { + return newValueSet(s.s.Intersection(other.s)) +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Subtract(other ValueSet) ValueSet { + return newValueSet(s.s.Subtract(other.s)) +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same element type, or else this function +// will panic. +func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet { + return newValueSet(s.s.SymmetricDifference(other.s)) +} + +// requireElementType panics if the given value is not of the set's element type. 
+func (s ValueSet) requireElementType(v Value) {
+	if !v.Type().Equals(s.ElementType()) {
+		panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType()))
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/vendor/github.com/zclconf/go-cty/cty/set_internals.go
new file mode 100644
index 0000000..ce738db
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_internals.go
@@ -0,0 +1,146 @@
+package cty
+
+import (
+	"bytes"
+	"fmt"
+	"hash/crc32"
+	"math/big"
+	"sort"
+)
+
+// setRules provides a Rules implementation for the ./set package that
+// respects the equality rules for cty values of the given type.
+//
+// This implementation expects that values added to the set will be
+// valid internal values for the given Type, which is to say that wrapping
+// the given value in a Value struct along with the ruleset's type should
+// produce a valid, working Value.
+type setRules struct {
+	Type Type
+}
+
+func (r setRules) Hash(v interface{}) int {
+	hashBytes := makeSetHashBytes(Value{
+		ty: r.Type,
+		v:  v,
+	})
+	return int(crc32.ChecksumIEEE(hashBytes))
+}
+
+func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool {
+	v1v := Value{
+		ty: r.Type,
+		v:  v1,
+	}
+	v2v := Value{
+		ty: r.Type,
+		v:  v2,
+	}
+
+	eqv := v1v.Equals(v2v)
+
+	// By comparing the result to true we ensure that an Unknown result,
+	// which will result if either value is unknown, will be considered
+	// as non-equivalent. Two unknown values are not equivalent for the
+	// sake of set membership.
+	return eqv.v == true
+}
+
+func makeSetHashBytes(val Value) []byte {
+	var buf bytes.Buffer
+	appendSetHashBytes(val, &buf)
+	return buf.Bytes()
+}
+
+func appendSetHashBytes(val Value, buf *bytes.Buffer) {
+	// Exactly what bytes we generate here don't matter as long as the following
+	// constraints hold:
+	// - Unknown and null values all generate distinct strings from
+	//   each other and from any normal value of the given type.
+	// - The delimiter used to separate items in a compound structure can
+	//   never appear literally in any of its elements.
+	// Since we don't support heterogeneous lists we don't need to worry about
+	// collisions between values of different types, apart from
+	// DynamicPseudoType.
+	// If in practice we *do* get a collision then it's not a big deal because
+	// the Equivalent function will still distinguish values, but set
+	// performance will be best if we are able to produce a distinct string
+	// for each distinct value, unknown values notwithstanding.
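+	//
+	// Editorial example of the scheme implemented below: a known
+	// cty.List(cty.String) containing "a" and "b" hashes over the bytes
+	// ["a";"b";], while an unknown value hashes over ? and a null over ~.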
+	if !val.IsKnown() {
+		buf.WriteRune('?')
+		return
+	}
+	if val.IsNull() {
+		buf.WriteRune('~')
+		return
+	}
+
+	switch val.ty {
+	case Number:
+		buf.WriteString(val.v.(*big.Float).String())
+		return
+	case Bool:
+		if val.v.(bool) {
+			buf.WriteRune('T')
+		} else {
+			buf.WriteRune('F')
+		}
+		return
+	case String:
+		buf.WriteString(fmt.Sprintf("%q", val.v.(string)))
+		return
+	}
+
+	if val.ty.IsMapType() {
+		buf.WriteRune('{')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(keyVal, buf)
+			buf.WriteRune(':')
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune('}')
+		return
+	}
+
+	if val.ty.IsListType() || val.ty.IsSetType() {
+		buf.WriteRune('[')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune(']')
+		return
+	}
+
+	if val.ty.IsObjectType() {
+		buf.WriteRune('<')
+		attrNames := make([]string, 0, len(val.ty.AttributeTypes()))
+		for attrName := range val.ty.AttributeTypes() {
+			attrNames = append(attrNames, attrName)
+		}
+		sort.Strings(attrNames)
+		for _, attrName := range attrNames {
+			appendSetHashBytes(val.GetAttr(attrName), buf)
+			buf.WriteRune(';')
+		}
+		buf.WriteRune('>')
+		return
+	}
+
+	if val.ty.IsTupleType() {
+		buf.WriteRune('<')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune('>')
+		return
+	}
+
+	// should never get down here
+	panic("unsupported type in set hash")
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_type.go b/vendor/github.com/zclconf/go-cty/cty/set_type.go
new file mode 100644
index 0000000..952a2d2
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_type.go
@@ -0,0 +1,66 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeSet struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Set creates a set type with the given element Type.
+//
+// Set types are CollectionType implementations.
+func Set(elem Type) Type {
+	return Type{
+		typeSet{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a set whose element type is
+// equal to that of the receiver.
+func (t typeSet) Equals(other Type) bool {
+	ot, isSet := other.typeImpl.(typeSet)
+	if !isSet {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeSet) FriendlyName() string {
+	return "set of " + t.ElementTypeT.FriendlyName()
+}
+
+func (t typeSet) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeSet) GoString() string {
+	return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
+}
+
+// IsSetType returns true if the given type is a set type, regardless of its
+// element type.
+func (t Type) IsSetType() bool {
+	_, ok := t.typeImpl.(typeSet)
+	return ok
+}
+
+// SetElementType is a convenience method that checks if the given type is
+// a set type, returning a pointer to its element type if so and nil
+// otherwise.
This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.SetElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) SetElementType() *Type {
+	if lt, ok := t.typeImpl.(typeSet); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/tuple_type.go b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
new file mode 100644
index 0000000..b98349e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeTuple struct {
+	typeImplSigil
+	ElemTypes []Type
+}
+
+// Tuple creates a tuple type with the given element types.
+//
+// After a slice is passed to this function the caller must no longer access
+// the underlying array, since ownership is transferred to this library.
+func Tuple(elemTypes []Type) Type {
+	return Type{
+		typeTuple{
+			ElemTypes: elemTypes,
+		},
+	}
+}
+
+func (t typeTuple) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeTuple); ok {
+		if len(t.ElemTypes) != len(ot.ElemTypes) {
+			// Fast path: if we don't have the same number of elements
+			// then we can't possibly be equal.
+			return false
+		}
+
+		for i, ty := range t.ElemTypes {
+			oty := ot.ElemTypes[i]
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeTuple) FriendlyName() string {
+	// There isn't really a friendly way to write a tuple type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// a tuple type FriendlyName in its entirety. For example, a caller could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "tuple"
+}
+
+func (t typeTuple) GoString() string {
+	if len(t.ElemTypes) == 0 {
+		return "cty.EmptyTuple"
+	}
+	return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
+}
+
+// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
+// the empty tuple type.
+var EmptyTuple Type
+
+// EmptyTupleVal is the only possible non-null, non-unknown value of type
+// EmptyTuple.
+var EmptyTupleVal Value
+
+func init() {
+	EmptyTuple = Tuple([]Type{})
+	EmptyTupleVal = Value{
+		ty: EmptyTuple,
+		v:  []interface{}{},
+	}
+}
+
+// IsTupleType returns true if the given type is a tuple type, regardless
+// of its element types.
+func (t Type) IsTupleType() bool {
+	_, ok := t.typeImpl.(typeTuple)
+	return ok
+}
+
+// Length returns the number of elements of the receiving tuple type.
+// Will panic if the receiver isn't a tuple type; use IsTupleType to determine
+// whether this operation will succeed.
+func (t Type) Length() int {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return len(ot.ElemTypes)
+	}
+	panic("Length on non-tuple Type")
+}
+
+// TupleElementType returns the type of the element with the given index. Will
+// panic if the receiver is not a tuple type (use IsTupleType to confirm)
+// or if the index is out of range (use Length to confirm).
+func (t Type) TupleElementType(idx int) Type {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return ot.ElemTypes[idx]
+	}
+	panic("TupleElementType on non-tuple Type")
+}
+
+// TupleElementTypes returns a slice of the receiving tuple type's element
+// types. Will panic if the receiver is not a tuple type (use IsTupleType
+// to confirm).
+//
+// The returned slice is part of the internal state of the type, and is provided
+// for read access only. It is forbidden for any caller to modify the
+// underlying array. For many purposes the element-related methods of Value
+// are more appropriate and more convenient to use.
+func (t Type) TupleElementTypes() []Type {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return ot.ElemTypes
+	}
+	panic("TupleElementTypes on non-tuple Type")
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/type.go b/vendor/github.com/zclconf/go-cty/cty/type.go
new file mode 100644
index 0000000..ae5f1c8
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/type.go
@@ -0,0 +1,95 @@
+package cty
+
+// Type represents value types within the type system.
+//
+// This is a closed interface type, meaning that only the concrete
+// implementations provided within this package are considered valid.
+type Type struct {
+	typeImpl
+}
+
+type typeImpl interface {
+	// isTypeImpl is a do-nothing method that exists only to express
+	// that a type is an implementation of typeImpl.
+	isTypeImpl() typeImplSigil
+
+	// Equals returns true if the other given Type exactly equals the
+	// receiver Type.
+	Equals(other Type) bool
+
+	// FriendlyName returns a human-friendly *English* name for the given
+	// type.
+	FriendlyName() string
+
+	// GoString implements the GoStringer interface from package fmt.
+	GoString() string
+}
+
+// Base implementation of Type to embed into concrete implementations
+// to signal that they are implementations of Type.
+type typeImplSigil struct{}
+
+func (t typeImplSigil) isTypeImpl() typeImplSigil {
+	return typeImplSigil{}
+}
+
+// Equals returns true if the other given Type exactly equals the receiver
+// type.
+func (t Type) Equals(other Type) bool {
+	return t.typeImpl.Equals(other)
+}
+
+// FriendlyName returns a human-friendly *English* name for the given type.
+func (t Type) FriendlyName() string {
+	return t.typeImpl.FriendlyName()
+}
+
+// GoString returns a string approximating how the receiver type would be
+// expressed in Go source code.
+func (t Type) GoString() string {
+	if t.typeImpl == nil {
+		return "cty.NilType"
+	}
+
+	return t.typeImpl.GoString()
+}
+
+// NilType is an invalid type used when a function is returning an error
+// and has no useful type to return. It should not be used and any methods
+// called on it will panic.
+var NilType = Type{}
+
+// HasDynamicTypes returns true either if the receiver is itself
+// DynamicPseudoType or if it is a compound type any of whose descendant
+// elements is DynamicPseudoType.
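+//
+// For example (editorial note), cty.Object(map[string]cty.Type{"a":
+// cty.DynamicPseudoType}).HasDynamicTypes() returns true, while
+// cty.Number.HasDynamicTypes() returns false.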
+func (t Type) HasDynamicTypes() bool {
+	switch {
+	case t == DynamicPseudoType:
+		return true
+	case t.IsPrimitiveType():
+		return false
+	case t.IsCollectionType():
+		return false
+	case t.IsObjectType():
+		attrTypes := t.AttributeTypes()
+		for _, at := range attrTypes {
+			if at.HasDynamicTypes() {
+				return true
+			}
+		}
+		return false
+	case t.IsTupleType():
+		elemTypes := t.TupleElementTypes()
+		for _, et := range elemTypes {
+			if et.HasDynamicTypes() {
+				return true
+			}
+		}
+		return false
+	case t.IsCapsuleType():
+		return false
+	default:
+		// Should never happen, since above should be exhaustive
+		panic("HasDynamicTypes does not support the given type")
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/type_conform.go b/vendor/github.com/zclconf/go-cty/cty/type_conform.go
new file mode 100644
index 0000000..b417dc7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/type_conform.go
@@ -0,0 +1,142 @@
+package cty
+
+// TestConformance recursively walks the receiver and the given other type and
+// returns nil if the receiver *conforms* to the given type.
+//
+// Type conformance is similar to type equality but has one crucial difference:
+// DynamicPseudoType can be used within the given type to represent that
+// *any* type is allowed.
+//
+// If any non-conformities are found, the returned slice will be non-nil and
+// contain at least one error value. It will be nil if the type is entirely
+// conformant.
+//
+// Note that the special behavior of DynamicPseudoType is the *only* exception
+// to normal type equality. Calling applications may wish to apply their own
+// automatic conversion logic to the given data structure to create a more
+// liberal notion of conformance to a type.
+//
+// Returned errors are usually (but not always) PathError instances that
+// indicate where in the structure the error was found. If a returned error
+// is of that type then the error message is written for (English-speaking)
+// end-users working within the cty type system, not mentioning any Go-oriented
+// implementation details.
+func (t Type) TestConformance(other Type) []error {
+	path := make(Path, 0)
+	var errs []error
+	testConformance(t, other, path, &errs)
+	return errs
+}
+
+func testConformance(given Type, want Type, path Path, errs *[]error) {
+	if want.Equals(DynamicPseudoType) {
+		// anything goes!
+		return
+	}
+
+	if given.Equals(want) {
+		// Any equal types are always conformant
+		return
+	}
+
+	// The remainder of this function is concerned with detecting
+	// and reporting the specific non-conformance, since we wouldn't
+	// have got here if the types were not divergent.
+	// We treat compound structures as special so that we can report
+	// specifically what is non-conforming, rather than simply returning
+	// the entire type names and letting the user puzzle it out.
+
+	if given.IsObjectType() && want.IsObjectType() {
+		givenAttrs := given.AttributeTypes()
+		wantAttrs := want.AttributeTypes()
+
+		if len(givenAttrs) != len(wantAttrs) {
+			// Something is missing from one of them.
+ for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) + } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) + } + } + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for k, wantAttrType := range wantAttrs { + if givenAttrType, exists := givenAttrs[k]; exists { + path[pathIdx] = GetAttrStep{Name: k} + testConformance(givenAttrType, wantAttrType, path, errs) + } + } + + path = path[0:pathIdx] + + return + } + + if given.IsTupleType() && want.IsTupleType() { + givenElems := given.TupleElementTypes() + wantElems := want.TupleElementTypes() + + if len(givenElems) != len(wantElems) { + *errs = append( + *errs, + errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)), + ) + return + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for i, wantElemType := range wantElems { + givenElemType := givenElems[i] + path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))} + testConformance(givenElemType, wantElemType, path, errs) + } + + path = path[0:pathIdx] + + return + } + + if given.IsListType() && want.IsListType() { + path = append(path, IndexStep{Key: UnknownVal(Number)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsMapType() && want.IsMapType() { + path = append(path, IndexStep{Key: UnknownVal(String)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsSetType() && want.IsSetType() { + path = append(path, IndexStep{Key: UnknownVal(given.ElementType())}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + *errs = append( + *errs, + errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()), + ) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go new file mode 100644 index 0000000..e1e220a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go @@ -0,0 +1,57 @@ +package cty + +import ( + "encoding/gob" + "fmt" + "math/big" + "strings" + + "github.com/zclconf/go-cty/cty/set" +) + +// InternalTypesToRegister is a slice of values that covers all of the +// internal types used in the representation of cty.Type and cty.Value +// across all cty Types. +// +// This is intended to be used to register these types with encoding +// packages that require registration of types used in interfaces, such as +// encoding/gob, thus allowing cty types and values to be included in streams +// created from those packages. However, registering with gob is not necessary +// since that is done automatically as a side-effect of importing this package. +// +// Callers should not do anything with the values here except pass them on +// verbatim to a registration function. +// +// If the calling application uses Capsule types that wrap local structs either +// directly or indirectly, these structs may also need to be registered in +// order to support encoding and decoding of values of these types. That is the +// responsibility of the calling application. 
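+//
+// Editorial sketch (codec.Register is a hypothetical stand-in for such a
+// registration function):
+//
+//	for _, tv := range cty.InternalTypesToRegister {
+//		codec.Register(tv)
+//	}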
+var InternalTypesToRegister []interface{}
+
+func init() {
+	InternalTypesToRegister = []interface{}{
+		primitiveType{},
+		typeList{},
+		typeMap{},
+		typeObject{},
+		typeSet{},
+		setRules{},
+		set.Set{},
+		typeTuple{},
+		big.Float{},
+		capsuleType{},
+		[]interface{}(nil),
+		map[string]interface{}(nil),
+	}
+
+	// Register these with gob here, rather than in gob.go, to ensure
+	// that this will always happen after we build the above.
+	for _, tv := range InternalTypesToRegister {
+		typeName := fmt.Sprintf("%T", tv)
+		if strings.HasPrefix(typeName, "cty.") {
+			gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv)
+		} else {
+			gob.Register(tv)
+		}
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go
new file mode 100644
index 0000000..9f6fce9
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go
@@ -0,0 +1,79 @@
+package cty
+
+// unknownType is the placeholder type used for the sigil value representing
+// "Unknown", to make it unambiguously distinct from any other possible value.
type unknownType struct {
+}
+
+// unknown is a special value that can be used as the internal value of a
+// Value to create a placeholder for a value that isn't yet known.
+var unknown interface{} = &unknownType{}
+
+// UnknownVal returns a Value that represents an unknown value of the given
+// type. Unknown values can be used to represent a value that is
+// not yet known. Its meaning is undefined in cty, but it could be used by
+// a calling application to allow partial evaluation.
+//
+// Unknown values can be created of any type. All operations on
+// Unknown values themselves return Unknown.
+func UnknownVal(t Type) Value {
+	return Value{
+		ty: t,
+		v:  unknown,
+	}
+}
+
+func (t unknownType) GoString() string {
+	// This is the stringification of our internal unknown marker. The
+	// stringification of the public representation of unknowns is in
+	// Value.GoString.
+	return "cty.unknown"
+}
+
+type pseudoTypeDynamic struct {
+	typeImplSigil
+}
+
+// DynamicPseudoType represents the dynamic pseudo-type.
+//
+// This type can represent situations where a type is not yet known. Its
+// meaning is undefined in cty, but it could be used by a calling
+// application to allow expression type checking with some types not yet known.
+// For example, the application might optimistically permit any operation on
+// values of this type in type checking, allowing a partial type-check result,
+// and then repeat the check when more information is known to get the
+// final, concrete type.
+//
+// It is a pseudo-type because it is used only as a sigil to the calling
+// application. "Unknown" is the only valid value of this pseudo-type, so
+// operations on values of this type will always short-circuit as per
+// the rules for that special value.
+var DynamicPseudoType Type
+
+func (t pseudoTypeDynamic) Equals(other Type) bool {
+	_, ok := other.typeImpl.(pseudoTypeDynamic)
+	return ok
+}
+
+func (t pseudoTypeDynamic) FriendlyName() string {
+	return "dynamic"
+}
+
+func (t pseudoTypeDynamic) GoString() string {
+	return "cty.DynamicPseudoType"
+}
+
+// DynamicVal is the only valid value of the pseudo-type dynamic.
+// This value can be used as a placeholder where a value or expression's
+// type and value are both unknown, thus allowing partial evaluation. See
+// the docs for DynamicPseudoType for more information.
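+//
+// For example (editorial note), cty.DynamicVal.Type() is
+// cty.DynamicPseudoType, and operations on it, such as Equals, short-circuit
+// to an unknown result rather than producing a concrete answer.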
+var DynamicVal Value
+
+func init() {
+	DynamicPseudoType = Type{
+		pseudoTypeDynamic{},
+	}
+	DynamicVal = Value{
+		ty: DynamicPseudoType,
+		v:  unknown,
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/value.go b/vendor/github.com/zclconf/go-cty/cty/value.go
new file mode 100644
index 0000000..80cb8f7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/value.go
@@ -0,0 +1,98 @@
+package cty
+
+// Value represents a value of a particular type, and is the interface by
+// which operations are executed on typed values.
+//
+// Value has two different classes of method. Operation methods stay entirely
+// within the type system (methods accept and return Value instances) and
+// are intended for use in implementing a language in terms of cty, while
+// integration methods either enter or leave the type system, working with
+// native Go values. Operation methods are guaranteed to support all of the
+// expected short-circuit behavior for unknown and dynamic values, while
+// integration methods may not.
+//
+// The philosophy for the operations API is that it's the caller's
+// responsibility to ensure that the given types and values satisfy the
+// specified invariants during a separate type check, so that the caller is
+// able to return errors to its user from the application's own perspective.
+//
+// Consequently the design of these methods assumes such checks have already
+// been done and panics if any invariants turn out not to be satisfied. These
+// panic errors are not intended to be handled, but rather indicate a bug in
+// the calling application that should be fixed with more checks prior to
+// executing operations.
+//
+// A related consequence of this philosophy is that no automatic type
+// conversions are done. If a method specifies that its argument must be
+// number then it's the caller's responsibility to do that conversion before
+// the call, thus allowing the application to have more constrained conversion
+// rules than are offered by the built-in converter where necessary.
+type Value struct {
+	ty Type
+	v  interface{}
+}
+
+// Type returns the type of the value.
+func (val Value) Type() Type {
+	return val.ty
+}
+
+// IsKnown returns true if the value is known. That is, if it is not
+// the result of the unknown value constructor UnknownVal(...), and is not
+// the result of an operation on another unknown value.
+//
+// Unknown values are only produced either directly or as a result of
+// operating on other unknown values, and so an application that never
+// introduces Unknown values can be guaranteed to never receive any either.
+func (val Value) IsKnown() bool {
+	return val.v != unknown
+}
+
+// IsNull returns true if the value is null. Values of any type can be
+// null, but any operations on a null value will panic. No operation ever
+// produces null, so an application that never introduces Null values can
+// be guaranteed to never receive any either.
+func (val Value) IsNull() bool {
+	return val.v == nil
+}
+
+// NilVal is an invalid Value that can be used as a placeholder when returning
+// with an error from a function that returns (Value, error).
+//
+// NilVal is *not* a valid Value and so no operations may be performed on it.
+// Any attempt to use it will result in a panic.
+//
+// This should not be confused with the idea of a Null value, as returned by
+// NullVal. NilVal is a nil within the *Go* type system, and is invalid in
+// the cty type system. Null values *do* exist in the cty type system.
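+//
+// For example (editorial note), a constructor that fails validation would
+// typically "return cty.NilVal, err", while cty.NullVal(ty) is the correct
+// way to represent an intentionally-absent value of type ty.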
+var NilVal = Value{
+	ty: Type{typeImpl: nil},
+	v:  nil,
+}
+
+// IsWhollyKnown is an extension of IsKnown that also recursively checks
+// inside collections and structures to see if there are any nested unknown
+// values.
+func (val Value) IsWhollyKnown() bool {
+	if !val.IsKnown() {
+		return false
+	}
+
+	if val.IsNull() {
+		// Can't recurse into a null, so we're done
+		return true
+	}
+
+	switch {
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			if !ev.IsWhollyKnown() {
+				return false
+			}
+		}
+		return true
+	default:
+		return true
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go
new file mode 100644
index 0000000..495a83e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go
@@ -0,0 +1,276 @@
+package cty
+
+import (
+	"fmt"
+	"math/big"
+	"reflect"
+
+	"golang.org/x/text/unicode/norm"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// BoolVal returns a Value of type Bool whose internal value is the given
+// bool.
+func BoolVal(v bool) Value {
+	return Value{
+		ty: Bool,
+		v:  v,
+	}
+}
+
+// NumberVal returns a Value of type Number whose internal value is the given
+// big.Float. The returned value becomes the owner of the big.Float object,
+// and so it's forbidden for the caller to mutate the object after it's
+// wrapped in this way.
+func NumberVal(v *big.Float) Value {
+	return Value{
+		ty: Number,
+		v:  v,
+	}
+}
+
+// NumberIntVal returns a Value of type Number whose internal value is equal
+// to the given integer.
+func NumberIntVal(v int64) Value {
+	return NumberVal(new(big.Float).SetInt64(v))
+}
+
+// NumberUIntVal returns a Value of type Number whose internal value is equal
+// to the given unsigned integer.
+func NumberUIntVal(v uint64) Value {
+	return NumberVal(new(big.Float).SetUint64(v))
+}
+
+// NumberFloatVal returns a Value of type Number whose internal value is
+// equal to the given float.
+func NumberFloatVal(v float64) Value {
+	return NumberVal(new(big.Float).SetFloat64(v))
+}
+
+// StringVal returns a Value of type String whose internal value is the
+// given string.
+//
+// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and
+// they are NFC-normalized on entry into the world of cty values.
+//
+// If the given string is not valid UTF-8 then behavior of string operations
+// is undefined.
+func StringVal(v string) Value {
+	return Value{
+		ty: String,
+		v:  NormalizeString(v),
+	}
+}
+
+// NormalizeString applies the same normalization that cty applies when
+// constructing string values.
+//
+// A return value from this function can be meaningfully compared byte-for-byte
+// with a Value.AsString result.
+func NormalizeString(s string) string {
+	return norm.NFC.String(s)
+}
+
+// ObjectVal returns a Value of an object type whose structure is defined
+// by the key names and value types in the given map.
+func ObjectVal(attrs map[string]Value) Value {
+	attrTypes := make(map[string]Type, len(attrs))
+	attrVals := make(map[string]interface{}, len(attrs))
+
+	for attr, val := range attrs {
+		attr = NormalizeString(attr)
+		attrTypes[attr] = val.ty
+		attrVals[attr] = val.v
+	}
+
+	return Value{
+		ty: Object(attrTypes),
+		v:  attrVals,
+	}
+}
+
+// TupleVal returns a Value of a tuple type whose element types are
+// defined by the value types in the given slice.
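+//
+// For example (editorial note):
+//
+//	v := cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(1)})
+//	// v.Type() is cty.Tuple([]cty.Type{cty.String, cty.Number})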
+func TupleVal(elems []Value) Value {
+	elemTypes := make([]Type, len(elems))
+	elemVals := make([]interface{}, len(elems))
+
+	for i, val := range elems {
+		elemTypes[i] = val.ty
+		elemVals[i] = val.v
+	}
+
+	return Value{
+		ty: Tuple(elemTypes),
+		v:  elemVals,
+	}
+}
+
+// ListVal returns a Value of list type whose element type is defined by
+// the types of the given values, which must be homogeneous.
+//
+// If the types are not all consistent (aside from elements that are of the
+// dynamic pseudo-type) then this function will panic. It will panic also
+// if the given list is empty, since then the element type cannot be inferred.
+// (See also ListValEmpty.)
+func ListVal(vals []Value) Value {
+	if len(vals) == 0 {
+		panic("must not call ListVal with empty slice")
+	}
+	elementType := DynamicPseudoType
+	rawList := make([]interface{}, len(vals))
+
+	for i, val := range vals {
+		if elementType == DynamicPseudoType {
+			elementType = val.ty
+		} else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
+			panic(fmt.Errorf(
+				"inconsistent list element types (%#v then %#v)",
+				elementType, val.ty,
+			))
+		}
+
+		rawList[i] = val.v
+	}
+
+	return Value{
+		ty: List(elementType),
+		v:  rawList,
+	}
+}
+
+// ListValEmpty returns an empty list of the given element type.
+func ListValEmpty(element Type) Value {
+	return Value{
+		ty: List(element),
+		v:  []interface{}{},
+	}
+}
+
+// MapVal returns a Value of a map type whose element type is defined by
+// the types of the given values, which must be homogeneous.
+//
+// If the types are not all consistent (aside from elements that are of the
+// dynamic pseudo-type) then this function will panic. It will panic also
+// if the given map is empty, since then the element type cannot be inferred.
+// (See also MapValEmpty.)
+func MapVal(vals map[string]Value) Value {
+	if len(vals) == 0 {
+		panic("must not call MapVal with empty map")
+	}
+	elementType := DynamicPseudoType
+	rawMap := make(map[string]interface{}, len(vals))
+
+	for key, val := range vals {
+		if elementType == DynamicPseudoType {
+			elementType = val.ty
+		} else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
+			panic(fmt.Errorf(
+				"inconsistent map element types (%#v then %#v)",
+				elementType, val.ty,
+			))
+		}
+
+		rawMap[NormalizeString(key)] = val.v
+	}
+
+	return Value{
+		ty: Map(elementType),
+		v:  rawMap,
+	}
+}
+
+// MapValEmpty returns an empty map of the given element type.
+func MapValEmpty(element Type) Value {
+	return Value{
+		ty: Map(element),
+		v:  map[string]interface{}{},
+	}
+}
+
+// SetVal returns a Value of set type whose element type is defined by
+// the types of the given values, which must be homogeneous.
+//
+// If the types are not all consistent (aside from elements that are of the
+// dynamic pseudo-type) then this function will panic. It will panic also
+// if the given slice is empty, since then the element type cannot be inferred.
+// (See also SetValEmpty.)
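+//
+// For example (editorial note), cty.SetVal([]cty.Value{cty.StringVal("a"),
+// cty.StringVal("a")}) yields a set of cty.String containing one element,
+// because equivalent values coalesce under the set rules.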
+func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + } +} + +// SetValFromValueSet returns a Value of set type based on an already-constructed +// ValueSet. +// +// The element type of the returned value is the element type of the given +// set. +func SetValFromValueSet(s ValueSet) Value { + ety := s.ElementType() + rawVal := s.s.Copy() // copy so caller can't mutate what we wrap + + return Value{ + ty: Set(ety), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer. +func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go new file mode 100644 index 0000000..967aa76 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -0,0 +1,1071 @@ +package cty + +import ( + "fmt" + "math/big" + + "reflect" + + "github.com/zclconf/go-cty/cty/set" +) + +func (val Value) GoString() string { + if val == NilVal { + return "cty.NilVal" + } + + if val.ty == DynamicPseudoType { + return "cty.DynamicVal" + } + + if !val.IsKnown() { + return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + } + if val.IsNull() { + return fmt.Sprintf("cty.NullVal(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } else { + return "cty.False" + } + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. 
+		if fv.IsInt() {
+			return fmt.Sprintf("cty.NumberIntVal(%#v)", fv)
+		}
+		if rfv, accuracy := fv.Float64(); accuracy == big.Exact {
+			return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv)
+		}
+		return fmt.Sprintf("cty.NumberVal(new(big.Float).Parse(\"%#v\", 10))", fv)
+	case String:
+		return fmt.Sprintf("cty.StringVal(%#v)", val.v)
+	}
+
+	switch {
+	case val.ty.IsSetType():
+		vals := val.v.(set.Set).Values()
+		if len(vals) == 0 {
+			return "cty.SetValEmpty()"
+		}
+		return fmt.Sprintf("cty.SetVal(%#v)", vals)
+	case val.ty.IsCapsuleType():
+		return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v)
+	}
+
+	// The default case exposes implementation details, so the cases above
+	// should cover all types for good caller UX.
+	return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v)
+}
+
+// Equals returns True if the receiver and the given other value have the
+// same type and are exactly equal in value.
+//
+// The usual short-circuit rules apply, so the result can be unknown or typed
+// as dynamic if either of the given values are. Use RawEquals to compare
+// if two values are equal *ignoring* the short-circuit rules.
+func (val Value) Equals(other Value) Value {
+	if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
+		return UnknownVal(Bool)
+	}
+
+	if !val.ty.Equals(other.ty) {
+		return BoolVal(false)
+	}
+
+	if !(val.IsKnown() && other.IsKnown()) {
+		return UnknownVal(Bool)
+	}
+
+	if val.IsNull() || other.IsNull() {
+		if val.IsNull() && other.IsNull() {
+			return BoolVal(true)
+		}
+		return BoolVal(false)
+	}
+
+	ty := val.ty
+	result := false
+
+	switch {
+	case ty == Number:
+		result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0
+	case ty == Bool:
+		result = val.v.(bool) == other.v.(bool)
+	case ty == String:
+		// Simple equality is safe because we NFC-normalize strings as they
+		// enter our world from StringVal, and so we can assume strings are
+		// always in normal form.
+		result = val.v.(string) == other.v.(string)
+	case ty.IsObjectType():
+		oty := ty.typeImpl.(typeObject)
+		result = true
+		for attr, aty := range oty.AttrTypes {
+			lhs := Value{
+				ty: aty,
+				v:  val.v.(map[string]interface{})[attr],
+			}
+			rhs := Value{
+				ty: aty,
+				v:  other.v.(map[string]interface{})[attr],
+			}
+			eq := lhs.Equals(rhs)
+			if !eq.IsKnown() {
+				return UnknownVal(Bool)
+			}
+			if eq.False() {
+				result = false
+				break
+			}
+		}
+	case ty.IsTupleType():
+		tty := ty.typeImpl.(typeTuple)
+		result = true
+		for i, ety := range tty.ElemTypes {
+			lhs := Value{
+				ty: ety,
+				v:  val.v.([]interface{})[i],
+			}
+			rhs := Value{
+				ty: ety,
+				v:  other.v.([]interface{})[i],
+			}
+			eq := lhs.Equals(rhs)
+			if !eq.IsKnown() {
+				return UnknownVal(Bool)
+			}
+			if eq.False() {
+				result = false
+				break
+			}
+		}
+	case ty.IsListType():
+		ety := ty.typeImpl.(typeList).ElementTypeT
+		if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
+			result = true
+			for i := range val.v.([]interface{}) {
+				lhs := Value{
+					ty: ety,
+					v:  val.v.([]interface{})[i],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.([]interface{})[i],
+				}
+				eq := lhs.Equals(rhs)
+				if !eq.IsKnown() {
+					return UnknownVal(Bool)
+				}
+				if eq.False() {
+					result = false
+					break
+				}
+			}
+		}
+	case ty.IsSetType():
+		s1 := val.v.(set.Set)
+		s2 := other.v.(set.Set)
+		equal := true
+
+		// Note that by our definition of sets it's never possible for two
+		// sets that contain unknown values (directly or indirectly) to
+		// ever be equal, even if they are otherwise identical.
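+		//
+		// For example (editorial note), two sets that each contain
+		// cty.UnknownVal(cty.String) compare as unequal here, because
+		// setRules.Equivalent never treats unknowns as equivalent.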
+
+	// FIXME: iterating both sets and checking each item is not the
+	// ideal implementation here, but it works with the primitives we
+	// have in the set implementation. Perhaps the set implementation
+	// can provide its own equality test later.
+		s1.EachValue(func(v interface{}) {
+			if !s2.Has(v) {
+				equal = false
+			}
+		})
+		s2.EachValue(func(v interface{}) {
+			if !s1.Has(v) {
+				equal = false
+			}
+		})
+
+		result = equal
+	case ty.IsMapType():
+		ety := ty.typeImpl.(typeMap).ElementTypeT
+		if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) {
+			result = true
+			for k := range val.v.(map[string]interface{}) {
+				if _, ok := other.v.(map[string]interface{})[k]; !ok {
+					result = false
+					break
+				}
+				lhs := Value{
+					ty: ety,
+					v:  val.v.(map[string]interface{})[k],
+				}
+				rhs := Value{
+					ty: ety,
+					v:  other.v.(map[string]interface{})[k],
+				}
+				eq := lhs.Equals(rhs)
+				if !eq.IsKnown() {
+					return UnknownVal(Bool)
+				}
+				if eq.False() {
+					result = false
+					break
+				}
+			}
+		}
+	case ty.IsCapsuleType():
+		// A capsule type's encapsulated value is a pointer to a value of its
+		// native type, so we can just compare these to get the identity test
+		// we need.
+		return BoolVal(val.v == other.v)
+
+	default:
+		// should never happen
+		panic(fmt.Errorf("unsupported value type %#v in Equals", ty))
+	}
+
+	return BoolVal(result)
+}
+
+// NotEqual is a shorthand for Equals followed by Not.
+func (val Value) NotEqual(other Value) Value {
+	return val.Equals(other).Not()
+}
+
+// True returns true if the receiver is True, false if False, and panics if
+// the receiver is not of type Bool.
+//
+// This is a helper function for writing application logic that works with
+// values, rather than a first-class operation. It does not work with unknown
+// or null values. For more robust handling with unknown value
+// short-circuiting, use val.Equals(cty.True).
+func (val Value) True() bool {
+	if val.ty != Bool {
+		panic("not bool")
+	}
+	return val.Equals(True).v.(bool)
+}
+
+// False is the opposite of True.
+func (val Value) False() bool {
+	return !val.True()
+}
+
+// RawEquals returns true if and only if the two given values have the same
+// type and equal value, ignoring the usual short-circuit rules about
+// unknowns and dynamic types.
+//
+// This method is more appropriate for testing than for real use, since it
+// skips over usual semantics around unknowns but as a consequence allows
+// testing the result of another operation that is expected to return unknown.
+// It returns a primitive Go bool rather than a Value to remind us that it
+// is not a first-class value operation.
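+//
+// For example (editorial note),
+// cty.UnknownVal(cty.String).RawEquals(cty.UnknownVal(cty.String)) is true,
+// whereas Equals on the same operands would return an unknown Bool.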
+func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) + return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. +func (val Value) Add(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. 
Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. +func (val Value) Divide(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Quo(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Modulo returns the remainder of an integer division of the receiver and +// the given other value. Both values must be numbers; this method will panic +// if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// This operation is primarily here for use with nonzero natural numbers. +// Modulo with "other" as a non-natural number gets somewhat philosophical, +// and this function takes a position on what that should mean, but callers +// may wish to disallow such things outright or implement their own modulo +// if they disagree with the interpretation used here. +func (val Value) Modulo(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + // We cheat a bit here with infinities, just abusing the Multiply operation + // to get an infinite result of the correct sign. 
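+	// (Editorial examples: PositiveInfinity.Modulo(NumberIntVal(2)) yields
+	// PositiveInfinity under this rule, while in the finite path below,
+	// NumberIntVal(7).Modulo(NumberIntVal(3)) truncates 7/3 to 2 and
+	// returns 7 - 2*3 = 1.)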
+	if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity {
+		return val.Multiply(other)
+	}
+
+	if other.RawEquals(Zero) {
+		return val
+	}
+
+	// FIXME: This is a bit clumsy. Should come back later and see if there's a
+	// more straightforward way to do this.
+	// The remainder is computed as val - trunc(val/other)*other; big.Float.Int
+	// truncates toward zero, so the result takes the sign of val.
+	rat := val.Divide(other)
+	ratFloorInt := &big.Int{}
+	rat.v.(*big.Float).Int(ratFloorInt)
+	work := (&big.Float{}).SetInt(ratFloorInt)
+	work.Mul(other.v.(*big.Float), work)
+	work.Sub(val.v.(*big.Float), work)
+
+	return NumberVal(work)
+}
+
+// Absolute returns the absolute (signless) value of the receiver, which must
+// be a number or this method will panic.
+func (val Value) Absolute() Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := (&big.Float{}).Abs(val.v.(*big.Float))
+	return NumberVal(ret)
+}
+
+// GetAttr returns the value of the given attribute of the receiver, which
+// must be of an object type that has an attribute of the given name.
+// This method will panic if the receiver type is not compatible.
+//
+// The method will also panic if the given attribute name is not defined
+// for the value's type. Use the attribute-related methods on Type to
+// check for the validity of an attribute before trying to use it.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
+func (val Value) GetAttr(name string) Value {
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	if !val.ty.IsObjectType() {
+		panic("value is not an object")
+	}
+
+	name = NormalizeString(name)
+	if !val.ty.HasAttribute(name) {
+		panic("value has no attribute of that name")
+	}
+
+	attrType := val.ty.AttributeType(name)
+
+	if !val.IsKnown() {
+		return UnknownVal(attrType)
+	}
+
+	return Value{
+		ty: attrType,
+		v:  val.v.(map[string]interface{})[name],
+	}
+}
+
+// Index returns the value of an element of the receiver, which must have
+// either a list, map or tuple type. This method will panic if the receiver
+// type is not compatible.
+//
+// The key value must be the correct type for the receiving collection: a
+// number if the collection is a list or tuple, or a string if it is a map.
+// In the case of a list or tuple, the given number must be convertible to int
+// or this method will panic. The key may alternatively be of
+// DynamicPseudoType, in which case the result itself is an unknown of the
+// collection's element type.
+//
+// The result is of the receiver collection's element type, or in the case
+// of a tuple the type of the specific element index requested.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
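+//
+// Illustrative sketch (not from the upstream source; the example values
+// here are hypothetical):
+//
+//	l := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+//	l.Index(cty.NumberIntVal(1)) // cty.StringVal("b")
+//
+//	m := cty.MapVal(map[string]cty.Value{"k": cty.True})
+//	m.Index(cty.StringVal("k")) // cty.True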
+func (val Value) Index(key Value) Value {
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	switch {
+	case val.Type().IsListType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != Number {
+			panic("element key for list must be number")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for list must be non-negative integer")
+		}
+
+		return Value{
+			ty: elty,
+			v:  val.v.([]interface{})[index],
+		}
+	case val.Type().IsMapType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != String {
+			panic("element key for map must be string")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		keyStr := key.v.(string)
+
+		return Value{
+			ty: elty,
+			v:  val.v.(map[string]interface{})[keyStr],
+		}
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return DynamicVal
+		}
+
+		if key.Type() != Number {
+			panic("element key for tuple must be number")
+		}
+		if !key.IsKnown() {
+			return DynamicVal
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for tuple must be non-negative integer")
+		}
+
+		eltys := val.Type().TupleElementTypes()
+
+		if !val.IsKnown() {
+			return UnknownVal(eltys[index])
+		}
+
+		return Value{
+			ty: eltys[index],
+			v:  val.v.([]interface{})[index],
+		}
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
+
+// HasIndex returns True if the receiver (which must be supported for Index)
+// has an element with the given index key, or False if it does not.
+//
+// The result will be UnknownVal(Bool) if either the collection or the
+// key value is unknown.
+//
+// This method will panic if the receiver is not indexable, but does not
+// impose any panic-causing type constraints on the key.
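+//
+// A small sketch (not from the upstream source; the values are hypothetical):
+//
+//	l := cty.ListVal([]cty.Value{cty.StringVal("a")})
+//	l.HasIndex(cty.NumberIntVal(0)) // cty.True
+//	l.HasIndex(cty.NumberIntVal(1)) // cty.False
+//	l.HasIndex(cty.StringVal("x"))  // cty.False (wrong key type, but no panic)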
+func (val Value) HasIndex(key Value) Value {
+	if val.ty == DynamicPseudoType {
+		return UnknownVal(Bool)
+	}
+
+	switch {
+	case val.Type().IsListType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != Number {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+		if !val.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			return False
+		}
+
+		return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0)
+	case val.Type().IsMapType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != String {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+		if !val.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		keyStr := key.v.(string)
+		_, exists := val.v.(map[string]interface{})[keyStr]
+
+		return BoolVal(exists)
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != Number {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			return False
+		}
+
+		length := val.Type().Length()
+		return BoolVal(int(index) < length && index >= 0)
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
+
+// HasElement returns True if the receiver (which must be of a set type)
+// has the given value as an element, or False if it does not.
+//
+// The result will be UnknownVal(Bool) if either the set or the
+// given value is unknown.
+//
+// This method will panic if the receiver is not a set, or if it is a null set.
+func (val Value) HasElement(elem Value) Value {
+	ty := val.Type()
+
+	if !ty.IsSetType() {
+		panic("not a set type")
+	}
+	if !val.IsKnown() || !elem.IsKnown() {
+		return UnknownVal(Bool)
+	}
+	if val.IsNull() {
+		panic("can't call HasElement on a null value")
+	}
+	if ty.ElementType() != elem.Type() {
+		return False
+	}
+
+	s := val.v.(set.Set)
+	return BoolVal(s.Has(elem.v))
+}
+
+// Length returns the length of the receiver, which must be a collection type
+// or tuple type, as a number value. If the receiver is not a compatible type
+// then this method will panic.
+//
+// If the receiver is unknown then the result is also unknown.
+//
+// If the receiver is null then this function will panic.
+//
+// Note that Length is not supported for strings. To determine the length
+// of a string, call AsString and take the length of the native Go string
+// that is returned.
+func (val Value) Length() Value {
+	if val.Type().IsTupleType() {
+		// For tuples, we can return the length even if the value is not known.
+		return NumberIntVal(int64(val.Type().Length()))
+	}
+
+	if !val.IsKnown() {
+		return UnknownVal(Number)
+	}
+
+	return NumberIntVal(int64(val.LengthInt()))
+}
+
+// LengthInt is like Length except it returns an int. It has the same behavior
+// as Length except that it will panic if the receiver is unknown.
+//
+// This is an integration method provided for the convenience of code bridging
+// into Go's type system.
+func (val Value) LengthInt() int {
+	if val.Type().IsTupleType() {
+		// For tuples, we can return the length even if the value is not known.
+		return val.Type().Length()
+	}
+	if !val.IsKnown() {
+		panic("value is not known")
+	}
+	if val.IsNull() {
+		panic("value is null")
+	}
+
+	switch {
+
+	case val.ty.IsListType():
+		return len(val.v.([]interface{}))
+
+	case val.ty.IsSetType():
+		return val.v.(set.Set).Length()
+
+	case val.ty.IsMapType():
+		return len(val.v.(map[string]interface{}))
+
+	default:
+		panic("value is not a collection")
+	}
+}
+
+// ElementIterator returns an ElementIterator for iterating the elements
+// of the receiver, which must be a collection type, a tuple type, or an object
+// type. If called on a value of any other type, this method will panic.
+//
+// The value must be Known and non-Null, or this method will panic.
+//
+// If the receiver is of a list type, the returned keys will be of type Number
+// and the values will be of the list's element type.
+//
+// If the receiver is of a map type, the returned keys will be of type String
+// and the value will be of the map's element type. Elements are passed in
+// ascending lexicographical order by key.
+//
+// If the receiver is of a set type, each element is returned as both the
+// key and the value, since set members are their own identity.
+//
+// If the receiver is of a tuple type, the returned keys will be of type Number
+// and the value will be of the corresponding element's type.
+//
+// If the receiver is of an object type, the returned keys will be of type
+// String and the value will be of the corresponding attribute's type.
+//
+// ElementIterator is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ElementIterator() ElementIterator {
+	if !val.IsKnown() {
+		panic("can't use ElementIterator on unknown value")
+	}
+	if val.IsNull() {
+		panic("can't use ElementIterator on null value")
+	}
+	return elementIterator(val)
+}
+
+// CanIterateElements returns true if the receiver can support the
+// ElementIterator method (and by extension, ForEachElement) without panic.
+func (val Value) CanIterateElements() bool {
+	return canElementIterator(val)
+}
+
+// ForEachElement executes a given callback function for each element of
+// the receiver, which must be a collection type or tuple type, or this method
+// will panic.
+//
+// ForEachElement uses ElementIterator internally, and so the values passed
+// to the callback are as described for ElementIterator.
+//
+// Returns true if the iteration exited early due to the callback function
+// returning true, or false if the loop ran to completion.
+//
+// ForEachElement is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ForEachElement(cb ElementCallback) bool {
+	it := val.ElementIterator()
+	for it.Next() {
+		key, val := it.Element()
+		stop := cb(key, val)
+		if stop {
+			return true
+		}
+	}
+	return false
+}
+
+// Not returns the logical inverse of the receiver, which must be of type
+// Bool or this method will panic.
+func (val Value) Not() Value {
+	if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(!val.v.(bool))
+}
+
+// And returns the result of logical AND with the receiver and the other given
+// value, which must both be of type Bool or this method will panic.
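+//
+// For example (an illustrative sketch, not from the upstream source; note
+// that an unknown operand short-circuits to an unknown result):
+//
+//	cty.True.And(cty.False)                // cty.False
+//	cty.True.And(cty.UnknownVal(cty.Bool)) // cty.UnknownVal(cty.Bool)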
+func (val Value) And(other Value) Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) && other.v.(bool)) +} + +// Or returns the result of logical OR with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) Or(other Value) Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) || other.v.(bool)) +} + +// LessThan returns True if the receiver is less than the other given value, +// which must both be numbers or this method will panic. +func (val Value) LessThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. +func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. +func (val Value) AsString() string { + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "gocty" package. +func (val Value) AsBigFloat() *big.Float { + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown +// value of any type that CanIterateElements, or panics if called on +// any other value. +// +// For more convenient conversions to slices of more specific types, use +// the "gocty" package. +func (val Value) AsValueSlice() []Value { + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret = append(ret, v) + } + return ret +} + +// AsValueMap returns a map[string]cty.Value representation of a non-null, +// non-unknown value of any type that CanIterateElements, or panics if called +// on any other value. 
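+//
+// For example (an illustrative sketch, not from the upstream source; the
+// object value is hypothetical):
+//
+//	v := cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("a")})
+//	v.AsValueMap() // map[string]cty.Value{"name": cty.StringVal("a")}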
+//
+// For more convenient conversions to maps of more specific types, use
+// the "gocty" package.
+func (val Value) AsValueMap() map[string]Value {
+	l := val.LengthInt()
+	if l == 0 {
+		return nil
+	}
+
+	ret := make(map[string]Value, l)
+	for it := val.ElementIterator(); it.Next(); {
+		k, v := it.Element()
+		ret[k.AsString()] = v
+	}
+	return ret
+}
+
+// AsValueSet returns a ValueSet representation of a non-null,
+// non-unknown value of any collection type, or panics if called
+// on any other value.
+//
+// Unlike AsValueSlice and AsValueMap, this method requires specifically a
+// collection type (list, set or map) and does not allow structural types
+// (tuple or object), because the ValueSet type requires homogeneous
+// element types.
+//
+// The returned ValueSet can store only values of the receiver's element type.
+func (val Value) AsValueSet() ValueSet {
+	if !val.Type().IsCollectionType() {
+		panic("not a collection type")
+	}
+
+	// We don't give the caller our own set.Set (assuming we're a cty.Set value)
+	// because then the caller could mutate our internals, which is forbidden.
+	// Instead, we will construct a new set and append our elements into it.
+	ret := NewValueSet(val.Type().ElementType())
+	for it := val.ElementIterator(); it.Next(); {
+		_, v := it.Element()
+		ret.Add(v)
+	}
+	return ret
+}
+
+// EncapsulatedValue returns the native value encapsulated in a non-null,
+// non-unknown capsule-typed value, or panics if called on any other value.
+//
+// The result is the same pointer that was passed to CapsuleVal to create
+// the value. Since cty considers values to be immutable, it is strongly
+// recommended to treat the encapsulated value itself as immutable too.
+func (val Value) EncapsulatedValue() interface{} {
+	if !val.Type().IsCapsuleType() {
+		panic("not a capsule-typed value")
+	}
+
+	return val.v
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/walk.go b/vendor/github.com/zclconf/go-cty/cty/walk.go
new file mode 100644
index 0000000..a6943ba
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/walk.go
@@ -0,0 +1,182 @@
+package cty
+
+// Walk visits all of the values in a possibly-complex structure, calling
+// a given function for each value.
+//
+// For example, given a list of strings the callback would first be called
+// with the whole list and then called once for each element of the list.
+//
+// The callback function may prevent recursive visits to child values by
+// returning false. The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
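+//
+// A minimal caller sketch (an illustration, not from the upstream source;
+// assumes "fmt" is imported):
+//
+//	err := cty.Walk(val, func(p cty.Path, v cty.Value) (bool, error) {
+//		fmt.Printf("%#v: %#v\n", p, v)
+//		return true, nil // true means: descend into child values
+//	})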
+func Walk(val Value, cb func(Path, Value) (bool, error)) error {
+	var path Path
+	return walk(path, val, cb)
+}
+
+func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
+	deeper, err := cb(path, val)
+	if err != nil {
+		return err
+	}
+	if !deeper {
+		return nil
+	}
+
+	if val.IsNull() || !val.IsKnown() {
+		// Can't recurse into null or unknown values, regardless of type
+		return nil
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			nameVal, av := it.Element()
+			path := append(path, GetAttrStep{
+				Name: nameVal.AsString(),
+			})
+			err := walk(path, av, cb)
+			if err != nil {
+				return err
+			}
+		}
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			path := append(path, IndexStep{
+				Key: kv,
+			})
+			err := walk(path, ev, cb)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Transform visits all of the values in a possibly-complex structure,
+// calling a given function for each value which has an opportunity to
+// replace that value.
+//
+// Unlike Walk, Transform visits child nodes first, so for a list of strings
+// it would first visit the strings and then the _new_ list constructed
+// from the transformed values of the list items.
+//
+// This is useful for creating the effect of being able to make deep mutations
+// to a value even though values are immutable. However, it's the responsibility
+// of the given function to preserve expected invariants, such as homogeneity of
+// element types in collections; this function can panic if such invariants
+// are violated, just as if new values were constructed directly using the
+// value constructor functions. An easy way to preserve invariants is to
+// ensure that the transform function never changes the value type.
+//
+// The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
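+//
+// For example, to upper-case every known, non-null string in a structure
+// (an illustrative sketch, not from the upstream source; assumes "strings"
+// is imported):
+//
+//	result, err := cty.Transform(val, func(p cty.Path, v cty.Value) (cty.Value, error) {
+//		if v.Type() == cty.String && v.IsKnown() && !v.IsNull() {
+//			return cty.StringVal(strings.ToUpper(v.AsString())), nil
+//		}
+//		return v, nil
+//	})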
+func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} -- cgit v1.2.3