aboutsummaryrefslogtreecommitdiffhomepage
path: root/vendor/google.golang.org
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/google.golang.org')
-rw-r--r--vendor/google.golang.org/api/AUTHORS10
-rw-r--r--vendor/google.golang.org/api/CONTRIBUTORS55
-rw-r--r--vendor/google.golang.org/api/LICENSE27
-rw-r--r--vendor/google.golang.org/api/gensupport/backoff.go51
-rw-r--r--vendor/google.golang.org/api/gensupport/buffer.go79
-rw-r--r--vendor/google.golang.org/api/gensupport/doc.go10
-rw-r--r--vendor/google.golang.org/api/gensupport/header.go22
-rw-r--r--vendor/google.golang.org/api/gensupport/json.go211
-rw-r--r--vendor/google.golang.org/api/gensupport/jsonfloat.go57
-rw-r--r--vendor/google.golang.org/api/gensupport/media.go342
-rw-r--r--vendor/google.golang.org/api/gensupport/params.go51
-rw-r--r--vendor/google.golang.org/api/gensupport/resumable.go216
-rw-r--r--vendor/google.golang.org/api/gensupport/retry.go84
-rw-r--r--vendor/google.golang.org/api/gensupport/send.go87
-rw-r--r--vendor/google.golang.org/api/googleapi/googleapi.go429
-rw-r--r--vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE18
-rw-r--r--vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go248
-rw-r--r--vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go17
-rw-r--r--vendor/google.golang.org/api/googleapi/transport/apikey.go38
-rw-r--r--vendor/google.golang.org/api/googleapi/types.go202
-rw-r--r--vendor/google.golang.org/api/internal/creds.go45
-rw-r--r--vendor/google.golang.org/api/internal/pool.go61
-rw-r--r--vendor/google.golang.org/api/internal/service-account.json12
-rw-r--r--vendor/google.golang.org/api/internal/settings.go81
-rw-r--r--vendor/google.golang.org/api/iterator/iterator.go231
-rw-r--r--vendor/google.golang.org/api/option/credentials_go19.go33
-rw-r--r--vendor/google.golang.org/api/option/credentials_notgo19.go32
-rw-r--r--vendor/google.golang.org/api/option/option.go191
-rw-r--r--vendor/google.golang.org/api/storage/v1/storage-api.json3818
-rw-r--r--vendor/google.golang.org/api/storage/v1/storage-gen.go11472
-rw-r--r--vendor/google.golang.org/api/transport/http/dial.go147
-rw-r--r--vendor/google.golang.org/api/transport/http/dial_appengine.go (renamed from vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto)32
-rw-r--r--vendor/google.golang.org/api/transport/http/internal/propagation/http.go96
-rw-r--r--vendor/google.golang.org/appengine/.travis.yml20
-rw-r--r--vendor/google.golang.org/appengine/CONTRIBUTING.md90
-rw-r--r--vendor/google.golang.org/appengine/LICENSE202
-rw-r--r--vendor/google.golang.org/appengine/README.md73
-rw-r--r--vendor/google.golang.org/appengine/appengine.go137
-rw-r--r--vendor/google.golang.org/appengine/appengine_vm.go20
-rw-r--r--vendor/google.golang.org/appengine/datastore/datastore.go407
-rw-r--r--vendor/google.golang.org/appengine/datastore/doc.go361
-rw-r--r--vendor/google.golang.org/appengine/datastore/key.go396
-rw-r--r--vendor/google.golang.org/appengine/datastore/load.go429
-rw-r--r--vendor/google.golang.org/appengine/datastore/metadata.go78
-rw-r--r--vendor/google.golang.org/appengine/datastore/prop.go330
-rw-r--r--vendor/google.golang.org/appengine/datastore/query.go757
-rw-r--r--vendor/google.golang.org/appengine/datastore/save.go333
-rw-r--r--vendor/google.golang.org/appengine/datastore/transaction.go96
-rw-r--r--vendor/google.golang.org/appengine/errors.go46
-rw-r--r--vendor/google.golang.org/appengine/go.mod7
-rw-r--r--vendor/google.golang.org/appengine/go.sum6
-rw-r--r--vendor/google.golang.org/appengine/identity.go142
-rw-r--r--vendor/google.golang.org/appengine/internal/api.go671
-rw-r--r--vendor/google.golang.org/appengine/internal/api_classic.go169
-rw-r--r--vendor/google.golang.org/appengine/internal/api_common.go123
-rw-r--r--vendor/google.golang.org/appengine/internal/app_id.go28
-rw-r--r--vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go611
-rw-r--r--vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto64
-rw-r--r--vendor/google.golang.org/appengine/internal/base/api_base.pb.go308
-rw-r--r--vendor/google.golang.org/appengine/internal/base/api_base.proto33
-rw-r--r--vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go4367
-rw-r--r--vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto551
-rw-r--r--vendor/google.golang.org/appengine/internal/identity.go55
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_classic.go61
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_flex.go11
-rw-r--r--vendor/google.golang.org/appengine/internal/identity_vm.go134
-rw-r--r--vendor/google.golang.org/appengine/internal/internal.go110
-rw-r--r--vendor/google.golang.org/appengine/internal/log/log_service.pb.go1313
-rw-r--r--vendor/google.golang.org/appengine/internal/log/log_service.proto150
-rw-r--r--vendor/google.golang.org/appengine/internal/main.go16
-rw-r--r--vendor/google.golang.org/appengine/internal/main_common.go7
-rw-r--r--vendor/google.golang.org/appengine/internal/main_vm.go69
-rw-r--r--vendor/google.golang.org/appengine/internal/metadata.go60
-rw-r--r--vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go786
-rw-r--r--vendor/google.golang.org/appengine/internal/modules/modules_service.proto80
-rw-r--r--vendor/google.golang.org/appengine/internal/net.go56
-rw-r--r--vendor/google.golang.org/appengine/internal/regen.sh40
-rw-r--r--vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go361
-rw-r--r--vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto44
-rw-r--r--vendor/google.golang.org/appengine/internal/transaction.go115
-rw-r--r--vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go527
-rw-r--r--vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto64
-rw-r--r--vendor/google.golang.org/appengine/namespace.go25
-rw-r--r--vendor/google.golang.org/appengine/timeout.go20
-rw-r--r--vendor/google.golang.org/appengine/travis_install.sh18
-rw-r--r--vendor/google.golang.org/appengine/travis_test.sh12
-rw-r--r--vendor/google.golang.org/appengine/urlfetch/urlfetch.go210
-rw-r--r--vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go54
-rw-r--r--vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go688
-rw-r--r--vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go411
-rw-r--r--vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go366
-rw-r--r--vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go246
-rw-r--r--vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go55
-rw-r--r--vendor/google.golang.org/grpc/.travis.yml42
-rw-r--r--vendor/google.golang.org/grpc/CONTRIBUTING.md6
-rw-r--r--vendor/google.golang.org/grpc/Makefile68
-rw-r--r--vendor/google.golang.org/grpc/README.md28
-rw-r--r--vendor/google.golang.org/grpc/backoff.go70
-rw-r--r--vendor/google.golang.org/grpc/balancer.go46
-rw-r--r--vendor/google.golang.org/grpc/balancer/balancer.go303
-rw-r--r--vendor/google.golang.org/grpc/balancer/base/balancer.go171
-rw-r--r--vendor/google.golang.org/grpc/balancer/base/base.go64
-rw-r--r--vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go79
-rw-r--r--vendor/google.golang.org/grpc/balancer_conn_wrappers.go328
-rw-r--r--vendor/google.golang.org/grpc/balancer_v1_wrapper.go326
-rw-r--r--vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go900
-rw-r--r--vendor/google.golang.org/grpc/call.go309
-rw-r--r--vendor/google.golang.org/grpc/clientconn.go1859
-rw-r--r--vendor/google.golang.org/grpc/codec.go88
-rw-r--r--vendor/google.golang.org/grpc/codes/code_string.go66
-rw-r--r--vendor/google.golang.org/grpc/codes/codes.go79
-rw-r--r--vendor/google.golang.org/grpc/connectivity/connectivity.go3
-rw-r--r--vendor/google.golang.org/grpc/coverage.sh48
-rw-r--r--vendor/google.golang.org/grpc/credentials/credentials.go145
-rw-r--r--vendor/google.golang.org/grpc/credentials/credentials_util_go17.go60
-rw-r--r--vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go57
-rw-r--r--vendor/google.golang.org/grpc/credentials/internal/syscallconn.go61
-rw-r--r--vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go (renamed from vendor/google.golang.org/grpc/naming/go17.go)18
-rw-r--r--vendor/google.golang.org/grpc/dialoptions.go492
-rw-r--r--vendor/google.golang.org/grpc/encoding/encoding.go118
-rw-r--r--vendor/google.golang.org/grpc/encoding/proto/proto.go110
-rw-r--r--vendor/google.golang.org/grpc/go.mod20
-rw-r--r--vendor/google.golang.org/grpc/go.sum32
-rw-r--r--vendor/google.golang.org/grpc/go16.go98
-rw-r--r--vendor/google.golang.org/grpc/go17.go98
-rw-r--r--vendor/google.golang.org/grpc/grpclb.go737
-rw-r--r--vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go629
-rw-r--r--vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto164
-rw-r--r--vendor/google.golang.org/grpc/grpclog/grpclog.go3
-rw-r--r--vendor/google.golang.org/grpc/grpclog/logger.go2
-rw-r--r--vendor/google.golang.org/grpc/health/client.go107
-rw-r--r--vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go263
-rw-r--r--vendor/google.golang.org/grpc/health/health.go70
-rw-r--r--vendor/google.golang.org/grpc/health/regenerate.sh33
-rw-r--r--vendor/google.golang.org/grpc/health/server.go165
-rw-r--r--vendor/google.golang.org/grpc/install_gae.sh6
-rw-r--r--vendor/google.golang.org/grpc/interceptor.go6
-rw-r--r--vendor/google.golang.org/grpc/internal/backoff/backoff.go78
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/binarylog.go167
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go42
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/env_config.go210
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/method_logger.go426
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh33
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/sink.go162
-rw-r--r--vendor/google.golang.org/grpc/internal/binarylog/util.go41
-rw-r--r--vendor/google.golang.org/grpc/internal/channelz/funcs.go699
-rw-r--r--vendor/google.golang.org/grpc/internal/channelz/types.go702
-rw-r--r--vendor/google.golang.org/grpc/internal/channelz/types_linux.go53
-rw-r--r--vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go44
-rw-r--r--vendor/google.golang.org/grpc/internal/channelz/util_linux.go (renamed from vendor/google.golang.org/grpc/credentials/credentials_util_go18.go)29
-rw-r--r--vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go26
-rw-r--r--vendor/google.golang.org/grpc/internal/envconfig/envconfig.go70
-rw-r--r--vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go56
-rw-r--r--vendor/google.golang.org/grpc/internal/grpcsync/event.go61
-rw-r--r--vendor/google.golang.org/grpc/internal/internal.go44
-rw-r--r--vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go114
-rw-r--r--vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go63
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go (renamed from vendor/google.golang.org/grpc/transport/bdp_estimator.go)22
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/controlbuf.go852
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/defaults.go49
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/flowcontrol.go (renamed from vendor/google.golang.org/grpc/transport/control.go)204
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/handler_server.go (renamed from vendor/google.golang.org/grpc/transport/handler_server.go)136
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/http2_client.go1380
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/http2_server.go (renamed from vendor/google.golang.org/grpc/transport/http2_server.go)1051
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/http_util.go (renamed from vendor/google.golang.org/grpc/transport/http_util.go)386
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/log.go (renamed from vendor/google.golang.org/grpc/transport/log.go)6
-rw-r--r--vendor/google.golang.org/grpc/internal/transport/transport.go (renamed from vendor/google.golang.org/grpc/transport/transport.go)564
-rw-r--r--vendor/google.golang.org/grpc/keepalive/keepalive.go62
-rw-r--r--vendor/google.golang.org/grpc/metadata/metadata.go104
-rw-r--r--vendor/google.golang.org/grpc/naming/dns_resolver.go55
-rw-r--r--vendor/google.golang.org/grpc/naming/naming.go12
-rw-r--r--vendor/google.golang.org/grpc/peer/peer.go2
-rw-r--r--vendor/google.golang.org/grpc/picker_wrapper.go180
-rw-r--r--vendor/google.golang.org/grpc/pickfirst.go110
-rw-r--r--vendor/google.golang.org/grpc/proxy.go52
-rw-r--r--vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go436
-rw-r--r--vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go57
-rw-r--r--vendor/google.golang.org/grpc/resolver/resolver.go158
-rw-r--r--vendor/google.golang.org/grpc/resolver_conn_wrapper.go155
-rw-r--r--vendor/google.golang.org/grpc/rpc_util.go734
-rw-r--r--vendor/google.golang.org/grpc/server.go861
-rw-r--r--vendor/google.golang.org/grpc/service_config.go372
-rw-r--r--vendor/google.golang.org/grpc/stats/handlers.go3
-rw-r--r--vendor/google.golang.org/grpc/stats/stats.go89
-rw-r--r--vendor/google.golang.org/grpc/status/status.go52
-rw-r--r--vendor/google.golang.org/grpc/stream.go1539
-rw-r--r--vendor/google.golang.org/grpc/tap/tap.go22
-rw-r--r--vendor/google.golang.org/grpc/test/bufconn/bufconn.go244
-rw-r--r--vendor/google.golang.org/grpc/trace.go15
-rw-r--r--vendor/google.golang.org/grpc/transport/go16.go45
-rw-r--r--vendor/google.golang.org/grpc/transport/go17.go46
-rw-r--r--vendor/google.golang.org/grpc/transport/http2_client.go1369
-rw-r--r--vendor/google.golang.org/grpc/version.go (renamed from vendor/google.golang.org/grpc/naming/go18.go)14
-rw-r--r--vendor/google.golang.org/grpc/vet.sh141
194 files changed, 51625 insertions, 6975 deletions
diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS
new file mode 100644
index 0000000..f73b725
--- /dev/null
+++ b/vendor/google.golang.org/api/AUTHORS
@@ -0,0 +1,10 @@
1# This is the official list of authors for copyright purposes.
2# This file is distinct from the CONTRIBUTORS files.
3# See the latter for an explanation.
4
5# Names should be added to this file as
6# Name or Organization <email address>
7# The email address is not required for organizations.
8
9# Please keep the list sorted.
10Google Inc.
diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS
new file mode 100644
index 0000000..fe55ebf
--- /dev/null
+++ b/vendor/google.golang.org/api/CONTRIBUTORS
@@ -0,0 +1,55 @@
1# This is the official list of people who can contribute
2# (and typically have contributed) code to the repository.
3# The AUTHORS file lists the copyright holders; this file
4# lists people. For example, Google employees are listed here
5# but not in AUTHORS, because Google holds the copyright.
6#
7# The submission process automatically checks to make sure
8# that people submitting code are listed in this file (by email address).
9#
10# Names should be added to this file only after verifying that
11# the individual or the individual's organization has agreed to
12# the appropriate Contributor License Agreement, found here:
13#
14# https://cla.developers.google.com/about/google-individual
15# https://cla.developers.google.com/about/google-corporate
16#
17# The CLA can be filled out on the web:
18#
19# https://cla.developers.google.com/
20#
21# When adding J Random Contributor's name to this file,
22# either J's name or J's organization's name should be
23# added to the AUTHORS file, depending on whether the
24# individual or corporate CLA was used.
25
26# Names should be added to this file like so:
27# Name <email address>
28#
29# An entry with two email addresses specifies that the
30# first address should be used in the submit logs and
31# that the second address should be recognized as the
32# same person when interacting with Rietveld.
33
34# Please keep the list sorted.
35
36Alain Vongsouvanhalainv <alainv@google.com>
37Andrew Gerrand <adg@golang.org>
38Brad Fitzpatrick <bradfitz@golang.org>
39Eric Koleda <ekoleda+devrel@googlers.com>
40Francesc Campoy <campoy@golang.org>
41Garrick Evans <garrick@google.com>
42Glenn Lewis <gmlewis@google.com>
43Ivan Krasin <krasin@golang.org>
44Jason Hall <jasonhall@google.com>
45Johan Euphrosine <proppy@google.com>
46Kostik Shtoyk <kostik@google.com>
47Kunpei Sakai <namusyaka@gmail.com>
48Matthew Whisenhunt <matt.whisenhunt@gmail.com>
49Michael McGreevy <mcgreevy@golang.org>
50Nick Craig-Wood <nickcw@gmail.com>
51Robbie Trencheny <me@robbiet.us>
52Ross Light <light@google.com>
53Sarah Adams <shadams@google.com>
54Scott Van Woudenberg <scottvw@google.com>
55Takashi Matsuo <tmatsuo@google.com>
diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE
new file mode 100644
index 0000000..263aa7a
--- /dev/null
+++ b/vendor/google.golang.org/api/LICENSE
@@ -0,0 +1,27 @@
1Copyright (c) 2011 Google Inc. All rights reserved.
2
3Redistribution and use in source and binary forms, with or without
4modification, are permitted provided that the following conditions are
5met:
6
7 * Redistributions of source code must retain the above copyright
8notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above
10copyright notice, this list of conditions and the following disclaimer
11in the documentation and/or other materials provided with the
12distribution.
13 * Neither the name of Google Inc. nor the names of its
14contributors may be used to endorse or promote products derived from
15this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go
new file mode 100644
index 0000000..94b7789
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/backoff.go
@@ -0,0 +1,51 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gensupport
6
7import (
8 "math/rand"
9 "time"
10)
11
12// BackoffStrategy defines the set of functions that a backoff-er must
13// implement.
14type BackoffStrategy interface {
15 // Pause returns the duration of the next pause and true if the operation should be
16 // retried, or false if no further retries should be attempted.
17 Pause() (time.Duration, bool)
18
19 // Reset restores the strategy to its initial state.
20 Reset()
21}
22
23// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
24// The initial pause time is given by Base.
25// Once the total pause time exceeds Max, Pause will indicate no further retries.
26type ExponentialBackoff struct {
27 Base time.Duration
28 Max time.Duration
29 total time.Duration
30 n uint
31}
32
33// Pause returns the amount of time the caller should wait.
34func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
35 if eb.total > eb.Max {
36 return 0, false
37 }
38
39 // The next pause is selected randomly from [0, 2^n * Base).
40 d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base)))
41 eb.total += d
42 eb.n++
43 return d, true
44}
45
46// Reset resets the backoff strategy such that the next Pause call will begin
47// counting from the start. It is not safe to call concurrently with Pause.
48func (eb *ExponentialBackoff) Reset() {
49 eb.n = 0
50 eb.total = 0
51}
diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go
new file mode 100644
index 0000000..3d0817e
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/buffer.go
@@ -0,0 +1,79 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gensupport
6
7import (
8 "bytes"
9 "io"
10
11 "google.golang.org/api/googleapi"
12)
13
14// MediaBuffer buffers data from an io.Reader to support uploading media in
15// retryable chunks. It should be created with NewMediaBuffer.
16type MediaBuffer struct {
17 media io.Reader
18
19 chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.
20 err error // Any error generated when populating chunk by reading media.
21
22 // The absolute position of chunk in the underlying media.
23 off int64
24}
25
26// NewMediaBuffer initializes a MediaBuffer.
27func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer {
28 return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
29}
30
31// Chunk returns the current buffered chunk, the offset in the underlying media
32// from which the chunk is drawn, and the size of the chunk.
33// Successive calls to Chunk return the same chunk between calls to Next.
34func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
35 // There may already be data in chunk if Next has not been called since the previous call to Chunk.
36 if mb.err == nil && len(mb.chunk) == 0 {
37 mb.err = mb.loadChunk()
38 }
39 return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err
40}
41
42// loadChunk will read from media into chunk, up to the capacity of chunk.
43func (mb *MediaBuffer) loadChunk() error {
44 bufSize := cap(mb.chunk)
45 mb.chunk = mb.chunk[:bufSize]
46
47 read := 0
48 var err error
49 for err == nil && read < bufSize {
50 var n int
51 n, err = mb.media.Read(mb.chunk[read:])
52 read += n
53 }
54 mb.chunk = mb.chunk[:read]
55 return err
56}
57
58// Next advances to the next chunk, which will be returned by the next call to Chunk.
59// Calls to Next without a corresponding prior call to Chunk will have no effect.
60func (mb *MediaBuffer) Next() {
61 mb.off += int64(len(mb.chunk))
62 mb.chunk = mb.chunk[0:0]
63}
64
65type readerTyper struct {
66 io.Reader
67 googleapi.ContentTyper
68}
69
70// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
71// If ra implements googleapi.ContentTyper, then the returned reader
72// will also implement googleapi.ContentTyper, delegating to ra.
73func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader {
74 r := io.NewSectionReader(ra, 0, size)
75 if typer, ok := ra.(googleapi.ContentTyper); ok {
76 return readerTyper{r, typer}
77 }
78 return r
79}
diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/gensupport/doc.go
new file mode 100644
index 0000000..752c4b4
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/doc.go
@@ -0,0 +1,10 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package gensupport is an internal implementation detail used by code
6// generated by the google-api-go-generator tool.
7//
8// This package may be modified at any time without regard for backwards
9// compatibility. It should not be used directly by API users.
10package gensupport
diff --git a/vendor/google.golang.org/api/gensupport/header.go b/vendor/google.golang.org/api/gensupport/header.go
new file mode 100644
index 0000000..cb5e67c
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/header.go
@@ -0,0 +1,22 @@
1// Copyright 2017 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gensupport
6
7import (
8 "fmt"
9 "runtime"
10 "strings"
11)
12
13// GoogleClientHeader returns the value to use for the x-goog-api-client
14// header, which is used internally by Google.
15func GoogleClientHeader(generatorVersion, clientElement string) string {
16 elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)}
17 if clientElement != "" {
18 elts = append(elts, clientElement)
19 }
20 elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion))
21 return strings.Join(elts, " ")
22}
diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go
new file mode 100644
index 0000000..c01e321
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/json.go
@@ -0,0 +1,211 @@
1// Copyright 2015 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gensupport
6
7import (
8 "encoding/json"
9 "fmt"
10 "reflect"
11 "strings"
12)
13
14// MarshalJSON returns a JSON encoding of schema containing only selected fields.
15// A field is selected if any of the following is true:
16// * it has a non-empty value
17// * its field name is present in forceSendFields and it is not a nil pointer or nil interface
18// * its field name is present in nullFields.
19// The JSON key for each selected field is taken from the field's json: struct tag.
20func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
21 if len(forceSendFields) == 0 && len(nullFields) == 0 {
22 return json.Marshal(schema)
23 }
24
25 mustInclude := make(map[string]bool)
26 for _, f := range forceSendFields {
27 mustInclude[f] = true
28 }
29 useNull := make(map[string]bool)
30 useNullMaps := make(map[string]map[string]bool)
31 for _, nf := range nullFields {
32 parts := strings.SplitN(nf, ".", 2)
33 field := parts[0]
34 if len(parts) == 1 {
35 useNull[field] = true
36 } else {
37 if useNullMaps[field] == nil {
38 useNullMaps[field] = map[string]bool{}
39 }
40 useNullMaps[field][parts[1]] = true
41 }
42 }
43
44 dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps)
45 if err != nil {
46 return nil, err
47 }
48 return json.Marshal(dataMap)
49}
50
51func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) {
52 m := make(map[string]interface{})
53 s := reflect.ValueOf(schema)
54 st := s.Type()
55
56 for i := 0; i < s.NumField(); i++ {
57 jsonTag := st.Field(i).Tag.Get("json")
58 if jsonTag == "" {
59 continue
60 }
61 tag, err := parseJSONTag(jsonTag)
62 if err != nil {
63 return nil, err
64 }
65 if tag.ignore {
66 continue
67 }
68
69 v := s.Field(i)
70 f := st.Field(i)
71
72 if useNull[f.Name] {
73 if !isEmptyValue(v) {
74 return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name)
75 }
76 m[tag.apiName] = nil
77 continue
78 }
79
80 if !includeField(v, f, mustInclude) {
81 continue
82 }
83
84 // If map fields are explicitly set to null, use a map[string]interface{}.
85 if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil {
86 ms, ok := v.Interface().(map[string]string)
87 if !ok {
88 return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name)
89 }
90 mi := map[string]interface{}{}
91 for k, v := range ms {
92 mi[k] = v
93 }
94 for k := range useNullMaps[f.Name] {
95 mi[k] = nil
96 }
97 m[tag.apiName] = mi
98 continue
99 }
100
101 // nil maps are treated as empty maps.
102 if f.Type.Kind() == reflect.Map && v.IsNil() {
103 m[tag.apiName] = map[string]string{}
104 continue
105 }
106
107 // nil slices are treated as empty slices.
108 if f.Type.Kind() == reflect.Slice && v.IsNil() {
109 m[tag.apiName] = []bool{}
110 continue
111 }
112
113 if tag.stringFormat {
114 m[tag.apiName] = formatAsString(v, f.Type.Kind())
115 } else {
116 m[tag.apiName] = v.Interface()
117 }
118 }
119 return m, nil
120}
121
122// formatAsString returns a string representation of v, dereferencing it first if possible.
123func formatAsString(v reflect.Value, kind reflect.Kind) string {
124 if kind == reflect.Ptr && !v.IsNil() {
125 v = v.Elem()
126 }
127
128 return fmt.Sprintf("%v", v.Interface())
129}
130
131// jsonTag represents a restricted version of the struct tag format used by encoding/json.
132// It is used to describe the JSON encoding of fields in a Schema struct.
133type jsonTag struct {
134 apiName string
135 stringFormat bool
136 ignore bool
137}
138
139// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
140// The format of the tag must match that generated by the Schema.writeSchemaStruct method
141// in the api generator.
142func parseJSONTag(val string) (jsonTag, error) {
143 if val == "-" {
144 return jsonTag{ignore: true}, nil
145 }
146
147 var tag jsonTag
148
149 i := strings.Index(val, ",")
150 if i == -1 || val[:i] == "" {
151 return tag, fmt.Errorf("malformed json tag: %s", val)
152 }
153
154 tag = jsonTag{
155 apiName: val[:i],
156 }
157
158 switch val[i+1:] {
159 case "omitempty":
160 case "omitempty,string":
161 tag.stringFormat = true
162 default:
163 return tag, fmt.Errorf("malformed json tag: %s", val)
164 }
165
166 return tag, nil
167}
168
169// Reports whether the struct field "f" with value "v" should be included in JSON output.
170func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool {
171 // The regular JSON encoding of a nil pointer is "null", which means "delete this field".
172 // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
173 // However, many fields are not pointers, so there would be no way to delete these fields.
174 // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
175 // Deletion will be handled by a separate mechanism.
176 if f.Type.Kind() == reflect.Ptr && v.IsNil() {
177 return false
178 }
179
180 // The "any" type is represented as an interface{}. If this interface
181 // is nil, there is no reasonable representation to send. We ignore
182 // these fields, for the same reasons as given above for pointers.
183 if f.Type.Kind() == reflect.Interface && v.IsNil() {
184 return false
185 }
186
187 return mustInclude[f.Name] || !isEmptyValue(v)
188}
189
190// isEmptyValue reports whether v is the empty value for its type. This
191// implementation is based on that of the encoding/json package, but its
192// correctness does not depend on it being identical. What's important is that
193// this function return false in situations where v should not be sent as part
194// of a PATCH operation.
195func isEmptyValue(v reflect.Value) bool {
196 switch v.Kind() {
197 case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
198 return v.Len() == 0
199 case reflect.Bool:
200 return !v.Bool()
201 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
202 return v.Int() == 0
203 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
204 return v.Uint() == 0
205 case reflect.Float32, reflect.Float64:
206 return v.Float() == 0
207 case reflect.Interface, reflect.Ptr:
208 return v.IsNil()
209 }
210 return false
211}
diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go
new file mode 100644
index 0000000..8377850
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/jsonfloat.go
@@ -0,0 +1,57 @@
1// Copyright 2016 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package gensupport
16
17import (
18 "encoding/json"
19 "errors"
20 "fmt"
21 "math"
22)
23
// JSONFloat64 is a float64 that supports proper unmarshaling of special float
// values in JSON, according to
// https://developers.google.com/protocol-buffers/docs/proto3#json. Although
// that is a proto-to-JSON spec, it applies to all Google APIs.
//
// The jsonpb package
// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has
// similar functionality, but only for direct translation from proto messages
// to JSON.
type JSONFloat64 float64

// UnmarshalJSON implements json.Unmarshaler. It accepts an ordinary JSON
// number, or one of the quoted strings "NaN", "Infinity" and "-Infinity".
func (f *JSONFloat64) UnmarshalJSON(data []byte) error {
	var num float64
	if err := json.Unmarshal(data, &num); err == nil {
		*f = JSONFloat64(num)
		return nil
	}
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return errors.New("google.golang.org/api/internal: data not float or string")
	}
	switch s {
	case "NaN":
		num = math.NaN()
	case "Infinity":
		num = math.Inf(1)
	case "-Infinity":
		num = math.Inf(-1)
	default:
		return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s)
	}
	*f = JSONFloat64(num)
	return nil
}
diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go
new file mode 100644
index 0000000..4cef4ad
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/media.go
@@ -0,0 +1,342 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gensupport
6
7import (
8 "bytes"
9 "fmt"
10 "io"
11 "io/ioutil"
12 "mime/multipart"
13 "net/http"
14 "net/textproto"
15 "strings"
16 "sync"
17
18 "google.golang.org/api/googleapi"
19)
20
21const sniffBuffSize = 512
22
23func newContentSniffer(r io.Reader) *contentSniffer {
24 return &contentSniffer{r: r}
25}
26
// contentSniffer wraps a Reader, and reports the content type determined by
// sniffing up to 512 bytes from the Reader. The sniffed prefix is kept in
// start and replayed by Read, so no data is lost to sniffing.
type contentSniffer struct {
	r     io.Reader
	start []byte // buffer for the sniffed bytes.
	err   error  // set to any error encountered while reading bytes to be sniffed.

	ctype   string // set on first sniff.
	sniffed bool   // set to true on first sniff.
}
36
37func (cs *contentSniffer) Read(p []byte) (n int, err error) {
38 // Ensure that the content type is sniffed before any data is consumed from Reader.
39 _, _ = cs.ContentType()
40
41 if len(cs.start) > 0 {
42 n := copy(p, cs.start)
43 cs.start = cs.start[n:]
44 return n, nil
45 }
46
47 // We may have read some bytes into start while sniffing, even if the read ended in an error.
48 // We should first return those bytes, then the error.
49 if cs.err != nil {
50 return 0, cs.err
51 }
52
53 // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
54 return cs.r.Read(p)
55}
56
57// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed.
58func (cs *contentSniffer) ContentType() (string, bool) {
59 if cs.sniffed {
60 return cs.ctype, cs.ctype != ""
61 }
62 cs.sniffed = true
63 // If ReadAll hits EOF, it returns err==nil.
64 cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
65
66 // Don't try to detect the content type based on possibly incomplete data.
67 if cs.err != nil {
68 return "", false
69 }
70
71 cs.ctype = http.DetectContentType(cs.start)
72 return cs.ctype, true
73}
74
75// DetermineContentType determines the content type of the supplied reader.
76// If the content type is already known, it can be specified via ctype.
77// Otherwise, the content of media will be sniffed to determine the content type.
78// If media implements googleapi.ContentTyper (deprecated), this will be used
79// instead of sniffing the content.
80// After calling DetectContentType the caller must not perform further reads on
81// media, but rather read from the Reader that is returned.
82func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
83 // Note: callers could avoid calling DetectContentType if ctype != "",
84 // but doing the check inside this function reduces the amount of
85 // generated code.
86 if ctype != "" {
87 return media, ctype
88 }
89
90 // For backwards compatability, allow clients to set content
91 // type by providing a ContentTyper for media.
92 if typer, ok := media.(googleapi.ContentTyper); ok {
93 return media, typer.ContentType()
94 }
95
96 sniffer := newContentSniffer(media)
97 if ctype, ok := sniffer.ContentType(); ok {
98 return sniffer, ctype
99 }
100 // If content type could not be sniffed, reads from sniffer will eventually fail with an error.
101 return sniffer, ""
102}
103
// typeReader pairs an io.Reader with the content type of the data it yields;
// it describes one part of a multipart/related body.
type typeReader struct {
	io.Reader
	typ string // MIME content type of the part; empty means unspecified.
}

// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
type multipartReader struct {
	pr       *io.PipeReader // read side of the pipe fed by the part-writing goroutine.
	ctype    string         // full "multipart/related; boundary=..." content type.
	mu       sync.Mutex     // guards pipeOpen.
	pipeOpen bool           // false once Close has been called.
}
117
118func newMultipartReader(parts []typeReader) *multipartReader {
119 mp := &multipartReader{pipeOpen: true}
120 var pw *io.PipeWriter
121 mp.pr, pw = io.Pipe()
122 mpw := multipart.NewWriter(pw)
123 mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
124 go func() {
125 for _, part := range parts {
126 w, err := mpw.CreatePart(typeHeader(part.typ))
127 if err != nil {
128 mpw.Close()
129 pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
130 return
131 }
132 _, err = io.Copy(w, part.Reader)
133 if err != nil {
134 mpw.Close()
135 pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
136 return
137 }
138 }
139
140 mpw.Close()
141 pw.Close()
142 }()
143 return mp
144}
145
146func (mp *multipartReader) Read(data []byte) (n int, err error) {
147 return mp.pr.Read(data)
148}
149
150func (mp *multipartReader) Close() error {
151 mp.mu.Lock()
152 if !mp.pipeOpen {
153 mp.mu.Unlock()
154 return nil
155 }
156 mp.pipeOpen = false
157 mp.mu.Unlock()
158 return mp.pr.Close()
159}
160
161// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body.
162// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary.
163//
164// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
165func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
166 mp := newMultipartReader([]typeReader{
167 {body, bodyContentType},
168 {media, mediaContentType},
169 })
170 return mp, mp.ctype
171}
172
// typeHeader builds the MIME header for one part of a multipart body.
// An empty contentType yields a header with no Content-Type field.
func typeHeader(contentType string) textproto.MIMEHeader {
	h := textproto.MIMEHeader{}
	if contentType == "" {
		return h
	}
	h.Set("Content-Type", contentType)
	return h
}
180
181// PrepareUpload determines whether the data in the supplied reader should be
182// uploaded in a single request, or in sequential chunks.
183// chunkSize is the size of the chunk that media should be split into.
184//
185// If chunkSize is zero, media is returned as the first value, and the other
186// two return values are nil, true.
187//
188// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the
189// contents of media fit in a single chunk.
190//
191// After PrepareUpload has been called, media should no longer be used: the
192// media content should be accessed via one of the return values.
193func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) {
194 if chunkSize == 0 { // do not chunk
195 return media, nil, true
196 }
197 mb = NewMediaBuffer(media, chunkSize)
198 _, _, _, err := mb.Chunk()
199 // If err is io.EOF, we can upload this in a single request. Otherwise, err is
200 // either nil or a non-EOF error. If it is the latter, then the next call to
201 // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this
202 // error will be handled at some point.
203 return nil, mb, err == io.EOF
204}
205
// MediaInfo holds information for media uploads. It is intended for use by generated
// code only.
type MediaInfo struct {
	// At most one of Media and MediaBuffer will be set.
	media           io.Reader                 // used only when chunking is turned off (chunk size 0).
	buffer          *MediaBuffer              // chunked view of the media, when chunking is enabled.
	singleChunk     bool                      // whether the media fits in a single chunk.
	mType           string                    // content type of the media, provided or sniffed.
	size            int64                     // mediaSize, if known. Used only for calls to progressUpdater_.
	progressUpdater googleapi.ProgressUpdater // optional upload-progress callback.
}
217
218// NewInfoFromMedia should be invoked from the Media method of a call. It returns a
219// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer
220// if needed.
221func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo {
222 mi := &MediaInfo{}
223 opts := googleapi.ProcessMediaOptions(options)
224 if !opts.ForceEmptyContentType {
225 r, mi.mType = DetermineContentType(r, opts.ContentType)
226 }
227 mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize)
228 return mi
229}
230
231// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a
232// call. It returns a MediaInfo using the given reader, size and media type.
233func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo {
234 rdr := ReaderAtToReader(r, size)
235 rdr, mType := DetermineContentType(rdr, mediaType)
236 return &MediaInfo{
237 size: size,
238 mType: mType,
239 buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize),
240 media: nil,
241 singleChunk: false,
242 }
243}
244
245// SetProgressUpdater sets the progress updater for the media info.
246func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) {
247 if mi != nil {
248 mi.progressUpdater = pu
249 }
250}
251
252// UploadType determines the type of upload: a single request, or a resumable
253// series of requests.
254func (mi *MediaInfo) UploadType() string {
255 if mi.singleChunk {
256 return "multipart"
257 }
258 return "resumable"
259}
260
// UploadRequest sets up an HTTP request for media upload. It adds headers
// as necessary, and returns a replacement for the body and a function for http.Request.GetBody.
// cleanup must be called when the request is done; it closes the combined
// multipart body, if one was created.
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) {
	cleanup = func() {}
	if mi == nil {
		return body, nil, cleanup
	}
	var media io.Reader
	if mi.media != nil {
		// This only happens when the caller has turned off chunking. In that
		// case, we write all of media in a single non-retryable request.
		media = mi.media
	} else if mi.singleChunk {
		// The data fits in a single chunk, which has now been read into the MediaBuffer.
		// We obtain that chunk so we can write it in a single request. The request can
		// be retried because the data is stored in the MediaBuffer.
		media, _, _, _ = mi.buffer.Chunk()
	}
	if media != nil {
		fb := readerFunc(body)
		fm := readerFunc(media)
		combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
		// Only provide GetBody when both parts can be replayed without
		// consuming them; otherwise the request cannot be safely resent.
		if fb != nil && fm != nil {
			getBody = func() (io.ReadCloser, error) {
				rb := ioutil.NopCloser(fb())
				rm := ioutil.NopCloser(fm())
				r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType)
				return r, nil
			}
		}
		cleanup = func() { combined.Close() }
		reqHeaders.Set("Content-Type", ctype)
		body = combined
	}
	// Resumable (multi-chunk) upload: announce the media type up front.
	if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
		reqHeaders.Set("X-Upload-Content-Type", mi.mType)
	}
	return body, getBody, cleanup
}
300
301// readerFunc returns a function that always returns an io.Reader that has the same
302// contents as r, provided that can be done without consuming r. Otherwise, it
303// returns nil.
304// See http.NewRequest (in net/http/request.go).
305func readerFunc(r io.Reader) func() io.Reader {
306 switch r := r.(type) {
307 case *bytes.Buffer:
308 buf := r.Bytes()
309 return func() io.Reader { return bytes.NewReader(buf) }
310 case *bytes.Reader:
311 snapshot := *r
312 return func() io.Reader { r := snapshot; return &r }
313 case *strings.Reader:
314 snapshot := *r
315 return func() io.Reader { r := snapshot; return &r }
316 default:
317 return nil
318 }
319}
320
321// ResumableUpload returns an appropriately configured ResumableUpload value if the
322// upload is resumable, or nil otherwise.
323func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload {
324 if mi == nil || mi.singleChunk {
325 return nil
326 }
327 return &ResumableUpload{
328 URI: locURI,
329 Media: mi.buffer,
330 MediaType: mi.mType,
331 Callback: func(curr int64) {
332 if mi.progressUpdater != nil {
333 mi.progressUpdater(curr, mi.size)
334 }
335 },
336 }
337}
338
// SetGetBody sets the GetBody field of req to f. net/http calls GetBody to
// obtain a fresh copy of the body when a request must be resent.
func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {
	req.GetBody = f
}
diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go
new file mode 100644
index 0000000..0e878a4
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/params.go
@@ -0,0 +1,51 @@
1// Copyright 2015 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gensupport
6
7import (
8 "net/url"
9
10 "google.golang.org/api/googleapi"
11)
12
// URLParams is a simplified replacement for url.Values
// that safely builds up URL parameters for encoding.
type URLParams map[string][]string

// Get returns the first value for the given key, or "".
func (u URLParams) Get(key string) string {
	if vs, ok := u[key]; ok && len(vs) > 0 {
		return vs[0]
	}
	return ""
}

// Set sets the key to value.
// It replaces any existing values.
func (u URLParams) Set(key, value string) {
	u[key] = []string{value}
}

// SetMulti sets the key to an array of values.
// It replaces any existing values.
// Note that values must not be modified after calling SetMulti
// so the caller is responsible for making a copy if necessary.
func (u URLParams) SetMulti(key string, values []string) {
	u[key] = values
}

// Encode encodes the values into "URL encoded" form
// ("bar=baz&foo=quux") sorted by key.
func (u URLParams) Encode() string {
	return url.Values(u).Encode()
}
45
46// SetOptions sets the URL params and any additional call options.
47func SetOptions(u URLParams, opts ...googleapi.CallOption) {
48 for _, o := range opts {
49 u.Set(o.Get())
50 }
51}
diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go
new file mode 100644
index 0000000..2552a6a
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/resumable.go
@@ -0,0 +1,216 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gensupport
6
7import (
8 "context"
9 "errors"
10 "fmt"
11 "io"
12 "net/http"
13 "sync"
14 "time"
15)
16
const (
	// statusTooManyRequests is returned by the storage API if the
	// per-project limits have been temporarily exceeded. The request
	// should be retried.
	// https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
	statusTooManyRequests = 429
)

// ResumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type ResumableUpload struct {
	// Client is the HTTP client used for upload requests; when nil, the
	// send path falls back to http.DefaultClient.
	Client *http.Client
	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
	URI       string
	UserAgent string // User-Agent for header of the request
	// Media is the object being uploaded.
	Media *MediaBuffer
	// MediaType defines the media type, e.g. "image/jpeg".
	MediaType string

	mu       sync.Mutex // guards progress
	progress int64      // number of bytes uploaded so far

	// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
	Callback func(int64)

	// If not specified, a default exponential backoff strategy will be used.
	Backoff BackoffStrategy
}
46
47// Progress returns the number of bytes uploaded at this point.
48func (rx *ResumableUpload) Progress() int64 {
49 rx.mu.Lock()
50 defer rx.mu.Unlock()
51 return rx.progress
52}
53
54// doUploadRequest performs a single HTTP request to upload data.
55// off specifies the offset in rx.Media from which data is drawn.
56// size is the number of bytes in data.
57// final specifies whether data is the final chunk to be uploaded.
58func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) {
59 req, err := http.NewRequest("POST", rx.URI, data)
60 if err != nil {
61 return nil, err
62 }
63
64 req.ContentLength = size
65 var contentRange string
66 if final {
67 if size == 0 {
68 contentRange = fmt.Sprintf("bytes */%v", off)
69 } else {
70 contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
71 }
72 } else {
73 contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
74 }
75 req.Header.Set("Content-Range", contentRange)
76 req.Header.Set("Content-Type", rx.MediaType)
77 req.Header.Set("User-Agent", rx.UserAgent)
78
79 // Google's upload endpoint uses status code 308 for a
80 // different purpose than the "308 Permanent Redirect"
81 // since-standardized in RFC 7238. Because of the conflict in
82 // semantics, Google added this new request header which
83 // causes it to not use "308" and instead reply with 200 OK
84 // and sets the upload-specific "X-HTTP-Status-Code-Override:
85 // 308" response header.
86 req.Header.Set("X-GUploader-No-308", "yes")
87
88 return SendRequest(ctx, rx.Client, req)
89}
90
91func statusResumeIncomplete(resp *http.Response) bool {
92 // This is how the server signals "status resume incomplete"
93 // when X-GUploader-No-308 is set to "yes":
94 return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308"
95}
96
97// reportProgress calls a user-supplied callback to report upload progress.
98// If old==updated, the callback is not called.
99func (rx *ResumableUpload) reportProgress(old, updated int64) {
100 if updated-old == 0 {
101 return
102 }
103 rx.mu.Lock()
104 rx.progress = updated
105 rx.mu.Unlock()
106 if rx.Callback != nil {
107 rx.Callback(updated)
108 }
109}
110
// transferChunk performs a single HTTP request to upload a single chunk from rx.Media.
// On success it records progress and, if the server reports the upload is
// still incomplete, advances the buffer to the next chunk.
func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) {
	chunk, off, size, err := rx.Media.Chunk()

	// io.EOF from Chunk marks the final chunk; it is not a failure.
	done := err == io.EOF
	if !done && err != nil {
		return nil, err
	}

	res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done)
	if err != nil {
		return res, err
	}

	// We sent "X-GUploader-No-308: yes" (see comment elsewhere in
	// this file), so we don't expect to get a 308.
	if res.StatusCode == 308 {
		return nil, errors.New("unexpected 308 response status code")
	}

	if res.StatusCode == http.StatusOK {
		rx.reportProgress(off, off+int64(size))
	}

	// Upload still incomplete: advance so the next call sends the next chunk.
	if statusResumeIncomplete(res) {
		rx.Media.Next()
	}
	return res, nil
}
140
141func contextDone(ctx context.Context) bool {
142 select {
143 case <-ctx.Done():
144 return true
145 default:
146 return false
147 }
148}
149
// Upload starts the process of a resumable upload with a cancellable context.
// It retries using the provided back off strategy until cancelled or the
// strategy indicates to stop retrying.
// It is called from the auto-generated API code and is not visible to the user.
// Before sending an HTTP request, Upload calls any registered hook functions,
// and calls the returned functions after the request returns (see send.go).
// rx is private to the auto-generated API code.
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
	var pause time.Duration
	backoff := rx.Backoff
	if backoff == nil {
		backoff = DefaultBackoffStrategy()
	}

	for {
		// Ensure that we return in the case of cancelled context, even if pause is 0.
		if contextDone(ctx) {
			return nil, ctx.Err()
		}
		// Wait out the backoff pause, still honoring cancellation.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(pause):
		}

		resp, err = rx.transferChunk(ctx)

		var status int
		if resp != nil {
			status = resp.StatusCode
		}

		// Check if we should retry the request.
		if shouldRetry(status, err) {
			var retry bool
			pause, retry = backoff.Pause()
			if retry {
				// Discard the failed response before retrying.
				if resp != nil && resp.Body != nil {
					resp.Body.Close()
				}
				continue
			}
		}

		// If the chunk was uploaded successfully, but there's still
		// more to go, upload the next chunk without any delay.
		if statusResumeIncomplete(resp) {
			pause = 0
			backoff.Reset()
			resp.Body.Close()
			continue
		}

		// It's possible for err and resp to both be non-nil here, but we expose a simpler
		// contract to our callers: exactly one of resp and err will be non-nil. This means
		// that any response body must be closed here before returning a non-nil error.
		if err != nil {
			if resp != nil && resp.Body != nil {
				resp.Body.Close()
			}
			return nil, err
		}

		return resp, nil
	}
}
diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go
new file mode 100644
index 0000000..fdde3f4
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/retry.go
@@ -0,0 +1,84 @@
1// Copyright 2017 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package gensupport
16
17import (
18 "context"
19 "io"
20 "net"
21 "net/http"
22 "time"
23)
24
25// Retry invokes the given function, retrying it multiple times if the connection failed or
26// the HTTP status response indicates the request should be attempted again. ctx may be nil.
27func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
28 for {
29 resp, err := f()
30
31 var status int
32 if resp != nil {
33 status = resp.StatusCode
34 }
35
36 // Return if we shouldn't retry.
37 pause, retry := backoff.Pause()
38 if !shouldRetry(status, err) || !retry {
39 return resp, err
40 }
41
42 // Ensure the response body is closed, if any.
43 if resp != nil && resp.Body != nil {
44 resp.Body.Close()
45 }
46
47 // Pause, but still listen to ctx.Done if context is not nil.
48 var done <-chan struct{}
49 if ctx != nil {
50 done = ctx.Done()
51 }
52 select {
53 case <-done:
54 return nil, ctx.Err()
55 case <-time.After(pause):
56 }
57 }
58}
59
60// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
61func DefaultBackoffStrategy() BackoffStrategy {
62 return &ExponentialBackoff{
63 Base: 250 * time.Millisecond,
64 Max: 16 * time.Second,
65 }
66}
67
68// shouldRetry returns true if the HTTP response / error indicates that the
69// request should be attempted again.
70func shouldRetry(status int, err error) bool {
71 if 500 <= status && status <= 599 {
72 return true
73 }
74 if status == statusTooManyRequests {
75 return true
76 }
77 if err == io.ErrUnexpectedEOF {
78 return true
79 }
80 if err, ok := err.(net.Error); ok {
81 return err.Temporary()
82 }
83 return false
84}
diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go
new file mode 100644
index 0000000..5799393
--- /dev/null
+++ b/vendor/google.golang.org/api/gensupport/send.go
@@ -0,0 +1,87 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gensupport
6
7import (
8 "context"
9 "encoding/json"
10 "errors"
11 "net/http"
12)
13
// Hook is the type of a function that is called once before each HTTP request
// that is sent by a generated API. It returns a function that is called after
// the request returns.
// Hooks are not called if the context is nil.
type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response)

// hooks holds the registered hooks in registration order. It is read by
// SendRequest and must not be mutated concurrently with it.
var hooks []Hook

// RegisterHook registers a Hook to be called before each HTTP request by a
// generated API. Hooks are called in the order they are registered. Each
// hook can return a function; if it is non-nil, it is called after the HTTP
// request returns. These functions are called in the reverse order.
// RegisterHook should not be called concurrently with itself or SendRequest.
func RegisterHook(h Hook) {
	hooks = append(hooks, h)
}
30
31// SendRequest sends a single HTTP request using the given client.
32// If ctx is non-nil, it calls all hooks, then sends the request with
33// req.WithContext, then calls any functions returned by the hooks in
34// reverse order.
35func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
36 // Disallow Accept-Encoding because it interferes with the automatic gzip handling
37 // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
38 if _, ok := req.Header["Accept-Encoding"]; ok {
39 return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
40 }
41 if ctx == nil {
42 return client.Do(req)
43 }
44 // Call hooks in order of registration, store returned funcs.
45 post := make([]func(resp *http.Response), len(hooks))
46 for i, h := range hooks {
47 fn := h(ctx, req)
48 post[i] = fn
49 }
50
51 // Send request.
52 resp, err := send(ctx, client, req)
53
54 // Call returned funcs in reverse order.
55 for i := len(post) - 1; i >= 0; i-- {
56 if fn := post[i]; fn != nil {
57 fn(resp)
58 }
59 }
60 return resp, err
61}
62
63func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
64 if client == nil {
65 client = http.DefaultClient
66 }
67 resp, err := client.Do(req.WithContext(ctx))
68 // If we got an error, and the context has been canceled,
69 // the context's error is probably more useful.
70 if err != nil {
71 select {
72 case <-ctx.Done():
73 err = ctx.Err()
74 default:
75 }
76 }
77 return resp, err
78}
79
80// DecodeResponse decodes the body of res into target. If there is no body,
81// target is unchanged.
82func DecodeResponse(target interface{}, res *http.Response) error {
83 if res.StatusCode == http.StatusNoContent {
84 return nil
85 }
86 return json.NewDecoder(res.Body).Decode(target)
87}
diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go
new file mode 100644
index 0000000..8cdb03b
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/googleapi.go
@@ -0,0 +1,429 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package googleapi contains the common code shared by all Google API
6// libraries.
7package googleapi // import "google.golang.org/api/googleapi"
8
9import (
10 "bytes"
11 "encoding/json"
12 "fmt"
13 "io"
14 "io/ioutil"
15 "net/http"
16 "net/url"
17 "strings"
18
19 "google.golang.org/api/googleapi/internal/uritemplates"
20)
21
// ContentTyper is an interface for Readers which know (or would like
// to override) their Content-Type. If a media body doesn't implement
// ContentTyper, the type is sniffed from the content using
// http.DetectContentType.
type ContentTyper interface {
	// ContentType returns the MIME type of the reader's content.
	ContentType() string
}
29
// A SizeReaderAt is a ReaderAt with a Size method.
// An io.SectionReader implements SizeReaderAt.
type SizeReaderAt interface {
	io.ReaderAt
	// Size returns the total number of bytes available via ReadAt.
	Size() int64
}
36
// ServerResponse is embedded in each Do response and
// provides the HTTP status code and header sent by the server.
type ServerResponse struct {
	// HTTPStatusCode is the server's response status code. When using a
	// resource method's Do call, this will always be in the 2xx range.
	HTTPStatusCode int
	// Header contains the response header fields from the server.
	Header http.Header
}
46
const (
	// Version defines the gax version being used. This is typically sent
	// in an HTTP header to services.
	Version = "0.5"

	// UserAgent is the header string used to identify this package.
	UserAgent = "google-api-go-client/" + Version

	// DefaultUploadChunkSize is the default chunk size to use for resumable
	// uploads if not specified by the user. (8 MiB.)
	DefaultUploadChunkSize = 8 * 1024 * 1024

	// MinUploadChunkSize is the minimum chunk size that can be used for
	// resumable uploads. All user-specified chunk sizes must be multiple of
	// this value. (256 KiB.)
	MinUploadChunkSize = 256 * 1024
)
64
// Error contains an error response from the server.
type Error struct {
	// Code is the HTTP response status code and will always be populated.
	Code int `json:"code"`
	// Message is the server response message and is only populated when
	// explicitly referenced by the JSON server response.
	Message string `json:"message"`
	// Body is the raw response returned by the server.
	// It is often but not always JSON, depending on how the request fails.
	Body string
	// Header contains the response header fields from the server.
	Header http.Header

	Errors []ErrorItem
}

// ErrorItem is a detailed error code & message from the Google API frontend.
type ErrorItem struct {
	// Reason is the typed error code. For example: "some_example".
	Reason string `json:"reason"`
	// Message is the human-readable description of the error.
	Message string `json:"message"`
}

// Error renders the error, including any detailed error items, as a
// human-readable string.
func (e *Error) Error() string {
	if e.Message == "" && len(e.Errors) == 0 {
		return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
	}
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
	if e.Message != "" {
		buf.WriteString(e.Message)
	}
	switch {
	case len(e.Errors) == 0:
		return strings.TrimSpace(buf.String())
	case len(e.Errors) == 1 && e.Errors[0].Message == e.Message:
		// A single item that repeats the top-level message: append only
		// its reason instead of a redundant details section.
		fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
		return buf.String()
	}
	fmt.Fprintln(&buf, "\nMore details:")
	for _, item := range e.Errors {
		fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", item.Reason, item.Message)
	}
	return buf.String()
}

// errorReply matches the standard {"error": {...}} JSON error envelope.
type errorReply struct {
	Error *Error `json:"error"`
}
115
116// CheckResponse returns an error (of type *Error) if the response
117// status code is not 2xx.
118func CheckResponse(res *http.Response) error {
119 if res.StatusCode >= 200 && res.StatusCode <= 299 {
120 return nil
121 }
122 slurp, err := ioutil.ReadAll(res.Body)
123 if err == nil {
124 jerr := new(errorReply)
125 err = json.Unmarshal(slurp, jerr)
126 if err == nil && jerr.Error != nil {
127 if jerr.Error.Code == 0 {
128 jerr.Error.Code = res.StatusCode
129 }
130 jerr.Error.Body = string(slurp)
131 return jerr.Error
132 }
133 }
134 return &Error{
135 Code: res.StatusCode,
136 Body: string(slurp),
137 Header: res.Header,
138 }
139}
140
141// IsNotModified reports whether err is the result of the
142// server replying with http.StatusNotModified.
143// Such error values are sometimes returned by "Do" methods
144// on calls when If-None-Match is used.
145func IsNotModified(err error) bool {
146 if err == nil {
147 return false
148 }
149 ae, ok := err.(*Error)
150 return ok && ae.Code == http.StatusNotModified
151}
152
153// CheckMediaResponse returns an error (of type *Error) if the response
154// status code is not 2xx. Unlike CheckResponse it does not assume the
155// body is a JSON error document.
156// It is the caller's responsibility to close res.Body.
157func CheckMediaResponse(res *http.Response) error {
158 if res.StatusCode >= 200 && res.StatusCode <= 299 {
159 return nil
160 }
161 slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
162 return &Error{
163 Code: res.StatusCode,
164 Body: string(slurp),
165 }
166}
167
168// MarshalStyle defines whether to marshal JSON with a {"data": ...} wrapper.
169type MarshalStyle bool
170
171// WithDataWrapper marshals JSON with a {"data": ...} wrapper.
172var WithDataWrapper = MarshalStyle(true)
173
174// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper.
175var WithoutDataWrapper = MarshalStyle(false)
176
177func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
178 buf := new(bytes.Buffer)
179 if wrap {
180 buf.Write([]byte(`{"data": `))
181 }
182 err := json.NewEncoder(buf).Encode(v)
183 if err != nil {
184 return nil, err
185 }
186 if wrap {
187 buf.Write([]byte(`}`))
188 }
189 return buf, nil
190}
191
192// endingWithErrorReader from r until it returns an error. If the
193// final error from r is io.EOF and e is non-nil, e is used instead.
194type endingWithErrorReader struct {
195 r io.Reader
196 e error
197}
198
199func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
200 n, err = er.r.Read(p)
201 if err == io.EOF && er.e != nil {
202 err = er.e
203 }
204 return
205}
206
// countingWriter counts the number of bytes it receives to write, but
// discards them.
type countingWriter struct {
	n *int64 // running byte count, shared with the creator
}

// Write adds len(p) to the counter and reports the whole write as done.
func (w countingWriter) Write(p []byte) (int, error) {
	size := len(p)
	*w.n += int64(size)
	return size, nil
}
217
// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
// current is the number of bytes uploaded so far and total is the overall size.
// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
// The remaining usable pieces of resumable uploads is exposed in each auto-generated API.
type ProgressUpdater func(current, total int64)
222
// MediaOption defines the interface for setting media options.
type MediaOption interface {
	// setOptions applies this option's value to o.
	setOptions(o *MediaOptions)
}
227
228type contentTypeOption string
229
230func (ct contentTypeOption) setOptions(o *MediaOptions) {
231 o.ContentType = string(ct)
232 if o.ContentType == "" {
233 o.ForceEmptyContentType = true
234 }
235}
236
237// ContentType returns a MediaOption which sets the Content-Type header for media uploads.
238// If ctype is empty, the Content-Type header will be omitted.
239func ContentType(ctype string) MediaOption {
240 return contentTypeOption(ctype)
241}
242
243type chunkSizeOption int
244
245func (cs chunkSizeOption) setOptions(o *MediaOptions) {
246 size := int(cs)
247 if size%MinUploadChunkSize != 0 {
248 size += MinUploadChunkSize - (size % MinUploadChunkSize)
249 }
250 o.ChunkSize = size
251}
252
253// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
254// size will be rounded up to the nearest multiple of 256K.
255// Media which contains fewer than size bytes will be uploaded in a single request.
256// Media which contains size bytes or more will be uploaded in separate chunks.
257// If size is zero, media will be uploaded in a single request.
258func ChunkSize(size int) MediaOption {
259 return chunkSizeOption(size)
260}
261
// MediaOptions stores options for customizing media upload. It is not used by developers directly.
type MediaOptions struct {
	// ContentType is the explicit Content-Type for the upload, if any.
	ContentType string
	// ForceEmptyContentType, when true, omits the Content-Type header.
	ForceEmptyContentType bool

	// ChunkSize is the resumable-upload chunk size in bytes.
	ChunkSize int
}
269
270// ProcessMediaOptions stores options from opts in a MediaOptions.
271// It is not used by developers directly.
272func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
273 mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
274 for _, o := range opts {
275 o.setOptions(mo)
276 }
277 return mo
278}
279
// ResolveRelative resolves relatives such as "http://www.golang.org/" and
// "topics/myproject/mytopic" into a single string, such as
// "http://www.golang.org/topics/myproject/mytopic". It strips all parent
// references (e.g. ../..) as well as anything after the host
// (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz).
func ResolveRelative(basestr, relstr string) string {
	base, _ := url.Parse(basestr)
	// Anything after the first colon is held aside and re-appended
	// verbatim, so it never goes through URL resolution/escaping.
	var suffix string
	if i := strings.IndexRune(relstr, ':'); i > 0 {
		suffix = relstr[i+1:]
		relstr = relstr[:i]
	}
	rel, _ := url.Parse(relstr)
	resolved := base.ResolveReference(rel).String()
	if suffix != "" {
		resolved = resolved + ":" + suffix
	}
	// Undo the escaping of URI-template markers so templates survive.
	return strings.NewReplacer("%7B", "{", "%7D", "}", "%2A", "*").Replace(resolved)
}
303
// Expand substitutes any {encoded} strings in the URL passed in using
// the map supplied.
//
// On success it sets both u.Path (unescaped) and u.RawPath (escaped) so
// the URL renders with expansions correctly percent-encoded. Expansion
// errors are ignored and leave u unchanged.
func Expand(u *url.URL, expansions map[string]string) {
	escaped, unescaped, err := uritemplates.Expand(u.Path, expansions)
	if err == nil {
		u.Path = unescaped
		u.RawPath = escaped
	}
}
315
316// CloseBody is used to close res.Body.
317// Prior to calling Close, it also tries to Read a small amount to see an EOF.
318// Not seeing an EOF can prevent HTTP Transports from reusing connections.
319func CloseBody(res *http.Response) {
320 if res == nil || res.Body == nil {
321 return
322 }
323 // Justification for 3 byte reads: two for up to "\r\n" after
324 // a JSON/XML document, and then 1 to see EOF if we haven't yet.
325 // TODO(bradfitz): detect Go 1.3+ and skip these reads.
326 // See https://codereview.appspot.com/58240043
327 // and https://codereview.appspot.com/49570044
328 buf := make([]byte, 1)
329 for i := 0; i < 3; i++ {
330 _, err := res.Body.Read(buf)
331 if err != nil {
332 break
333 }
334 }
335 res.Body.Close()
336
337}
338
// VariantType returns the type name of the given variant.
// If the map doesn't contain the named key or the value is not a string, "" is returned.
// This is used to support "variant" APIs that can return one of a number of different types.
func VariantType(t map[string]interface{}) string {
	if s, ok := t["type"].(string); ok {
		return s
	}
	return ""
}
346
// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
// This is used to support "variant" APIs that can return one of a number of different types.
// It reports whether the conversion was successful.
func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
	data, err := json.Marshal(v)
	if err != nil {
		return false
	}
	return json.Unmarshal(data, dst) == nil
}
358
// A Field names a field to be retrieved with a partial response.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
//
// Partial responses can dramatically reduce the amount of data that must be sent to your application.
// In order to request partial responses, you can specify the full list of fields
// that your application needs by adding the Fields option to your request.
//
// Field strings use camelCase with leading lower-case characters to identify fields within the response.
//
// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
// you could request just those fields like this:
//
//	svc.Events.List().Fields("nextPageToken", "items/id").Do()
//
// or if you were also interested in each Item's "Updated" field, you can combine them like this:
//
//	svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
//
// More information about field formatting can be found here:
// https://developers.google.com/+/api/#fields-syntax
//
// Another way to find field names is through the Google API explorer:
// https://developers.google.com/apis-explorer/#p/
type Field string
383
384// CombineFields combines fields into a single string.
385func CombineFields(s []Field) string {
386 r := make([]string, len(s))
387 for i, v := range s {
388 r[i] = string(v)
389 }
390 return strings.Join(r, ",")
391}
392
// A CallOption is an optional argument to an API call.
// It should be treated as an opaque value by users of Google APIs.
//
// A CallOption is something that configures an API call in a way that is
// not specific to that API; for instance, controlling the quota user for
// an API call is common across many APIs, and is thus a CallOption.
type CallOption interface {
	// Get returns the URL query parameter key and value for this option.
	Get() (key, value string)
}
402
// QuotaUser returns a CallOption that will set the quota user for a call.
// The quota user can be used by server-side applications to control accounting.
// It can be an arbitrary string up to 40 characters, and will override UserIP
// if both are provided.
func QuotaUser(u string) CallOption { return quotaUser(u) }

// quotaUser implements CallOption for the "quotaUser" URL parameter.
type quotaUser string

// Get returns the "quotaUser" parameter key and its value.
func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) }
412
// UserIP returns a CallOption that will set the "userIp" parameter of a call.
// This should be the IP address of the originating request.
func UserIP(ip string) CallOption { return userIP(ip) }

// userIP implements CallOption for the "userIp" URL parameter.
type userIP string

// Get returns the "userIp" parameter key and its value.
func (i userIP) Get() (string, string) { return "userIp", string(i) }
420
// Trace returns a CallOption that enables diagnostic tracing for a call.
// traceToken is an ID supplied by Google support.
func Trace(traceToken string) CallOption { return traceTok(traceToken) }

// traceTok implements CallOption for the "trace" URL parameter.
type traceTok string

// Get returns the "trace" parameter key and its "token:"-prefixed value.
func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) }
428
429// TODO: Fields too
diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
new file mode 100644
index 0000000..de9c88c
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
@@ -0,0 +1,18 @@
1Copyright (c) 2013 Joshua Tacoma
2
3Permission is hereby granted, free of charge, to any person obtaining a copy of
4this software and associated documentation files (the "Software"), to deal in
5the Software without restriction, including without limitation the rights to
6use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
7the Software, and to permit persons to whom the Software is furnished to do so,
8subject to the following conditions:
9
10The above copyright notice and this permission notice shall be included in all
11copies or substantial portions of the Software.
12
13THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
15FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
16COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
17IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
new file mode 100644
index 0000000..63bf053
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
@@ -0,0 +1,248 @@
1// Copyright 2013 Joshua Tacoma. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package uritemplates is a level 3 implementation of RFC 6570 (URI
6// Template, http://tools.ietf.org/html/rfc6570).
7// uritemplates does not support composite values (in Go: slices or maps)
8// and so does not qualify as a level 4 implementation.
9package uritemplates
10
11import (
12 "bytes"
13 "errors"
14 "regexp"
15 "strconv"
16 "strings"
17)
18
var (
	// unreserved matches bytes that must be percent-encoded when only
	// RFC 6570 unreserved characters may pass through.
	unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
	// reserved matches bytes that must be percent-encoded even when
	// reserved characters are allowed ({+var} and {#var} operators).
	reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
	// validname matches legal variable names: alphanumerics, "_", ".",
	// and percent-encoded octets.
	validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
	// hex holds the digits used for percent-encoding bytes.
	hex = []byte("0123456789ABCDEF")
)
25
26func pctEncode(src []byte) []byte {
27 dst := make([]byte, len(src)*3)
28 for i, b := range src {
29 buf := dst[i*3 : i*3+3]
30 buf[0] = 0x25
31 buf[1] = hex[b/16]
32 buf[2] = hex[b%16]
33 }
34 return dst
35}
36
37// pairWriter is a convenience struct which allows escaped and unescaped
38// versions of the template to be written in parallel.
39type pairWriter struct {
40 escaped, unescaped bytes.Buffer
41}
42
43// Write writes the provided string directly without any escaping.
44func (w *pairWriter) Write(s string) {
45 w.escaped.WriteString(s)
46 w.unescaped.WriteString(s)
47}
48
49// Escape writes the provided string, escaping the string for the
50// escaped output.
51func (w *pairWriter) Escape(s string, allowReserved bool) {
52 w.unescaped.WriteString(s)
53 if allowReserved {
54 w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode))
55 } else {
56 w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
57 }
58}
59
60// Escaped returns the escaped string.
61func (w *pairWriter) Escaped() string {
62 return w.escaped.String()
63}
64
65// Unescaped returns the unescaped string.
66func (w *pairWriter) Unescaped() string {
67 return w.unescaped.String()
68}
69
// A uriTemplate is a parsed representation of a URI template.
type uriTemplate struct {
	raw   string         // the original template text
	parts []templatePart // alternating literal and expression parts
}
75
76// parse parses a URI template string into a uriTemplate object.
77func parse(rawTemplate string) (*uriTemplate, error) {
78 split := strings.Split(rawTemplate, "{")
79 parts := make([]templatePart, len(split)*2-1)
80 for i, s := range split {
81 if i == 0 {
82 if strings.Contains(s, "}") {
83 return nil, errors.New("unexpected }")
84 }
85 parts[i].raw = s
86 continue
87 }
88 subsplit := strings.Split(s, "}")
89 if len(subsplit) != 2 {
90 return nil, errors.New("malformed template")
91 }
92 expression := subsplit[0]
93 var err error
94 parts[i*2-1], err = parseExpression(expression)
95 if err != nil {
96 return nil, err
97 }
98 parts[i*2].raw = subsplit[1]
99 }
100 return &uriTemplate{
101 raw: rawTemplate,
102 parts: parts,
103 }, nil
104}
105
// templatePart is one segment of a parsed template: either a literal
// (raw non-empty) or an expression with its operator behavior.
type templatePart struct {
	raw           string         // literal text; non-empty only for literal parts
	terms         []templateTerm // the expression's variable terms
	first         string         // emitted before the first defined term
	sep           string         // emitted between subsequent terms
	named         bool           // whether to emit "name=" prefixes
	ifemp         string         // emitted after the name when the value is empty
	allowReserved bool           // whether reserved characters pass unescaped
}
115
// templateTerm is a single variable reference inside an expression.
type templateTerm struct {
	name    string // variable name
	explode bool   // "*" modifier was present (composite values unsupported here)
	truncate int   // ":n" prefix modifier; 0 means no truncation
}
121
122func parseExpression(expression string) (result templatePart, err error) {
123 switch expression[0] {
124 case '+':
125 result.sep = ","
126 result.allowReserved = true
127 expression = expression[1:]
128 case '.':
129 result.first = "."
130 result.sep = "."
131 expression = expression[1:]
132 case '/':
133 result.first = "/"
134 result.sep = "/"
135 expression = expression[1:]
136 case ';':
137 result.first = ";"
138 result.sep = ";"
139 result.named = true
140 expression = expression[1:]
141 case '?':
142 result.first = "?"
143 result.sep = "&"
144 result.named = true
145 result.ifemp = "="
146 expression = expression[1:]
147 case '&':
148 result.first = "&"
149 result.sep = "&"
150 result.named = true
151 result.ifemp = "="
152 expression = expression[1:]
153 case '#':
154 result.first = "#"
155 result.sep = ","
156 result.allowReserved = true
157 expression = expression[1:]
158 default:
159 result.sep = ","
160 }
161 rawterms := strings.Split(expression, ",")
162 result.terms = make([]templateTerm, len(rawterms))
163 for i, raw := range rawterms {
164 result.terms[i], err = parseTerm(raw)
165 if err != nil {
166 break
167 }
168 }
169 return result, err
170}
171
172func parseTerm(term string) (result templateTerm, err error) {
173 // TODO(djd): Remove "*" suffix parsing once we check that no APIs have
174 // mistakenly used that attribute.
175 if strings.HasSuffix(term, "*") {
176 result.explode = true
177 term = term[:len(term)-1]
178 }
179 split := strings.Split(term, ":")
180 if len(split) == 1 {
181 result.name = term
182 } else if len(split) == 2 {
183 result.name = split[0]
184 var parsed int64
185 parsed, err = strconv.ParseInt(split[1], 10, 0)
186 result.truncate = int(parsed)
187 } else {
188 err = errors.New("multiple colons in same term")
189 }
190 if !validname.MatchString(result.name) {
191 err = errors.New("not a valid name: " + result.name)
192 }
193 if result.explode && result.truncate > 0 {
194 err = errors.New("both explode and prefix modifers on same term")
195 }
196 return result, err
197}
198
199// Expand expands a URI template with a set of values to produce the
200// resultant URI. Two forms of the result are returned: one with all the
201// elements escaped, and one with the elements unescaped.
202func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) {
203 var w pairWriter
204 for _, p := range t.parts {
205 p.expand(&w, values)
206 }
207 return w.Escaped(), w.Unescaped()
208}
209
210func (tp *templatePart) expand(w *pairWriter, values map[string]string) {
211 if len(tp.raw) > 0 {
212 w.Write(tp.raw)
213 return
214 }
215 var first = true
216 for _, term := range tp.terms {
217 value, exists := values[term.name]
218 if !exists {
219 continue
220 }
221 if first {
222 w.Write(tp.first)
223 first = false
224 } else {
225 w.Write(tp.sep)
226 }
227 tp.expandString(w, term, value)
228 }
229}
230
231func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) {
232 if tp.named {
233 w.Write(name)
234 if empty {
235 w.Write(tp.ifemp)
236 } else {
237 w.Write("=")
238 }
239 }
240}
241
242func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) {
243 if len(s) > t.truncate && t.truncate > 0 {
244 s = s[:t.truncate]
245 }
246 tp.expandName(w, t.name, len(s) == 0)
247 w.Escape(s, tp.allowReserved)
248}
diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go
new file mode 100644
index 0000000..2e70b81
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go
@@ -0,0 +1,17 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package uritemplates
6
7// Expand parses then expands a URI template with a set of values to produce
8// the resultant URI. Two forms of the result are returned: one with all the
9// elements escaped, and one with the elements unescaped.
10func Expand(path string, values map[string]string) (escaped, unescaped string, err error) {
11 template, err := parse(path)
12 if err != nil {
13 return "", "", err
14 }
15 escaped, unescaped = template.Expand(values)
16 return escaped, unescaped, nil
17}
diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go
new file mode 100644
index 0000000..eca1ea2
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go
@@ -0,0 +1,38 @@
1// Copyright 2012 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package transport contains HTTP transports used to make
6// authenticated API requests.
7package transport
8
9import (
10 "errors"
11 "net/http"
12)
13
14// APIKey is an HTTP Transport which wraps an underlying transport and
15// appends an API Key "key" parameter to the URL of outgoing requests.
16type APIKey struct {
17 // Key is the API Key to set on requests.
18 Key string
19
20 // Transport is the underlying HTTP transport.
21 // If nil, http.DefaultTransport is used.
22 Transport http.RoundTripper
23}
24
25func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
26 rt := t.Transport
27 if rt == nil {
28 rt = http.DefaultTransport
29 if rt == nil {
30 return nil, errors.New("googleapi/transport: no Transport specified or available")
31 }
32 }
33 newReq := *req
34 args := newReq.URL.Query()
35 args.Set("key", t.Key)
36 newReq.URL.RawQuery = args.Encode()
37 return rt.RoundTrip(&newReq)
38}
diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go
new file mode 100644
index 0000000..a280e30
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/types.go
@@ -0,0 +1,202 @@
1// Copyright 2013 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package googleapi
6
7import (
8 "encoding/json"
9 "errors"
10 "strconv"
11)
12
// Int64s is a slice of int64s that marshal as quoted strings in JSON.
type Int64s []int64

// UnmarshalJSON decodes a JSON array of quoted decimal strings into q,
// discarding any previous contents first.
func (q *Int64s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return err
		}
		*q = append(*q, n)
	}
	return nil
}
31
// Int32s is a slice of int32s that marshal as quoted strings in JSON.
type Int32s []int32

// UnmarshalJSON decodes a JSON array of quoted decimal strings into q,
// discarding any previous contents first.
func (q *Int32s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		n, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return err
		}
		*q = append(*q, int32(n))
	}
	return nil
}
50
// Uint64s is a slice of uint64s that marshal as quoted strings in JSON.
type Uint64s []uint64

// UnmarshalJSON decodes a JSON array of quoted decimal strings into q,
// discarding any previous contents first.
func (q *Uint64s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return err
		}
		*q = append(*q, n)
	}
	return nil
}
69
// Uint32s is a slice of uint32s that marshal as quoted strings in JSON.
type Uint32s []uint32

// UnmarshalJSON decodes a JSON array of quoted decimal strings into q,
// discarding any previous contents first.
func (q *Uint32s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		n, err := strconv.ParseUint(s, 10, 32)
		if err != nil {
			return err
		}
		*q = append(*q, uint32(n))
	}
	return nil
}
88
// Float64s is a slice of float64s that marshal as quoted strings in JSON.
type Float64s []float64

// UnmarshalJSON decodes a JSON array of quoted decimal strings into q,
// discarding any previous contents first.
func (q *Float64s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		f, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return err
		}
		*q = append(*q, f)
	}
	return nil
}
107
// quotedList renders n items as a JSON array of quoted values; fn
// appends the textual form of item i to dst and returns the new slice.
func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
	dst := make([]byte, 0, 2+n*10) // somewhat arbitrary
	dst = append(dst, '[')
	for i := 0; i < n; i++ {
		if i > 0 {
			dst = append(dst, ',')
		}
		dst = append(dst, '"')
		dst = fn(dst, i)
		dst = append(dst, '"')
	}
	return append(dst, ']'), nil
}
122
// MarshalJSON encodes q as a JSON array of quoted decimal strings.
func (q Int64s) MarshalJSON() ([]byte, error) {
	return quotedList(len(q), func(dst []byte, i int) []byte {
		return strconv.AppendInt(dst, q[i], 10)
	})
}

// MarshalJSON encodes q as a JSON array of quoted decimal strings.
func (q Int32s) MarshalJSON() ([]byte, error) {
	return quotedList(len(q), func(dst []byte, i int) []byte {
		return strconv.AppendInt(dst, int64(q[i]), 10)
	})
}

// MarshalJSON encodes q as a JSON array of quoted decimal strings.
func (q Uint64s) MarshalJSON() ([]byte, error) {
	return quotedList(len(q), func(dst []byte, i int) []byte {
		return strconv.AppendUint(dst, q[i], 10)
	})
}

// MarshalJSON encodes q as a JSON array of quoted decimal strings.
func (q Uint32s) MarshalJSON() ([]byte, error) {
	return quotedList(len(q), func(dst []byte, i int) []byte {
		return strconv.AppendUint(dst, uint64(q[i]), 10)
	})
}

// MarshalJSON encodes q as a JSON array of quoted decimal strings
// ('g' format, shortest representation).
func (q Float64s) MarshalJSON() ([]byte, error) {
	return quotedList(len(q), func(dst []byte, i int) []byte {
		return strconv.AppendFloat(dst, q[i], 'g', -1, 64)
	})
}
152
// RawMessage is a raw encoded JSON value.
// It is identical to json.RawMessage, except it does not suffer from
// https://golang.org/issue/14493.
type RawMessage []byte

// MarshalJSON returns m.
func (m RawMessage) MarshalJSON() ([]byte, error) {
	return m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer")
	}
	buf := make([]byte, len(data))
	copy(buf, data)
	*m = buf
	return nil
}
171
/*
 * Helper routines for simplifying the creation of optional (pointer-typed)
 * fields of basic type.
 */

// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool { return &v }

// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 { return &v }

// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 { return &v }

// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 { return &v }

// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }

// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }

// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string { return &v }
diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go
new file mode 100644
index 0000000..e5b849b
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/creds.go
@@ -0,0 +1,45 @@
1// Copyright 2017 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package internal
16
17import (
18 "context"
19 "fmt"
20 "io/ioutil"
21
22 "golang.org/x/oauth2/google"
23)
24
25// Creds returns credential information obtained from DialSettings, or if none, then
26// it returns default credential information.
27func Creds(ctx context.Context, ds *DialSettings) (*google.DefaultCredentials, error) {
28 if ds.Credentials != nil {
29 return ds.Credentials, nil
30 }
31 if ds.CredentialsJSON != nil {
32 return google.CredentialsFromJSON(ctx, ds.CredentialsJSON, ds.Scopes...)
33 }
34 if ds.CredentialsFile != "" {
35 data, err := ioutil.ReadFile(ds.CredentialsFile)
36 if err != nil {
37 return nil, fmt.Errorf("cannot read credentials file: %v", err)
38 }
39 return google.CredentialsFromJSON(ctx, data, ds.Scopes...)
40 }
41 if ds.TokenSource != nil {
42 return &google.DefaultCredentials{TokenSource: ds.TokenSource}, nil
43 }
44 return google.FindDefaultCredentials(ctx, ds.Scopes...)
45}
diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go
new file mode 100644
index 0000000..ba40624
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/pool.go
@@ -0,0 +1,61 @@
1// Copyright 2016 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package internal
16
17import (
18 "errors"
19
20 "google.golang.org/grpc/naming"
21)
22
23// PoolResolver provides a fixed list of addresses to load balance between
24// and does not provide further updates.
25type PoolResolver struct {
26 poolSize int
27 dialOpt *DialSettings
28 ch chan []*naming.Update
29}
30
31// NewPoolResolver returns a PoolResolver
32// This is an EXPERIMENTAL API and may be changed or removed in the future.
33func NewPoolResolver(size int, o *DialSettings) *PoolResolver {
34 return &PoolResolver{poolSize: size, dialOpt: o}
35}
36
37// Resolve returns a Watcher for the endpoint defined by the DialSettings
38// provided to NewPoolResolver.
39func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) {
40 if r.dialOpt.Endpoint == "" {
41 return nil, errors.New("No endpoint configured")
42 }
43 addrs := make([]*naming.Update, 0, r.poolSize)
44 for i := 0; i < r.poolSize; i++ {
45 addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i})
46 }
47 r.ch = make(chan []*naming.Update, 1)
48 r.ch <- addrs
49 return r, nil
50}
51
52// Next returns a static list of updates on the first call,
53// and blocks indefinitely until Close is called on subsequent calls.
54func (r *PoolResolver) Next() ([]*naming.Update, error) {
55 return <-r.ch, nil
56}
57
58// Close releases resources associated with the pool and causes Next to unblock.
59func (r *PoolResolver) Close() {
60 close(r.ch)
61}
diff --git a/vendor/google.golang.org/api/internal/service-account.json b/vendor/google.golang.org/api/internal/service-account.json
new file mode 100644
index 0000000..2cb54c2
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/service-account.json
@@ -0,0 +1,12 @@
1{
2 "type": "service_account",
3 "project_id": "project_id",
4 "private_key_id": "private_key_id",
5 "private_key": "private_key",
6 "client_email": "xyz@developer.gserviceaccount.com",
7 "client_id": "123",
8 "auth_uri": "https://accounts.google.com/o/oauth2/auth",
9 "token_uri": "https://accounts.google.com/o/oauth2/token",
10 "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
11 "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com"
12}
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
new file mode 100644
index 0000000..afabdc4
--- /dev/null
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -0,0 +1,81 @@
1// Copyright 2017 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15// Package internal supports the options and transport packages.
16package internal
17
18import (
19 "errors"
20 "net/http"
21
22 "golang.org/x/oauth2"
23 "golang.org/x/oauth2/google"
24 "google.golang.org/grpc"
25)
26
// DialSettings holds information needed to establish a connection with a
// Google API service. It is populated by the option package's ClientOption
// values and validated/consumed by the transport and internal packages.
type DialSettings struct {
	Endpoint        string                     // service endpoint override; empty means the client's default
	Scopes          []string                   // OAuth2 scopes to request
	TokenSource     oauth2.TokenSource         // explicit token source for authentication
	Credentials     *google.DefaultCredentials // fully formed credentials; takes precedence in Creds
	CredentialsFile string                     // if set, Token Source is ignored.
	CredentialsJSON []byte                     // raw service-account/refresh-token JSON
	UserAgent       string                     // value for the User-Agent header
	APIKey          string                     // API key (HTTP/JSON APIs only)
	HTTPClient      *http.Client               // pre-built HTTP client; incompatible with gRPC options (see Validate)
	GRPCDialOpts    []grpc.DialOption          // extra options for grpc.Dial
	GRPCConn        *grpc.ClientConn           // pre-built gRPC connection; incompatible with HTTPClient
	NoAuth          bool                       // if true, no credentials may be supplied (see Validate)
}
43
44// Validate reports an error if ds is invalid.
45func (ds *DialSettings) Validate() error {
46 hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil
47 if ds.NoAuth && hasCreds {
48 return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials")
49 }
50 // Credentials should not appear with other options.
51 // We currently allow TokenSource and CredentialsFile to coexist.
52 // TODO(jba): make TokenSource & CredentialsFile an error (breaking change).
53 nCreds := 0
54 if ds.Credentials != nil {
55 nCreds++
56 }
57 if ds.CredentialsJSON != nil {
58 nCreds++
59 }
60 if ds.CredentialsFile != "" {
61 nCreds++
62 }
63 if ds.APIKey != "" {
64 nCreds++
65 }
66 if ds.TokenSource != nil {
67 nCreds++
68 }
69 // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility.
70 if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") {
71 return errors.New("multiple credential options provided")
72 }
73 if ds.HTTPClient != nil && ds.GRPCConn != nil {
74 return errors.New("WithHTTPClient is incompatible with WithGRPCConn")
75 }
76 if ds.HTTPClient != nil && ds.GRPCDialOpts != nil {
77 return errors.New("WithHTTPClient is incompatible with gRPC dial options")
78 }
79
80 return nil
81}
diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go
new file mode 100644
index 0000000..3c8ea77
--- /dev/null
+++ b/vendor/google.golang.org/api/iterator/iterator.go
@@ -0,0 +1,231 @@
1// Copyright 2016 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15// Package iterator provides support for standard Google API iterators.
16// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines.
17package iterator
18
19import (
20 "errors"
21 "fmt"
22 "reflect"
23)
24
// Done is returned by an iterator's Next method when the iteration is
// complete; when there are no more items to return.
var Done = errors.New("no more items in iterator")

// errMixed reports that Next and NextPage were both called on the same
// iterator; the two interact with the paging state in incompatible ways.
var errMixed = errors.New("iterator: Next and NextPage called on same iterator")

// PageInfo contains information about an iterator's paging state.
type PageInfo struct {
	// Token is the token used to retrieve the next page of items from the
	// API. You may set Token immediately after creating an iterator to
	// begin iteration at a particular point. If Token is the empty string,
	// the iterator will begin with the first eligible item.
	//
	// The result of setting Token after the first call to Next is undefined.
	//
	// After the underlying API method is called to retrieve a page of items,
	// Token is set to the next-page token in the response.
	Token string

	// MaxSize is the maximum number of items returned by a call to the API.
	// Set MaxSize as a hint to optimize the buffering behavior of the iterator.
	// If zero, the page size is determined by the underlying service.
	//
	// Use Pager to retrieve a page of a specific, exact size.
	MaxSize int

	// err is the iterator's error latch: it starts nil and, once set by
	// next or by Pager, never changes again.
	err error

	// atEnd is set when fetch returns an empty page token, meaning no more
	// calls to fetch should be made. The iterator is Done once atEnd is
	// true AND the buffer is empty.
	atEnd bool

	// fetch retrieves one page from the underlying service. It should pass
	// pageSize and pageToken to the service, append the results to the
	// buffer (never removing existing items), and return the next-page
	// token from the response. If the underlying RPC takes an int32 page
	// size, pageSize should be silently truncated.
	fetch func(pageSize int, pageToken string) (nextPageToken string, err error)

	// bufLen reports the number of currently buffered items.
	bufLen func() int

	// takeBuf returns the buffer after setting the buffer variable to nil.
	takeBuf func() interface{}

	// nextCalled and nextPageCalled record which access pattern has been
	// used, so that mixing Next and NextPage can be detected.
	nextCalled, nextPageCalled bool
}

// NewPageInfo exposes internals for iterator implementations.
// It is not a stable interface.
var NewPageInfo = newPageInfo

// newPageInfo should be called (via the NewPageInfo variable above) by the
// iterator-creating method of any iterator that supports paging. The fetch,
// bufLen and takeBuf arguments give access to the iterator's internal buffer
// of items, as described on the PageInfo fields.
//
// The second return value is the PageInfo.next method bound to the returned
// PageInfo value; returning it avoids exporting PageInfo.next.
func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) {
	info := new(PageInfo)
	info.fetch = fetch
	info.bufLen = bufLen
	info.takeBuf = takeBuf
	return info, info.next
}

// Remaining returns the number of items available before the iterator makes another API call.
func (pi *PageInfo) Remaining() int {
	return pi.bufLen()
}

// next provides support for an iterator's Next function. An iterator's Next
// should return the error returned by next if non-nil; else it can assume
// there is at least one item in its buffer, and it should return that item and
// remove it from the buffer.
func (pi *PageInfo) next() error {
	pi.nextCalled = true
	switch {
	case pi.err != nil:
		// The latch is set; keep reporting the same error.
		// TODO(jba): fix so users can retry on transient errors? Probably not worth it.
		return pi.err
	case pi.nextPageCalled:
		pi.err = errMixed
		return pi.err
	}
	// Fetch until at least one item is buffered or the service is exhausted.
	for !pi.atEnd && pi.bufLen() == 0 {
		if err := pi.fill(pi.MaxSize); err != nil {
			pi.err = err
			return pi.err
		}
		if pi.Token == "" {
			pi.atEnd = true
		}
	}
	// Here the buffer is non-empty, or atEnd is true, or both; an empty
	// buffer therefore means the service has no more items.
	if pi.bufLen() == 0 {
		pi.err = Done
	}
	return pi.err
}

// fill calls the service with size and pi.Token, then records the returned
// next-page token in pi.Token.
// If fill returns a non-nil error, the buffer is left empty.
func (pi *PageInfo) fill(size int) error {
	nextTok, err := pi.fetch(size, pi.Token)
	if err != nil {
		pi.takeBuf() // clear the buffer
		return err
	}
	pi.Token = nextTok
	return nil
}
151
// Pageable is implemented by iterators that support paging.
type Pageable interface {
	// PageInfo returns paging information associated with the iterator.
	PageInfo() *PageInfo
}

// Pager supports retrieving iterator items a page at a time.
type Pager struct {
	pageInfo *PageInfo // paging state shared with the underlying iterator
	pageSize int       // exact number of items requested per NextPage call
}
163
164// NewPager returns a pager that uses iter. Calls to its NextPage method will
165// obtain exactly pageSize items, unless fewer remain. The pageToken argument
166// indicates where to start the iteration. Pass the empty string to start at
167// the beginning, or pass a token retrieved from a call to Pager.NextPage.
168//
169// If you use an iterator with a Pager, you must not call Next on the iterator.
170func NewPager(iter Pageable, pageSize int, pageToken string) *Pager {
171 p := &Pager{
172 pageInfo: iter.PageInfo(),
173 pageSize: pageSize,
174 }
175 p.pageInfo.Token = pageToken
176 if pageSize <= 0 {
177 p.pageInfo.err = errors.New("iterator: page size must be positive")
178 }
179 return p
180}
181
// NextPage retrieves a sequence of items from the iterator and appends them
// to slicep, which must be a pointer to a slice of the iterator's item type.
// Exactly p.pageSize items will be appended, unless fewer remain.
//
// The first return value is the page token to use for the next page of items.
// If empty, there are no more pages. Aside from checking for the end of the
// iteration, the returned page token is only needed if the iteration is to be
// resumed a later time, in another context (possibly another process).
//
// The second return value is non-nil if an error occurred. It will never be
// the special iterator sentinel value Done. To recognize the end of the
// iteration, compare nextPageToken to the empty string.
//
// It is possible for NextPage to return a single zero-length page along with
// an empty page token when there are no more items in the iteration.
func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) {
	p.pageInfo.nextPageCalled = true
	if p.pageInfo.err != nil { // error latch: once set, always reported
		return "", p.pageInfo.err
	}
	if p.pageInfo.nextCalled { // mixing Next and NextPage is unsupported
		p.pageInfo.err = errMixed
		return "", p.pageInfo.err
	}
	if p.pageInfo.bufLen() > 0 {
		return "", errors.New("must call NextPage with an empty buffer")
	}
	// The buffer must be empty here, so takeBuf is a no-op. We call it just to get
	// the buffer's type.
	wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type())
	if slicep == nil {
		return "", errors.New("nil passed to Pager.NextPage")
	}
	vslicep := reflect.ValueOf(slicep)
	if vslicep.Type() != wantSliceType {
		return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep)
	}
	// Keep fetching until a full page is buffered or the service reports no
	// more items (empty next-page token).
	for p.pageInfo.bufLen() < p.pageSize {
		if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil {
			p.pageInfo.err = err
			return "", p.pageInfo.err
		}
		if p.pageInfo.Token == "" {
			break
		}
	}
	// Move the buffered items into the caller's slice via reflection.
	e := vslicep.Elem()
	e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf())))
	return p.pageInfo.Token, nil
}
diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go
new file mode 100644
index 0000000..0636a82
--- /dev/null
+++ b/vendor/google.golang.org/api/option/credentials_go19.go
@@ -0,0 +1,33 @@
1// Copyright 2018 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15// +build go1.9
16
17package option
18
19import (
20 "golang.org/x/oauth2/google"
21 "google.golang.org/api/internal"
22)
23
// withCreds wraps google.Credentials so it can act as a ClientOption.
type withCreds google.Credentials

// Apply stores the credentials on the dial settings.
func (w *withCreds) Apply(o *internal.DialSettings) {
	o.Credentials = (*google.Credentials)(w)
}

// WithCredentials returns a ClientOption that authenticates API calls.
func WithCredentials(creds *google.Credentials) ClientOption {
	return (*withCreds)(creds)
}
diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go
new file mode 100644
index 0000000..74d3a4b
--- /dev/null
+++ b/vendor/google.golang.org/api/option/credentials_notgo19.go
@@ -0,0 +1,32 @@
1// Copyright 2018 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15// +build !go1.9
16
17package option
18
19import (
20 "golang.org/x/oauth2/google"
21 "google.golang.org/api/internal"
22)
23
// withCreds wraps google.DefaultCredentials so it can act as a ClientOption.
// (Pre-go1.9 builds use DefaultCredentials; see credentials_go19.go.)
type withCreds google.DefaultCredentials

// Apply stores the credentials on the dial settings.
func (w *withCreds) Apply(o *internal.DialSettings) {
	o.Credentials = (*google.DefaultCredentials)(w)
}

// WithCredentials returns a ClientOption that authenticates API calls.
func WithCredentials(creds *google.DefaultCredentials) ClientOption {
	return (*withCreds)(creds)
}
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
new file mode 100644
index 0000000..e7ecfe3
--- /dev/null
+++ b/vendor/google.golang.org/api/option/option.go
@@ -0,0 +1,191 @@
1// Copyright 2017 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15// Package option contains options for Google API clients.
16package option
17
18import (
19 "net/http"
20
21 "golang.org/x/oauth2"
22 "google.golang.org/api/internal"
23 "google.golang.org/grpc"
24)
25
// A ClientOption is an option for a Google API client.
type ClientOption interface {
	// Apply records the option's value on the given DialSettings.
	Apply(*internal.DialSettings)
}
30
31// WithTokenSource returns a ClientOption that specifies an OAuth2 token
32// source to be used as the basis for authentication.
33func WithTokenSource(s oauth2.TokenSource) ClientOption {
34 return withTokenSource{s}
35}
36
37type withTokenSource struct{ ts oauth2.TokenSource }
38
39func (w withTokenSource) Apply(o *internal.DialSettings) {
40 o.TokenSource = w.ts
41}
42
43type withCredFile string
44
45func (w withCredFile) Apply(o *internal.DialSettings) {
46 o.CredentialsFile = string(w)
47}
48
49// WithCredentialsFile returns a ClientOption that authenticates
50// API calls with the given service account or refresh token JSON
51// credentials file.
52func WithCredentialsFile(filename string) ClientOption {
53 return withCredFile(filename)
54}
55
56// WithServiceAccountFile returns a ClientOption that uses a Google service
57// account credentials file to authenticate.
58//
59// Deprecated: Use WithCredentialsFile instead.
60func WithServiceAccountFile(filename string) ClientOption {
61 return WithCredentialsFile(filename)
62}
63
64// WithCredentialsJSON returns a ClientOption that authenticates
65// API calls with the given service account or refresh token JSON
66// credentials.
67func WithCredentialsJSON(p []byte) ClientOption {
68 return withCredentialsJSON(p)
69}
70
71type withCredentialsJSON []byte
72
73func (w withCredentialsJSON) Apply(o *internal.DialSettings) {
74 o.CredentialsJSON = make([]byte, len(w))
75 copy(o.CredentialsJSON, w)
76}
77
78// WithEndpoint returns a ClientOption that overrides the default endpoint
79// to be used for a service.
80func WithEndpoint(url string) ClientOption {
81 return withEndpoint(url)
82}
83
84type withEndpoint string
85
86func (w withEndpoint) Apply(o *internal.DialSettings) {
87 o.Endpoint = string(w)
88}
89
90// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
91// to be used for a service.
92func WithScopes(scope ...string) ClientOption {
93 return withScopes(scope)
94}
95
96type withScopes []string
97
98func (w withScopes) Apply(o *internal.DialSettings) {
99 o.Scopes = make([]string, len(w))
100 copy(o.Scopes, w)
101}
102
103// WithUserAgent returns a ClientOption that sets the User-Agent.
104func WithUserAgent(ua string) ClientOption {
105 return withUA(ua)
106}
107
108type withUA string
109
110func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
111
112// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
113// as the basis of communications. This option may only be used with services
114// that support HTTP as their communication transport. When used, the
115// WithHTTPClient option takes precedent over all other supplied options.
116func WithHTTPClient(client *http.Client) ClientOption {
117 return withHTTPClient{client}
118}
119
120type withHTTPClient struct{ client *http.Client }
121
122func (w withHTTPClient) Apply(o *internal.DialSettings) {
123 o.HTTPClient = w.client
124}
125
126// WithGRPCConn returns a ClientOption that specifies the gRPC client
127// connection to use as the basis of communications. This option many only be
128// used with services that support gRPC as their communication transport. When
129// used, the WithGRPCConn option takes precedent over all other supplied
130// options.
131func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
132 return withGRPCConn{conn}
133}
134
135type withGRPCConn struct{ conn *grpc.ClientConn }
136
137func (w withGRPCConn) Apply(o *internal.DialSettings) {
138 o.GRPCConn = w.conn
139}
140
141// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
142// to an underlying gRPC dial. It does not work with WithGRPCConn.
143func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
144 return withGRPCDialOption{opt}
145}
146
147type withGRPCDialOption struct{ opt grpc.DialOption }
148
149func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
150 o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
151}
152
153// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
154// connections that requests will be balanced between.
155// This is an EXPERIMENTAL API and may be changed or removed in the future.
156func WithGRPCConnectionPool(size int) ClientOption {
157 return withGRPCConnectionPool(size)
158}
159
160type withGRPCConnectionPool int
161
162func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
163 balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
164 o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
165}
166
167// WithAPIKey returns a ClientOption that specifies an API key to be used
168// as the basis for authentication.
169//
170// API Keys can only be used for JSON-over-HTTP APIs, including those under
171// the import path google.golang.org/api/....
172func WithAPIKey(apiKey string) ClientOption {
173 return withAPIKey(apiKey)
174}
175
176type withAPIKey string
177
178func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
179
180// WithoutAuthentication returns a ClientOption that specifies that no
181// authentication should be used. It is suitable only for testing and for
182// accessing public resources, like public Google Cloud Storage buckets.
183// It is an error to provide both WithoutAuthentication and any of WithAPIKey,
184// WithTokenSource, WithCredentialsFile or WithServiceAccountFile.
185func WithoutAuthentication() ClientOption {
186 return withoutAuthentication{}
187}
188
189type withoutAuthentication struct{}
190
191func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true }
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
new file mode 100644
index 0000000..49d0a51
--- /dev/null
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -0,0 +1,3818 @@
1{
2 "auth": {
3 "oauth2": {
4 "scopes": {
5 "https://www.googleapis.com/auth/cloud-platform": {
6 "description": "View and manage your data across Google Cloud Platform services"
7 },
8 "https://www.googleapis.com/auth/cloud-platform.read-only": {
9 "description": "View your data across Google Cloud Platform services"
10 },
11 "https://www.googleapis.com/auth/devstorage.full_control": {
12 "description": "Manage your data and permissions in Google Cloud Storage"
13 },
14 "https://www.googleapis.com/auth/devstorage.read_only": {
15 "description": "View your data in Google Cloud Storage"
16 },
17 "https://www.googleapis.com/auth/devstorage.read_write": {
18 "description": "Manage your data in Google Cloud Storage"
19 }
20 }
21 }
22 },
23 "basePath": "/storage/v1/",
24 "baseUrl": "https://www.googleapis.com/storage/v1/",
25 "batchPath": "batch/storage/v1",
26 "description": "Stores and retrieves potentially large, immutable data objects.",
27 "discoveryVersion": "v1",
28 "documentationLink": "https://developers.google.com/storage/docs/json_api/",
29 "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/KPalWULMnQfaqumeaBhBrVfHFNM\"",
30 "icons": {
31 "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
32 "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
33 },
34 "id": "storage:v1",
35 "kind": "discovery#restDescription",
36 "labels": [
37 "labs"
38 ],
39 "name": "storage",
40 "ownerDomain": "google.com",
41 "ownerName": "Google",
42 "parameters": {
43 "alt": {
44 "default": "json",
45 "description": "Data format for the response.",
46 "enum": [
47 "json"
48 ],
49 "enumDescriptions": [
50 "Responses with Content-Type of application/json"
51 ],
52 "location": "query",
53 "type": "string"
54 },
55 "fields": {
56 "description": "Selector specifying which fields to include in a partial response.",
57 "location": "query",
58 "type": "string"
59 },
60 "key": {
61 "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
62 "location": "query",
63 "type": "string"
64 },
65 "oauth_token": {
66 "description": "OAuth 2.0 token for the current user.",
67 "location": "query",
68 "type": "string"
69 },
70 "prettyPrint": {
71 "default": "true",
72 "description": "Returns response with indentations and line breaks.",
73 "location": "query",
74 "type": "boolean"
75 },
76 "quotaUser": {
77 "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.",
78 "location": "query",
79 "type": "string"
80 },
81 "userIp": {
82 "description": "Deprecated. Please use quotaUser instead.",
83 "location": "query",
84 "type": "string"
85 }
86 },
87 "protocol": "rest",
88 "resources": {
89 "bucketAccessControls": {
90 "methods": {
91 "delete": {
92 "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
93 "httpMethod": "DELETE",
94 "id": "storage.bucketAccessControls.delete",
95 "parameterOrder": [
96 "bucket",
97 "entity"
98 ],
99 "parameters": {
100 "bucket": {
101 "description": "Name of a bucket.",
102 "location": "path",
103 "required": true,
104 "type": "string"
105 },
106 "entity": {
107 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
108 "location": "path",
109 "required": true,
110 "type": "string"
111 },
112 "userProject": {
113 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
114 "location": "query",
115 "type": "string"
116 }
117 },
118 "path": "b/{bucket}/acl/{entity}",
119 "scopes": [
120 "https://www.googleapis.com/auth/cloud-platform",
121 "https://www.googleapis.com/auth/devstorage.full_control"
122 ]
123 },
124 "get": {
125 "description": "Returns the ACL entry for the specified entity on the specified bucket.",
126 "httpMethod": "GET",
127 "id": "storage.bucketAccessControls.get",
128 "parameterOrder": [
129 "bucket",
130 "entity"
131 ],
132 "parameters": {
133 "bucket": {
134 "description": "Name of a bucket.",
135 "location": "path",
136 "required": true,
137 "type": "string"
138 },
139 "entity": {
140 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
141 "location": "path",
142 "required": true,
143 "type": "string"
144 },
145 "userProject": {
146 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
147 "location": "query",
148 "type": "string"
149 }
150 },
151 "path": "b/{bucket}/acl/{entity}",
152 "response": {
153 "$ref": "BucketAccessControl"
154 },
155 "scopes": [
156 "https://www.googleapis.com/auth/cloud-platform",
157 "https://www.googleapis.com/auth/devstorage.full_control"
158 ]
159 },
160 "insert": {
161 "description": "Creates a new ACL entry on the specified bucket.",
162 "httpMethod": "POST",
163 "id": "storage.bucketAccessControls.insert",
164 "parameterOrder": [
165 "bucket"
166 ],
167 "parameters": {
168 "bucket": {
169 "description": "Name of a bucket.",
170 "location": "path",
171 "required": true,
172 "type": "string"
173 },
174 "userProject": {
175 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
176 "location": "query",
177 "type": "string"
178 }
179 },
180 "path": "b/{bucket}/acl",
181 "request": {
182 "$ref": "BucketAccessControl"
183 },
184 "response": {
185 "$ref": "BucketAccessControl"
186 },
187 "scopes": [
188 "https://www.googleapis.com/auth/cloud-platform",
189 "https://www.googleapis.com/auth/devstorage.full_control"
190 ]
191 },
192 "list": {
193 "description": "Retrieves ACL entries on the specified bucket.",
194 "httpMethod": "GET",
195 "id": "storage.bucketAccessControls.list",
196 "parameterOrder": [
197 "bucket"
198 ],
199 "parameters": {
200 "bucket": {
201 "description": "Name of a bucket.",
202 "location": "path",
203 "required": true,
204 "type": "string"
205 },
206 "userProject": {
207 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
208 "location": "query",
209 "type": "string"
210 }
211 },
212 "path": "b/{bucket}/acl",
213 "response": {
214 "$ref": "BucketAccessControls"
215 },
216 "scopes": [
217 "https://www.googleapis.com/auth/cloud-platform",
218 "https://www.googleapis.com/auth/devstorage.full_control"
219 ]
220 },
221 "patch": {
222 "description": "Patches an ACL entry on the specified bucket.",
223 "httpMethod": "PATCH",
224 "id": "storage.bucketAccessControls.patch",
225 "parameterOrder": [
226 "bucket",
227 "entity"
228 ],
229 "parameters": {
230 "bucket": {
231 "description": "Name of a bucket.",
232 "location": "path",
233 "required": true,
234 "type": "string"
235 },
236 "entity": {
237 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
238 "location": "path",
239 "required": true,
240 "type": "string"
241 },
242 "userProject": {
243 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
244 "location": "query",
245 "type": "string"
246 }
247 },
248 "path": "b/{bucket}/acl/{entity}",
249 "request": {
250 "$ref": "BucketAccessControl"
251 },
252 "response": {
253 "$ref": "BucketAccessControl"
254 },
255 "scopes": [
256 "https://www.googleapis.com/auth/cloud-platform",
257 "https://www.googleapis.com/auth/devstorage.full_control"
258 ]
259 },
260 "update": {
261 "description": "Updates an ACL entry on the specified bucket.",
262 "httpMethod": "PUT",
263 "id": "storage.bucketAccessControls.update",
264 "parameterOrder": [
265 "bucket",
266 "entity"
267 ],
268 "parameters": {
269 "bucket": {
270 "description": "Name of a bucket.",
271 "location": "path",
272 "required": true,
273 "type": "string"
274 },
275 "entity": {
276 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
277 "location": "path",
278 "required": true,
279 "type": "string"
280 },
281 "userProject": {
282 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
283 "location": "query",
284 "type": "string"
285 }
286 },
287 "path": "b/{bucket}/acl/{entity}",
288 "request": {
289 "$ref": "BucketAccessControl"
290 },
291 "response": {
292 "$ref": "BucketAccessControl"
293 },
294 "scopes": [
295 "https://www.googleapis.com/auth/cloud-platform",
296 "https://www.googleapis.com/auth/devstorage.full_control"
297 ]
298 }
299 }
300 },
301 "buckets": {
302 "methods": {
303 "delete": {
304 "description": "Permanently deletes an empty bucket.",
305 "httpMethod": "DELETE",
306 "id": "storage.buckets.delete",
307 "parameterOrder": [
308 "bucket"
309 ],
310 "parameters": {
311 "bucket": {
312 "description": "Name of a bucket.",
313 "location": "path",
314 "required": true,
315 "type": "string"
316 },
317 "ifMetagenerationMatch": {
318 "description": "If set, only deletes the bucket if its metageneration matches this value.",
319 "format": "int64",
320 "location": "query",
321 "type": "string"
322 },
323 "ifMetagenerationNotMatch": {
324 "description": "If set, only deletes the bucket if its metageneration does not match this value.",
325 "format": "int64",
326 "location": "query",
327 "type": "string"
328 },
329 "userProject": {
330 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
331 "location": "query",
332 "type": "string"
333 }
334 },
335 "path": "b/{bucket}",
336 "scopes": [
337 "https://www.googleapis.com/auth/cloud-platform",
338 "https://www.googleapis.com/auth/devstorage.full_control",
339 "https://www.googleapis.com/auth/devstorage.read_write"
340 ]
341 },
342 "get": {
343 "description": "Returns metadata for the specified bucket.",
344 "httpMethod": "GET",
345 "id": "storage.buckets.get",
346 "parameterOrder": [
347 "bucket"
348 ],
349 "parameters": {
350 "bucket": {
351 "description": "Name of a bucket.",
352 "location": "path",
353 "required": true,
354 "type": "string"
355 },
356 "ifMetagenerationMatch": {
357 "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
358 "format": "int64",
359 "location": "query",
360 "type": "string"
361 },
362 "ifMetagenerationNotMatch": {
363 "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
364 "format": "int64",
365 "location": "query",
366 "type": "string"
367 },
368 "projection": {
369 "description": "Set of properties to return. Defaults to noAcl.",
370 "enum": [
371 "full",
372 "noAcl"
373 ],
374 "enumDescriptions": [
375 "Include all properties.",
376 "Omit owner, acl and defaultObjectAcl properties."
377 ],
378 "location": "query",
379 "type": "string"
380 },
381 "userProject": {
382 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
383 "location": "query",
384 "type": "string"
385 }
386 },
387 "path": "b/{bucket}",
388 "response": {
389 "$ref": "Bucket"
390 },
391 "scopes": [
392 "https://www.googleapis.com/auth/cloud-platform",
393 "https://www.googleapis.com/auth/cloud-platform.read-only",
394 "https://www.googleapis.com/auth/devstorage.full_control",
395 "https://www.googleapis.com/auth/devstorage.read_only",
396 "https://www.googleapis.com/auth/devstorage.read_write"
397 ]
398 },
399 "getIamPolicy": {
400 "description": "Returns an IAM policy for the specified bucket.",
401 "httpMethod": "GET",
402 "id": "storage.buckets.getIamPolicy",
403 "parameterOrder": [
404 "bucket"
405 ],
406 "parameters": {
407 "bucket": {
408 "description": "Name of a bucket.",
409 "location": "path",
410 "required": true,
411 "type": "string"
412 },
413 "userProject": {
414 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
415 "location": "query",
416 "type": "string"
417 }
418 },
419 "path": "b/{bucket}/iam",
420 "response": {
421 "$ref": "Policy"
422 },
423 "scopes": [
424 "https://www.googleapis.com/auth/cloud-platform",
425 "https://www.googleapis.com/auth/cloud-platform.read-only",
426 "https://www.googleapis.com/auth/devstorage.full_control",
427 "https://www.googleapis.com/auth/devstorage.read_only",
428 "https://www.googleapis.com/auth/devstorage.read_write"
429 ]
430 },
431 "insert": {
432 "description": "Creates a new bucket.",
433 "httpMethod": "POST",
434 "id": "storage.buckets.insert",
435 "parameterOrder": [
436 "project"
437 ],
438 "parameters": {
439 "predefinedAcl": {
440 "description": "Apply a predefined set of access controls to this bucket.",
441 "enum": [
442 "authenticatedRead",
443 "private",
444 "projectPrivate",
445 "publicRead",
446 "publicReadWrite"
447 ],
448 "enumDescriptions": [
449 "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
450 "Project team owners get OWNER access.",
451 "Project team members get access according to their roles.",
452 "Project team owners get OWNER access, and allUsers get READER access.",
453 "Project team owners get OWNER access, and allUsers get WRITER access."
454 ],
455 "location": "query",
456 "type": "string"
457 },
458 "predefinedDefaultObjectAcl": {
459 "description": "Apply a predefined set of default object access controls to this bucket.",
460 "enum": [
461 "authenticatedRead",
462 "bucketOwnerFullControl",
463 "bucketOwnerRead",
464 "private",
465 "projectPrivate",
466 "publicRead"
467 ],
468 "enumDescriptions": [
469 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
470 "Object owner gets OWNER access, and project team owners get OWNER access.",
471 "Object owner gets OWNER access, and project team owners get READER access.",
472 "Object owner gets OWNER access.",
473 "Object owner gets OWNER access, and project team members get access according to their roles.",
474 "Object owner gets OWNER access, and allUsers get READER access."
475 ],
476 "location": "query",
477 "type": "string"
478 },
479 "project": {
480 "description": "A valid API project identifier.",
481 "location": "query",
482 "required": true,
483 "type": "string"
484 },
485 "projection": {
486 "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
487 "enum": [
488 "full",
489 "noAcl"
490 ],
491 "enumDescriptions": [
492 "Include all properties.",
493 "Omit owner, acl and defaultObjectAcl properties."
494 ],
495 "location": "query",
496 "type": "string"
497 },
498 "userProject": {
499 "description": "The project to be billed for this request.",
500 "location": "query",
501 "type": "string"
502 }
503 },
504 "path": "b",
505 "request": {
506 "$ref": "Bucket"
507 },
508 "response": {
509 "$ref": "Bucket"
510 },
511 "scopes": [
512 "https://www.googleapis.com/auth/cloud-platform",
513 "https://www.googleapis.com/auth/devstorage.full_control",
514 "https://www.googleapis.com/auth/devstorage.read_write"
515 ]
516 },
517 "list": {
518 "description": "Retrieves a list of buckets for a given project.",
519 "httpMethod": "GET",
520 "id": "storage.buckets.list",
521 "parameterOrder": [
522 "project"
523 ],
524 "parameters": {
525 "maxResults": {
526 "default": "1000",
527 "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.",
528 "format": "uint32",
529 "location": "query",
530 "minimum": "0",
531 "type": "integer"
532 },
533 "pageToken": {
534 "description": "A previously-returned page token representing part of the larger set of results to view.",
535 "location": "query",
536 "type": "string"
537 },
538 "prefix": {
539 "description": "Filter results to buckets whose names begin with this prefix.",
540 "location": "query",
541 "type": "string"
542 },
543 "project": {
544 "description": "A valid API project identifier.",
545 "location": "query",
546 "required": true,
547 "type": "string"
548 },
549 "projection": {
550 "description": "Set of properties to return. Defaults to noAcl.",
551 "enum": [
552 "full",
553 "noAcl"
554 ],
555 "enumDescriptions": [
556 "Include all properties.",
557 "Omit owner, acl and defaultObjectAcl properties."
558 ],
559 "location": "query",
560 "type": "string"
561 },
562 "userProject": {
563 "description": "The project to be billed for this request.",
564 "location": "query",
565 "type": "string"
566 }
567 },
568 "path": "b",
569 "response": {
570 "$ref": "Buckets"
571 },
572 "scopes": [
573 "https://www.googleapis.com/auth/cloud-platform",
574 "https://www.googleapis.com/auth/cloud-platform.read-only",
575 "https://www.googleapis.com/auth/devstorage.full_control",
576 "https://www.googleapis.com/auth/devstorage.read_only",
577 "https://www.googleapis.com/auth/devstorage.read_write"
578 ]
579 },
580 "lockRetentionPolicy": {
581 "description": "Locks retention policy on a bucket.",
582 "httpMethod": "POST",
583 "id": "storage.buckets.lockRetentionPolicy",
584 "parameterOrder": [
585 "bucket",
586 "ifMetagenerationMatch"
587 ],
588 "parameters": {
589 "bucket": {
590 "description": "Name of a bucket.",
591 "location": "path",
592 "required": true,
593 "type": "string"
594 },
595 "ifMetagenerationMatch": {
596 "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.",
597 "format": "int64",
598 "location": "query",
599 "required": true,
600 "type": "string"
601 },
602 "userProject": {
603 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
604 "location": "query",
605 "type": "string"
606 }
607 },
608 "path": "b/{bucket}/lockRetentionPolicy",
609 "response": {
610 "$ref": "Bucket"
611 },
612 "scopes": [
613 "https://www.googleapis.com/auth/cloud-platform",
614 "https://www.googleapis.com/auth/devstorage.full_control",
615 "https://www.googleapis.com/auth/devstorage.read_write"
616 ]
617 },
618 "patch": {
619 "description": "Patches a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
620 "httpMethod": "PATCH",
621 "id": "storage.buckets.patch",
622 "parameterOrder": [
623 "bucket"
624 ],
625 "parameters": {
626 "bucket": {
627 "description": "Name of a bucket.",
628 "location": "path",
629 "required": true,
630 "type": "string"
631 },
632 "ifMetagenerationMatch": {
633 "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
634 "format": "int64",
635 "location": "query",
636 "type": "string"
637 },
638 "ifMetagenerationNotMatch": {
639 "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
640 "format": "int64",
641 "location": "query",
642 "type": "string"
643 },
644 "predefinedAcl": {
645 "description": "Apply a predefined set of access controls to this bucket.",
646 "enum": [
647 "authenticatedRead",
648 "private",
649 "projectPrivate",
650 "publicRead",
651 "publicReadWrite"
652 ],
653 "enumDescriptions": [
654 "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
655 "Project team owners get OWNER access.",
656 "Project team members get access according to their roles.",
657 "Project team owners get OWNER access, and allUsers get READER access.",
658 "Project team owners get OWNER access, and allUsers get WRITER access."
659 ],
660 "location": "query",
661 "type": "string"
662 },
663 "predefinedDefaultObjectAcl": {
664 "description": "Apply a predefined set of default object access controls to this bucket.",
665 "enum": [
666 "authenticatedRead",
667 "bucketOwnerFullControl",
668 "bucketOwnerRead",
669 "private",
670 "projectPrivate",
671 "publicRead"
672 ],
673 "enumDescriptions": [
674 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
675 "Object owner gets OWNER access, and project team owners get OWNER access.",
676 "Object owner gets OWNER access, and project team owners get READER access.",
677 "Object owner gets OWNER access.",
678 "Object owner gets OWNER access, and project team members get access according to their roles.",
679 "Object owner gets OWNER access, and allUsers get READER access."
680 ],
681 "location": "query",
682 "type": "string"
683 },
684 "projection": {
685 "description": "Set of properties to return. Defaults to full.",
686 "enum": [
687 "full",
688 "noAcl"
689 ],
690 "enumDescriptions": [
691 "Include all properties.",
692 "Omit owner, acl and defaultObjectAcl properties."
693 ],
694 "location": "query",
695 "type": "string"
696 },
697 "userProject": {
698 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
699 "location": "query",
700 "type": "string"
701 }
702 },
703 "path": "b/{bucket}",
704 "request": {
705 "$ref": "Bucket"
706 },
707 "response": {
708 "$ref": "Bucket"
709 },
710 "scopes": [
711 "https://www.googleapis.com/auth/cloud-platform",
712 "https://www.googleapis.com/auth/devstorage.full_control"
713 ]
714 },
715 "setIamPolicy": {
716 "description": "Updates an IAM policy for the specified bucket.",
717 "httpMethod": "PUT",
718 "id": "storage.buckets.setIamPolicy",
719 "parameterOrder": [
720 "bucket"
721 ],
722 "parameters": {
723 "bucket": {
724 "description": "Name of a bucket.",
725 "location": "path",
726 "required": true,
727 "type": "string"
728 },
729 "userProject": {
730 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
731 "location": "query",
732 "type": "string"
733 }
734 },
735 "path": "b/{bucket}/iam",
736 "request": {
737 "$ref": "Policy"
738 },
739 "response": {
740 "$ref": "Policy"
741 },
742 "scopes": [
743 "https://www.googleapis.com/auth/cloud-platform",
744 "https://www.googleapis.com/auth/devstorage.full_control",
745 "https://www.googleapis.com/auth/devstorage.read_write"
746 ]
747 },
748 "testIamPermissions": {
749 "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.",
750 "httpMethod": "GET",
751 "id": "storage.buckets.testIamPermissions",
752 "parameterOrder": [
753 "bucket",
754 "permissions"
755 ],
756 "parameters": {
757 "bucket": {
758 "description": "Name of a bucket.",
759 "location": "path",
760 "required": true,
761 "type": "string"
762 },
763 "permissions": {
764 "description": "Permissions to test.",
765 "location": "query",
766 "repeated": true,
767 "required": true,
768 "type": "string"
769 },
770 "userProject": {
771 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
772 "location": "query",
773 "type": "string"
774 }
775 },
776 "path": "b/{bucket}/iam/testPermissions",
777 "response": {
778 "$ref": "TestIamPermissionsResponse"
779 },
780 "scopes": [
781 "https://www.googleapis.com/auth/cloud-platform",
782 "https://www.googleapis.com/auth/cloud-platform.read-only",
783 "https://www.googleapis.com/auth/devstorage.full_control",
784 "https://www.googleapis.com/auth/devstorage.read_only",
785 "https://www.googleapis.com/auth/devstorage.read_write"
786 ]
787 },
788 "update": {
789 "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
790 "httpMethod": "PUT",
791 "id": "storage.buckets.update",
792 "parameterOrder": [
793 "bucket"
794 ],
795 "parameters": {
796 "bucket": {
797 "description": "Name of a bucket.",
798 "location": "path",
799 "required": true,
800 "type": "string"
801 },
802 "ifMetagenerationMatch": {
803 "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
804 "format": "int64",
805 "location": "query",
806 "type": "string"
807 },
808 "ifMetagenerationNotMatch": {
809 "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
810 "format": "int64",
811 "location": "query",
812 "type": "string"
813 },
814 "predefinedAcl": {
815 "description": "Apply a predefined set of access controls to this bucket.",
816 "enum": [
817 "authenticatedRead",
818 "private",
819 "projectPrivate",
820 "publicRead",
821 "publicReadWrite"
822 ],
823 "enumDescriptions": [
824 "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
825 "Project team owners get OWNER access.",
826 "Project team members get access according to their roles.",
827 "Project team owners get OWNER access, and allUsers get READER access.",
828 "Project team owners get OWNER access, and allUsers get WRITER access."
829 ],
830 "location": "query",
831 "type": "string"
832 },
833 "predefinedDefaultObjectAcl": {
834 "description": "Apply a predefined set of default object access controls to this bucket.",
835 "enum": [
836 "authenticatedRead",
837 "bucketOwnerFullControl",
838 "bucketOwnerRead",
839 "private",
840 "projectPrivate",
841 "publicRead"
842 ],
843 "enumDescriptions": [
844 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
845 "Object owner gets OWNER access, and project team owners get OWNER access.",
846 "Object owner gets OWNER access, and project team owners get READER access.",
847 "Object owner gets OWNER access.",
848 "Object owner gets OWNER access, and project team members get access according to their roles.",
849 "Object owner gets OWNER access, and allUsers get READER access."
850 ],
851 "location": "query",
852 "type": "string"
853 },
854 "projection": {
855 "description": "Set of properties to return. Defaults to full.",
856 "enum": [
857 "full",
858 "noAcl"
859 ],
860 "enumDescriptions": [
861 "Include all properties.",
862 "Omit owner, acl and defaultObjectAcl properties."
863 ],
864 "location": "query",
865 "type": "string"
866 },
867 "userProject": {
868 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
869 "location": "query",
870 "type": "string"
871 }
872 },
873 "path": "b/{bucket}",
874 "request": {
875 "$ref": "Bucket"
876 },
877 "response": {
878 "$ref": "Bucket"
879 },
880 "scopes": [
881 "https://www.googleapis.com/auth/cloud-platform",
882 "https://www.googleapis.com/auth/devstorage.full_control"
883 ]
884 }
885 }
886 },
887 "channels": {
888 "methods": {
889 "stop": {
890 "description": "Stop watching resources through this channel.",
891 "httpMethod": "POST",
892 "id": "storage.channels.stop",
893 "path": "channels/stop",
894 "request": {
895 "$ref": "Channel",
896 "parameterName": "resource"
897 },
898 "scopes": [
899 "https://www.googleapis.com/auth/cloud-platform",
900 "https://www.googleapis.com/auth/cloud-platform.read-only",
901 "https://www.googleapis.com/auth/devstorage.full_control",
902 "https://www.googleapis.com/auth/devstorage.read_only",
903 "https://www.googleapis.com/auth/devstorage.read_write"
904 ]
905 }
906 }
907 },
908 "defaultObjectAccessControls": {
909 "methods": {
910 "delete": {
911 "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
912 "httpMethod": "DELETE",
913 "id": "storage.defaultObjectAccessControls.delete",
914 "parameterOrder": [
915 "bucket",
916 "entity"
917 ],
918 "parameters": {
919 "bucket": {
920 "description": "Name of a bucket.",
921 "location": "path",
922 "required": true,
923 "type": "string"
924 },
925 "entity": {
926 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
927 "location": "path",
928 "required": true,
929 "type": "string"
930 },
931 "userProject": {
932 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
933 "location": "query",
934 "type": "string"
935 }
936 },
937 "path": "b/{bucket}/defaultObjectAcl/{entity}",
938 "scopes": [
939 "https://www.googleapis.com/auth/cloud-platform",
940 "https://www.googleapis.com/auth/devstorage.full_control"
941 ]
942 },
943 "get": {
944 "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
945 "httpMethod": "GET",
946 "id": "storage.defaultObjectAccessControls.get",
947 "parameterOrder": [
948 "bucket",
949 "entity"
950 ],
951 "parameters": {
952 "bucket": {
953 "description": "Name of a bucket.",
954 "location": "path",
955 "required": true,
956 "type": "string"
957 },
958 "entity": {
959 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
960 "location": "path",
961 "required": true,
962 "type": "string"
963 },
964 "userProject": {
965 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
966 "location": "query",
967 "type": "string"
968 }
969 },
970 "path": "b/{bucket}/defaultObjectAcl/{entity}",
971 "response": {
972 "$ref": "ObjectAccessControl"
973 },
974 "scopes": [
975 "https://www.googleapis.com/auth/cloud-platform",
976 "https://www.googleapis.com/auth/devstorage.full_control"
977 ]
978 },
979 "insert": {
980 "description": "Creates a new default object ACL entry on the specified bucket.",
981 "httpMethod": "POST",
982 "id": "storage.defaultObjectAccessControls.insert",
983 "parameterOrder": [
984 "bucket"
985 ],
986 "parameters": {
987 "bucket": {
988 "description": "Name of a bucket.",
989 "location": "path",
990 "required": true,
991 "type": "string"
992 },
993 "userProject": {
994 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
995 "location": "query",
996 "type": "string"
997 }
998 },
999 "path": "b/{bucket}/defaultObjectAcl",
1000 "request": {
1001 "$ref": "ObjectAccessControl"
1002 },
1003 "response": {
1004 "$ref": "ObjectAccessControl"
1005 },
1006 "scopes": [
1007 "https://www.googleapis.com/auth/cloud-platform",
1008 "https://www.googleapis.com/auth/devstorage.full_control"
1009 ]
1010 },
1011 "list": {
1012 "description": "Retrieves default object ACL entries on the specified bucket.",
1013 "httpMethod": "GET",
1014 "id": "storage.defaultObjectAccessControls.list",
1015 "parameterOrder": [
1016 "bucket"
1017 ],
1018 "parameters": {
1019 "bucket": {
1020 "description": "Name of a bucket.",
1021 "location": "path",
1022 "required": true,
1023 "type": "string"
1024 },
1025 "ifMetagenerationMatch": {
1026 "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
1027 "format": "int64",
1028 "location": "query",
1029 "type": "string"
1030 },
1031 "ifMetagenerationNotMatch": {
1032 "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
1033 "format": "int64",
1034 "location": "query",
1035 "type": "string"
1036 },
1037 "userProject": {
1038 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1039 "location": "query",
1040 "type": "string"
1041 }
1042 },
1043 "path": "b/{bucket}/defaultObjectAcl",
1044 "response": {
1045 "$ref": "ObjectAccessControls"
1046 },
1047 "scopes": [
1048 "https://www.googleapis.com/auth/cloud-platform",
1049 "https://www.googleapis.com/auth/devstorage.full_control"
1050 ]
1051 },
1052 "patch": {
1053 "description": "Patches a default object ACL entry on the specified bucket.",
1054 "httpMethod": "PATCH",
1055 "id": "storage.defaultObjectAccessControls.patch",
1056 "parameterOrder": [
1057 "bucket",
1058 "entity"
1059 ],
1060 "parameters": {
1061 "bucket": {
1062 "description": "Name of a bucket.",
1063 "location": "path",
1064 "required": true,
1065 "type": "string"
1066 },
1067 "entity": {
1068 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
1069 "location": "path",
1070 "required": true,
1071 "type": "string"
1072 },
1073 "userProject": {
1074 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1075 "location": "query",
1076 "type": "string"
1077 }
1078 },
1079 "path": "b/{bucket}/defaultObjectAcl/{entity}",
1080 "request": {
1081 "$ref": "ObjectAccessControl"
1082 },
1083 "response": {
1084 "$ref": "ObjectAccessControl"
1085 },
1086 "scopes": [
1087 "https://www.googleapis.com/auth/cloud-platform",
1088 "https://www.googleapis.com/auth/devstorage.full_control"
1089 ]
1090 },
1091 "update": {
1092 "description": "Updates a default object ACL entry on the specified bucket.",
1093 "httpMethod": "PUT",
1094 "id": "storage.defaultObjectAccessControls.update",
1095 "parameterOrder": [
1096 "bucket",
1097 "entity"
1098 ],
1099 "parameters": {
1100 "bucket": {
1101 "description": "Name of a bucket.",
1102 "location": "path",
1103 "required": true,
1104 "type": "string"
1105 },
1106 "entity": {
1107 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
1108 "location": "path",
1109 "required": true,
1110 "type": "string"
1111 },
1112 "userProject": {
1113 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1114 "location": "query",
1115 "type": "string"
1116 }
1117 },
1118 "path": "b/{bucket}/defaultObjectAcl/{entity}",
1119 "request": {
1120 "$ref": "ObjectAccessControl"
1121 },
1122 "response": {
1123 "$ref": "ObjectAccessControl"
1124 },
1125 "scopes": [
1126 "https://www.googleapis.com/auth/cloud-platform",
1127 "https://www.googleapis.com/auth/devstorage.full_control"
1128 ]
1129 }
1130 }
1131 },
1132 "notifications": {
1133 "methods": {
1134 "delete": {
1135 "description": "Permanently deletes a notification subscription.",
1136 "httpMethod": "DELETE",
1137 "id": "storage.notifications.delete",
1138 "parameterOrder": [
1139 "bucket",
1140 "notification"
1141 ],
1142 "parameters": {
1143 "bucket": {
1144 "description": "The parent bucket of the notification.",
1145 "location": "path",
1146 "required": true,
1147 "type": "string"
1148 },
1149 "notification": {
1150 "description": "ID of the notification to delete.",
1151 "location": "path",
1152 "required": true,
1153 "type": "string"
1154 },
1155 "userProject": {
1156 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1157 "location": "query",
1158 "type": "string"
1159 }
1160 },
1161 "path": "b/{bucket}/notificationConfigs/{notification}",
1162 "scopes": [
1163 "https://www.googleapis.com/auth/cloud-platform",
1164 "https://www.googleapis.com/auth/devstorage.full_control",
1165 "https://www.googleapis.com/auth/devstorage.read_write"
1166 ]
1167 },
1168 "get": {
1169 "description": "View a notification configuration.",
1170 "httpMethod": "GET",
1171 "id": "storage.notifications.get",
1172 "parameterOrder": [
1173 "bucket",
1174 "notification"
1175 ],
1176 "parameters": {
1177 "bucket": {
1178 "description": "The parent bucket of the notification.",
1179 "location": "path",
1180 "required": true,
1181 "type": "string"
1182 },
1183 "notification": {
1184 "description": "Notification ID.",
1185 "location": "path",
1186 "required": true,
1187 "type": "string"
1188 },
1189 "userProject": {
1190 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1191 "location": "query",
1192 "type": "string"
1193 }
1194 },
1195 "path": "b/{bucket}/notificationConfigs/{notification}",
1196 "response": {
1197 "$ref": "Notification"
1198 },
1199 "scopes": [
1200 "https://www.googleapis.com/auth/cloud-platform",
1201 "https://www.googleapis.com/auth/cloud-platform.read-only",
1202 "https://www.googleapis.com/auth/devstorage.full_control",
1203 "https://www.googleapis.com/auth/devstorage.read_only",
1204 "https://www.googleapis.com/auth/devstorage.read_write"
1205 ]
1206 },
1207 "insert": {
1208 "description": "Creates a notification subscription for a given bucket.",
1209 "httpMethod": "POST",
1210 "id": "storage.notifications.insert",
1211 "parameterOrder": [
1212 "bucket"
1213 ],
1214 "parameters": {
1215 "bucket": {
1216 "description": "The parent bucket of the notification.",
1217 "location": "path",
1218 "required": true,
1219 "type": "string"
1220 },
1221 "userProject": {
1222 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1223 "location": "query",
1224 "type": "string"
1225 }
1226 },
1227 "path": "b/{bucket}/notificationConfigs",
1228 "request": {
1229 "$ref": "Notification"
1230 },
1231 "response": {
1232 "$ref": "Notification"
1233 },
1234 "scopes": [
1235 "https://www.googleapis.com/auth/cloud-platform",
1236 "https://www.googleapis.com/auth/devstorage.full_control",
1237 "https://www.googleapis.com/auth/devstorage.read_write"
1238 ]
1239 },
1240 "list": {
1241 "description": "Retrieves a list of notification subscriptions for a given bucket.",
1242 "httpMethod": "GET",
1243 "id": "storage.notifications.list",
1244 "parameterOrder": [
1245 "bucket"
1246 ],
1247 "parameters": {
1248 "bucket": {
1249 "description": "Name of a Google Cloud Storage bucket.",
1250 "location": "path",
1251 "required": true,
1252 "type": "string"
1253 },
1254 "userProject": {
1255 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1256 "location": "query",
1257 "type": "string"
1258 }
1259 },
1260 "path": "b/{bucket}/notificationConfigs",
1261 "response": {
1262 "$ref": "Notifications"
1263 },
1264 "scopes": [
1265 "https://www.googleapis.com/auth/cloud-platform",
1266 "https://www.googleapis.com/auth/cloud-platform.read-only",
1267 "https://www.googleapis.com/auth/devstorage.full_control",
1268 "https://www.googleapis.com/auth/devstorage.read_only",
1269 "https://www.googleapis.com/auth/devstorage.read_write"
1270 ]
1271 }
1272 }
1273 },
1274 "objectAccessControls": {
1275 "methods": {
1276 "delete": {
1277 "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
1278 "httpMethod": "DELETE",
1279 "id": "storage.objectAccessControls.delete",
1280 "parameterOrder": [
1281 "bucket",
1282 "object",
1283 "entity"
1284 ],
1285 "parameters": {
1286 "bucket": {
1287 "description": "Name of a bucket.",
1288 "location": "path",
1289 "required": true,
1290 "type": "string"
1291 },
1292 "entity": {
1293 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
1294 "location": "path",
1295 "required": true,
1296 "type": "string"
1297 },
1298 "generation": {
1299 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
1300 "format": "int64",
1301 "location": "query",
1302 "type": "string"
1303 },
1304 "object": {
1305 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1306 "location": "path",
1307 "required": true,
1308 "type": "string"
1309 },
1310 "userProject": {
1311 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1312 "location": "query",
1313 "type": "string"
1314 }
1315 },
1316 "path": "b/{bucket}/o/{object}/acl/{entity}",
1317 "scopes": [
1318 "https://www.googleapis.com/auth/cloud-platform",
1319 "https://www.googleapis.com/auth/devstorage.full_control"
1320 ]
1321 },
1322 "get": {
1323 "description": "Returns the ACL entry for the specified entity on the specified object.",
1324 "httpMethod": "GET",
1325 "id": "storage.objectAccessControls.get",
1326 "parameterOrder": [
1327 "bucket",
1328 "object",
1329 "entity"
1330 ],
1331 "parameters": {
1332 "bucket": {
1333 "description": "Name of a bucket.",
1334 "location": "path",
1335 "required": true,
1336 "type": "string"
1337 },
1338 "entity": {
1339 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
1340 "location": "path",
1341 "required": true,
1342 "type": "string"
1343 },
1344 "generation": {
1345 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
1346 "format": "int64",
1347 "location": "query",
1348 "type": "string"
1349 },
1350 "object": {
1351 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1352 "location": "path",
1353 "required": true,
1354 "type": "string"
1355 },
1356 "userProject": {
1357 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1358 "location": "query",
1359 "type": "string"
1360 }
1361 },
1362 "path": "b/{bucket}/o/{object}/acl/{entity}",
1363 "response": {
1364 "$ref": "ObjectAccessControl"
1365 },
1366 "scopes": [
1367 "https://www.googleapis.com/auth/cloud-platform",
1368 "https://www.googleapis.com/auth/devstorage.full_control"
1369 ]
1370 },
1371 "insert": {
1372 "description": "Creates a new ACL entry on the specified object.",
1373 "httpMethod": "POST",
1374 "id": "storage.objectAccessControls.insert",
1375 "parameterOrder": [
1376 "bucket",
1377 "object"
1378 ],
1379 "parameters": {
1380 "bucket": {
1381 "description": "Name of a bucket.",
1382 "location": "path",
1383 "required": true,
1384 "type": "string"
1385 },
1386 "generation": {
1387 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
1388 "format": "int64",
1389 "location": "query",
1390 "type": "string"
1391 },
1392 "object": {
1393 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1394 "location": "path",
1395 "required": true,
1396 "type": "string"
1397 },
1398 "userProject": {
1399 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1400 "location": "query",
1401 "type": "string"
1402 }
1403 },
1404 "path": "b/{bucket}/o/{object}/acl",
1405 "request": {
1406 "$ref": "ObjectAccessControl"
1407 },
1408 "response": {
1409 "$ref": "ObjectAccessControl"
1410 },
1411 "scopes": [
1412 "https://www.googleapis.com/auth/cloud-platform",
1413 "https://www.googleapis.com/auth/devstorage.full_control"
1414 ]
1415 },
1416 "list": {
1417 "description": "Retrieves ACL entries on the specified object.",
1418 "httpMethod": "GET",
1419 "id": "storage.objectAccessControls.list",
1420 "parameterOrder": [
1421 "bucket",
1422 "object"
1423 ],
1424 "parameters": {
1425 "bucket": {
1426 "description": "Name of a bucket.",
1427 "location": "path",
1428 "required": true,
1429 "type": "string"
1430 },
1431 "generation": {
1432 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
1433 "format": "int64",
1434 "location": "query",
1435 "type": "string"
1436 },
1437 "object": {
1438 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1439 "location": "path",
1440 "required": true,
1441 "type": "string"
1442 },
1443 "userProject": {
1444 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1445 "location": "query",
1446 "type": "string"
1447 }
1448 },
1449 "path": "b/{bucket}/o/{object}/acl",
1450 "response": {
1451 "$ref": "ObjectAccessControls"
1452 },
1453 "scopes": [
1454 "https://www.googleapis.com/auth/cloud-platform",
1455 "https://www.googleapis.com/auth/devstorage.full_control"
1456 ]
1457 },
1458 "patch": {
1459 "description": "Patches an ACL entry on the specified object.",
1460 "httpMethod": "PATCH",
1461 "id": "storage.objectAccessControls.patch",
1462 "parameterOrder": [
1463 "bucket",
1464 "object",
1465 "entity"
1466 ],
1467 "parameters": {
1468 "bucket": {
1469 "description": "Name of a bucket.",
1470 "location": "path",
1471 "required": true,
1472 "type": "string"
1473 },
1474 "entity": {
1475 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
1476 "location": "path",
1477 "required": true,
1478 "type": "string"
1479 },
1480 "generation": {
1481 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
1482 "format": "int64",
1483 "location": "query",
1484 "type": "string"
1485 },
1486 "object": {
1487 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1488 "location": "path",
1489 "required": true,
1490 "type": "string"
1491 },
1492 "userProject": {
1493 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1494 "location": "query",
1495 "type": "string"
1496 }
1497 },
1498 "path": "b/{bucket}/o/{object}/acl/{entity}",
1499 "request": {
1500 "$ref": "ObjectAccessControl"
1501 },
1502 "response": {
1503 "$ref": "ObjectAccessControl"
1504 },
1505 "scopes": [
1506 "https://www.googleapis.com/auth/cloud-platform",
1507 "https://www.googleapis.com/auth/devstorage.full_control"
1508 ]
1509 },
1510 "update": {
1511 "description": "Updates an ACL entry on the specified object.",
1512 "httpMethod": "PUT",
1513 "id": "storage.objectAccessControls.update",
1514 "parameterOrder": [
1515 "bucket",
1516 "object",
1517 "entity"
1518 ],
1519 "parameters": {
1520 "bucket": {
1521 "description": "Name of a bucket.",
1522 "location": "path",
1523 "required": true,
1524 "type": "string"
1525 },
1526 "entity": {
1527 "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
1528 "location": "path",
1529 "required": true,
1530 "type": "string"
1531 },
1532 "generation": {
1533 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
1534 "format": "int64",
1535 "location": "query",
1536 "type": "string"
1537 },
1538 "object": {
1539 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1540 "location": "path",
1541 "required": true,
1542 "type": "string"
1543 },
1544 "userProject": {
1545 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1546 "location": "query",
1547 "type": "string"
1548 }
1549 },
1550 "path": "b/{bucket}/o/{object}/acl/{entity}",
1551 "request": {
1552 "$ref": "ObjectAccessControl"
1553 },
1554 "response": {
1555 "$ref": "ObjectAccessControl"
1556 },
1557 "scopes": [
1558 "https://www.googleapis.com/auth/cloud-platform",
1559 "https://www.googleapis.com/auth/devstorage.full_control"
1560 ]
1561 }
1562 }
1563 },
1564 "objects": {
1565 "methods": {
1566 "compose": {
1567 "description": "Concatenates a list of existing objects into a new object in the same bucket.",
1568 "httpMethod": "POST",
1569 "id": "storage.objects.compose",
1570 "parameterOrder": [
1571 "destinationBucket",
1572 "destinationObject"
1573 ],
1574 "parameters": {
1575 "destinationBucket": {
1576 "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.",
1577 "location": "path",
1578 "required": true,
1579 "type": "string"
1580 },
1581 "destinationObject": {
1582 "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1583 "location": "path",
1584 "required": true,
1585 "type": "string"
1586 },
1587 "destinationPredefinedAcl": {
1588 "description": "Apply a predefined set of access controls to the destination object.",
1589 "enum": [
1590 "authenticatedRead",
1591 "bucketOwnerFullControl",
1592 "bucketOwnerRead",
1593 "private",
1594 "projectPrivate",
1595 "publicRead"
1596 ],
1597 "enumDescriptions": [
1598 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
1599 "Object owner gets OWNER access, and project team owners get OWNER access.",
1600 "Object owner gets OWNER access, and project team owners get READER access.",
1601 "Object owner gets OWNER access.",
1602 "Object owner gets OWNER access, and project team members get access according to their roles.",
1603 "Object owner gets OWNER access, and allUsers get READER access."
1604 ],
1605 "location": "query",
1606 "type": "string"
1607 },
1608 "ifGenerationMatch": {
1609 "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
1610 "format": "int64",
1611 "location": "query",
1612 "type": "string"
1613 },
1614 "ifMetagenerationMatch": {
1615 "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
1616 "format": "int64",
1617 "location": "query",
1618 "type": "string"
1619 },
1620 "kmsKeyName": {
1621 "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
1622 "location": "query",
1623 "type": "string"
1624 },
1625 "userProject": {
1626 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1627 "location": "query",
1628 "type": "string"
1629 }
1630 },
1631 "path": "b/{destinationBucket}/o/{destinationObject}/compose",
1632 "request": {
1633 "$ref": "ComposeRequest"
1634 },
1635 "response": {
1636 "$ref": "Object"
1637 },
1638 "scopes": [
1639 "https://www.googleapis.com/auth/cloud-platform",
1640 "https://www.googleapis.com/auth/devstorage.full_control",
1641 "https://www.googleapis.com/auth/devstorage.read_write"
1642 ]
1643 },
1644 "copy": {
1645 "description": "Copies a source object to a destination object. Optionally overrides metadata.",
1646 "httpMethod": "POST",
1647 "id": "storage.objects.copy",
1648 "parameterOrder": [
1649 "sourceBucket",
1650 "sourceObject",
1651 "destinationBucket",
1652 "destinationObject"
1653 ],
1654 "parameters": {
1655 "destinationBucket": {
1656 "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1657 "location": "path",
1658 "required": true,
1659 "type": "string"
1660 },
1661 "destinationObject": {
1662 "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
1663 "location": "path",
1664 "required": true,
1665 "type": "string"
1666 },
1667 "destinationPredefinedAcl": {
1668 "description": "Apply a predefined set of access controls to the destination object.",
1669 "enum": [
1670 "authenticatedRead",
1671 "bucketOwnerFullControl",
1672 "bucketOwnerRead",
1673 "private",
1674 "projectPrivate",
1675 "publicRead"
1676 ],
1677 "enumDescriptions": [
1678 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
1679 "Object owner gets OWNER access, and project team owners get OWNER access.",
1680 "Object owner gets OWNER access, and project team owners get READER access.",
1681 "Object owner gets OWNER access.",
1682 "Object owner gets OWNER access, and project team members get access according to their roles.",
1683 "Object owner gets OWNER access, and allUsers get READER access."
1684 ],
1685 "location": "query",
1686 "type": "string"
1687 },
1688 "ifGenerationMatch": {
1689 "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
1690 "format": "int64",
1691 "location": "query",
1692 "type": "string"
1693 },
1694 "ifGenerationNotMatch": {
1695 "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
1696 "format": "int64",
1697 "location": "query",
1698 "type": "string"
1699 },
1700 "ifMetagenerationMatch": {
1701 "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
1702 "format": "int64",
1703 "location": "query",
1704 "type": "string"
1705 },
1706 "ifMetagenerationNotMatch": {
1707 "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
1708 "format": "int64",
1709 "location": "query",
1710 "type": "string"
1711 },
1712 "ifSourceGenerationMatch": {
1713 "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
1714 "format": "int64",
1715 "location": "query",
1716 "type": "string"
1717 },
1718 "ifSourceGenerationNotMatch": {
1719 "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
1720 "format": "int64",
1721 "location": "query",
1722 "type": "string"
1723 },
1724 "ifSourceMetagenerationMatch": {
1725 "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
1726 "format": "int64",
1727 "location": "query",
1728 "type": "string"
1729 },
1730 "ifSourceMetagenerationNotMatch": {
1731 "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
1732 "format": "int64",
1733 "location": "query",
1734 "type": "string"
1735 },
1736 "projection": {
1737 "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
1738 "enum": [
1739 "full",
1740 "noAcl"
1741 ],
1742 "enumDescriptions": [
1743 "Include all properties.",
1744 "Omit the owner, acl property."
1745 ],
1746 "location": "query",
1747 "type": "string"
1748 },
1749 "sourceBucket": {
1750 "description": "Name of the bucket in which to find the source object.",
1751 "location": "path",
1752 "required": true,
1753 "type": "string"
1754 },
1755 "sourceGeneration": {
1756 "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
1757 "format": "int64",
1758 "location": "query",
1759 "type": "string"
1760 },
1761 "sourceObject": {
1762 "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1763 "location": "path",
1764 "required": true,
1765 "type": "string"
1766 },
1767 "userProject": {
1768 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1769 "location": "query",
1770 "type": "string"
1771 }
1772 },
1773 "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
1774 "request": {
1775 "$ref": "Object"
1776 },
1777 "response": {
1778 "$ref": "Object"
1779 },
1780 "scopes": [
1781 "https://www.googleapis.com/auth/cloud-platform",
1782 "https://www.googleapis.com/auth/devstorage.full_control",
1783 "https://www.googleapis.com/auth/devstorage.read_write"
1784 ]
1785 },
1786 "delete": {
1787 "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
1788 "httpMethod": "DELETE",
1789 "id": "storage.objects.delete",
1790 "parameterOrder": [
1791 "bucket",
1792 "object"
1793 ],
1794 "parameters": {
1795 "bucket": {
1796 "description": "Name of the bucket in which the object resides.",
1797 "location": "path",
1798 "required": true,
1799 "type": "string"
1800 },
1801 "generation": {
1802 "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
1803 "format": "int64",
1804 "location": "query",
1805 "type": "string"
1806 },
1807 "ifGenerationMatch": {
1808 "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
1809 "format": "int64",
1810 "location": "query",
1811 "type": "string"
1812 },
1813 "ifGenerationNotMatch": {
1814 "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
1815 "format": "int64",
1816 "location": "query",
1817 "type": "string"
1818 },
1819 "ifMetagenerationMatch": {
1820 "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
1821 "format": "int64",
1822 "location": "query",
1823 "type": "string"
1824 },
1825 "ifMetagenerationNotMatch": {
1826 "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
1827 "format": "int64",
1828 "location": "query",
1829 "type": "string"
1830 },
1831 "object": {
1832 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1833 "location": "path",
1834 "required": true,
1835 "type": "string"
1836 },
1837 "userProject": {
1838 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1839 "location": "query",
1840 "type": "string"
1841 }
1842 },
1843 "path": "b/{bucket}/o/{object}",
1844 "scopes": [
1845 "https://www.googleapis.com/auth/cloud-platform",
1846 "https://www.googleapis.com/auth/devstorage.full_control",
1847 "https://www.googleapis.com/auth/devstorage.read_write"
1848 ]
1849 },
1850 "get": {
1851 "description": "Retrieves an object or its metadata.",
1852 "httpMethod": "GET",
1853 "id": "storage.objects.get",
1854 "parameterOrder": [
1855 "bucket",
1856 "object"
1857 ],
1858 "parameters": {
1859 "bucket": {
1860 "description": "Name of the bucket in which the object resides.",
1861 "location": "path",
1862 "required": true,
1863 "type": "string"
1864 },
1865 "generation": {
1866 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
1867 "format": "int64",
1868 "location": "query",
1869 "type": "string"
1870 },
1871 "ifGenerationMatch": {
1872 "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
1873 "format": "int64",
1874 "location": "query",
1875 "type": "string"
1876 },
1877 "ifGenerationNotMatch": {
1878 "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
1879 "format": "int64",
1880 "location": "query",
1881 "type": "string"
1882 },
1883 "ifMetagenerationMatch": {
1884 "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
1885 "format": "int64",
1886 "location": "query",
1887 "type": "string"
1888 },
1889 "ifMetagenerationNotMatch": {
1890 "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
1891 "format": "int64",
1892 "location": "query",
1893 "type": "string"
1894 },
1895 "object": {
1896 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1897 "location": "path",
1898 "required": true,
1899 "type": "string"
1900 },
1901 "projection": {
1902 "description": "Set of properties to return. Defaults to noAcl.",
1903 "enum": [
1904 "full",
1905 "noAcl"
1906 ],
1907 "enumDescriptions": [
1908 "Include all properties.",
1909 "Omit the owner, acl property."
1910 ],
1911 "location": "query",
1912 "type": "string"
1913 },
1914 "userProject": {
1915 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1916 "location": "query",
1917 "type": "string"
1918 }
1919 },
1920 "path": "b/{bucket}/o/{object}",
1921 "response": {
1922 "$ref": "Object"
1923 },
1924 "scopes": [
1925 "https://www.googleapis.com/auth/cloud-platform",
1926 "https://www.googleapis.com/auth/cloud-platform.read-only",
1927 "https://www.googleapis.com/auth/devstorage.full_control",
1928 "https://www.googleapis.com/auth/devstorage.read_only",
1929 "https://www.googleapis.com/auth/devstorage.read_write"
1930 ],
1931 "supportsMediaDownload": true,
1932 "useMediaDownloadService": true
1933 },
1934 "getIamPolicy": {
1935 "description": "Returns an IAM policy for the specified object.",
1936 "httpMethod": "GET",
1937 "id": "storage.objects.getIamPolicy",
1938 "parameterOrder": [
1939 "bucket",
1940 "object"
1941 ],
1942 "parameters": {
1943 "bucket": {
1944 "description": "Name of the bucket in which the object resides.",
1945 "location": "path",
1946 "required": true,
1947 "type": "string"
1948 },
1949 "generation": {
1950 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
1951 "format": "int64",
1952 "location": "query",
1953 "type": "string"
1954 },
1955 "object": {
1956 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
1957 "location": "path",
1958 "required": true,
1959 "type": "string"
1960 },
1961 "userProject": {
1962 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
1963 "location": "query",
1964 "type": "string"
1965 }
1966 },
1967 "path": "b/{bucket}/o/{object}/iam",
1968 "response": {
1969 "$ref": "Policy"
1970 },
1971 "scopes": [
1972 "https://www.googleapis.com/auth/cloud-platform",
1973 "https://www.googleapis.com/auth/cloud-platform.read-only",
1974 "https://www.googleapis.com/auth/devstorage.full_control",
1975 "https://www.googleapis.com/auth/devstorage.read_only",
1976 "https://www.googleapis.com/auth/devstorage.read_write"
1977 ]
1978 },
1979 "insert": {
1980 "description": "Stores a new object and metadata.",
1981 "httpMethod": "POST",
1982 "id": "storage.objects.insert",
1983 "mediaUpload": {
1984 "accept": [
1985 "*/*"
1986 ],
1987 "protocols": {
1988 "resumable": {
1989 "multipart": true,
1990 "path": "/resumable/upload/storage/v1/b/{bucket}/o"
1991 },
1992 "simple": {
1993 "multipart": true,
1994 "path": "/upload/storage/v1/b/{bucket}/o"
1995 }
1996 }
1997 },
1998 "parameterOrder": [
1999 "bucket"
2000 ],
2001 "parameters": {
2002 "bucket": {
2003 "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
2004 "location": "path",
2005 "required": true,
2006 "type": "string"
2007 },
2008 "contentEncoding": {
2009 "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
2010 "location": "query",
2011 "type": "string"
2012 },
2013 "ifGenerationMatch": {
2014 "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
2015 "format": "int64",
2016 "location": "query",
2017 "type": "string"
2018 },
2019 "ifGenerationNotMatch": {
2020 "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
2021 "format": "int64",
2022 "location": "query",
2023 "type": "string"
2024 },
2025 "ifMetagenerationMatch": {
2026 "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
2027 "format": "int64",
2028 "location": "query",
2029 "type": "string"
2030 },
2031 "ifMetagenerationNotMatch": {
2032 "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
2033 "format": "int64",
2034 "location": "query",
2035 "type": "string"
2036 },
2037 "kmsKeyName": {
2038 "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
2039 "location": "query",
2040 "type": "string"
2041 },
2042 "name": {
2043 "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
2044 "location": "query",
2045 "type": "string"
2046 },
2047 "predefinedAcl": {
2048 "description": "Apply a predefined set of access controls to this object.",
2049 "enum": [
2050 "authenticatedRead",
2051 "bucketOwnerFullControl",
2052 "bucketOwnerRead",
2053 "private",
2054 "projectPrivate",
2055 "publicRead"
2056 ],
2057 "enumDescriptions": [
2058 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
2059 "Object owner gets OWNER access, and project team owners get OWNER access.",
2060 "Object owner gets OWNER access, and project team owners get READER access.",
2061 "Object owner gets OWNER access.",
2062 "Object owner gets OWNER access, and project team members get access according to their roles.",
2063 "Object owner gets OWNER access, and allUsers get READER access."
2064 ],
2065 "location": "query",
2066 "type": "string"
2067 },
2068 "projection": {
2069 "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
2070 "enum": [
2071 "full",
2072 "noAcl"
2073 ],
2074 "enumDescriptions": [
2075 "Include all properties.",
2076 "Omit the owner, acl property."
2077 ],
2078 "location": "query",
2079 "type": "string"
2080 },
2081 "userProject": {
2082 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
2083 "location": "query",
2084 "type": "string"
2085 }
2086 },
2087 "path": "b/{bucket}/o",
2088 "request": {
2089 "$ref": "Object"
2090 },
2091 "response": {
2092 "$ref": "Object"
2093 },
2094 "scopes": [
2095 "https://www.googleapis.com/auth/cloud-platform",
2096 "https://www.googleapis.com/auth/devstorage.full_control",
2097 "https://www.googleapis.com/auth/devstorage.read_write"
2098 ],
2099 "supportsMediaUpload": true
2100 },
2101 "list": {
2102 "description": "Retrieves a list of objects matching the criteria.",
2103 "httpMethod": "GET",
2104 "id": "storage.objects.list",
2105 "parameterOrder": [
2106 "bucket"
2107 ],
2108 "parameters": {
2109 "bucket": {
2110 "description": "Name of the bucket in which to look for objects.",
2111 "location": "path",
2112 "required": true,
2113 "type": "string"
2114 },
2115 "delimiter": {
2116 "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
2117 "location": "query",
2118 "type": "string"
2119 },
2120 "includeTrailingDelimiter": {
2121 "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
2122 "location": "query",
2123 "type": "boolean"
2124 },
2125 "maxResults": {
2126 "default": "1000",
2127 "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
2128 "format": "uint32",
2129 "location": "query",
2130 "minimum": "0",
2131 "type": "integer"
2132 },
2133 "pageToken": {
2134 "description": "A previously-returned page token representing part of the larger set of results to view.",
2135 "location": "query",
2136 "type": "string"
2137 },
2138 "prefix": {
2139 "description": "Filter results to objects whose names begin with this prefix.",
2140 "location": "query",
2141 "type": "string"
2142 },
2143 "projection": {
2144 "description": "Set of properties to return. Defaults to noAcl.",
2145 "enum": [
2146 "full",
2147 "noAcl"
2148 ],
2149 "enumDescriptions": [
2150 "Include all properties.",
2151 "Omit the owner, acl property."
2152 ],
2153 "location": "query",
2154 "type": "string"
2155 },
2156 "userProject": {
2157 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
2158 "location": "query",
2159 "type": "string"
2160 },
2161 "versions": {
2162 "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
2163 "location": "query",
2164 "type": "boolean"
2165 }
2166 },
2167 "path": "b/{bucket}/o",
2168 "response": {
2169 "$ref": "Objects"
2170 },
2171 "scopes": [
2172 "https://www.googleapis.com/auth/cloud-platform",
2173 "https://www.googleapis.com/auth/cloud-platform.read-only",
2174 "https://www.googleapis.com/auth/devstorage.full_control",
2175 "https://www.googleapis.com/auth/devstorage.read_only",
2176 "https://www.googleapis.com/auth/devstorage.read_write"
2177 ],
2178 "supportsSubscription": true
2179 },
2180 "patch": {
2181 "description": "Patches an object's metadata.",
2182 "httpMethod": "PATCH",
2183 "id": "storage.objects.patch",
2184 "parameterOrder": [
2185 "bucket",
2186 "object"
2187 ],
2188 "parameters": {
2189 "bucket": {
2190 "description": "Name of the bucket in which the object resides.",
2191 "location": "path",
2192 "required": true,
2193 "type": "string"
2194 },
2195 "generation": {
2196 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
2197 "format": "int64",
2198 "location": "query",
2199 "type": "string"
2200 },
2201 "ifGenerationMatch": {
2202 "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
2203 "format": "int64",
2204 "location": "query",
2205 "type": "string"
2206 },
2207 "ifGenerationNotMatch": {
2208 "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
2209 "format": "int64",
2210 "location": "query",
2211 "type": "string"
2212 },
2213 "ifMetagenerationMatch": {
2214 "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
2215 "format": "int64",
2216 "location": "query",
2217 "type": "string"
2218 },
2219 "ifMetagenerationNotMatch": {
2220 "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
2221 "format": "int64",
2222 "location": "query",
2223 "type": "string"
2224 },
2225 "object": {
2226 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
2227 "location": "path",
2228 "required": true,
2229 "type": "string"
2230 },
2231 "predefinedAcl": {
2232 "description": "Apply a predefined set of access controls to this object.",
2233 "enum": [
2234 "authenticatedRead",
2235 "bucketOwnerFullControl",
2236 "bucketOwnerRead",
2237 "private",
2238 "projectPrivate",
2239 "publicRead"
2240 ],
2241 "enumDescriptions": [
2242 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
2243 "Object owner gets OWNER access, and project team owners get OWNER access.",
2244 "Object owner gets OWNER access, and project team owners get READER access.",
2245 "Object owner gets OWNER access.",
2246 "Object owner gets OWNER access, and project team members get access according to their roles.",
2247 "Object owner gets OWNER access, and allUsers get READER access."
2248 ],
2249 "location": "query",
2250 "type": "string"
2251 },
2252 "projection": {
2253 "description": "Set of properties to return. Defaults to full.",
2254 "enum": [
2255 "full",
2256 "noAcl"
2257 ],
2258 "enumDescriptions": [
2259 "Include all properties.",
2260 "Omit the owner, acl property."
2261 ],
2262 "location": "query",
2263 "type": "string"
2264 },
2265 "userProject": {
2266 "description": "The project to be billed for this request, for Requester Pays buckets.",
2267 "location": "query",
2268 "type": "string"
2269 }
2270 },
2271 "path": "b/{bucket}/o/{object}",
2272 "request": {
2273 "$ref": "Object"
2274 },
2275 "response": {
2276 "$ref": "Object"
2277 },
2278 "scopes": [
2279 "https://www.googleapis.com/auth/cloud-platform",
2280 "https://www.googleapis.com/auth/devstorage.full_control"
2281 ]
2282 },
2283 "rewrite": {
2284 "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
2285 "httpMethod": "POST",
2286 "id": "storage.objects.rewrite",
2287 "parameterOrder": [
2288 "sourceBucket",
2289 "sourceObject",
2290 "destinationBucket",
2291 "destinationObject"
2292 ],
2293 "parameters": {
2294 "destinationBucket": {
2295 "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
2296 "location": "path",
2297 "required": true,
2298 "type": "string"
2299 },
2300 "destinationKmsKeyName": {
2301 "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
2302 "location": "query",
2303 "type": "string"
2304 },
2305 "destinationObject": {
2306 "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
2307 "location": "path",
2308 "required": true,
2309 "type": "string"
2310 },
2311 "destinationPredefinedAcl": {
2312 "description": "Apply a predefined set of access controls to the destination object.",
2313 "enum": [
2314 "authenticatedRead",
2315 "bucketOwnerFullControl",
2316 "bucketOwnerRead",
2317 "private",
2318 "projectPrivate",
2319 "publicRead"
2320 ],
2321 "enumDescriptions": [
2322 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
2323 "Object owner gets OWNER access, and project team owners get OWNER access.",
2324 "Object owner gets OWNER access, and project team owners get READER access.",
2325 "Object owner gets OWNER access.",
2326 "Object owner gets OWNER access, and project team members get access according to their roles.",
2327 "Object owner gets OWNER access, and allUsers get READER access."
2328 ],
2329 "location": "query",
2330 "type": "string"
2331 },
2332 "ifGenerationMatch": {
2333 "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
2334 "format": "int64",
2335 "location": "query",
2336 "type": "string"
2337 },
2338 "ifGenerationNotMatch": {
2339 "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
2340 "format": "int64",
2341 "location": "query",
2342 "type": "string"
2343 },
2344 "ifMetagenerationMatch": {
2345 "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
2346 "format": "int64",
2347 "location": "query",
2348 "type": "string"
2349 },
2350 "ifMetagenerationNotMatch": {
2351 "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
2352 "format": "int64",
2353 "location": "query",
2354 "type": "string"
2355 },
2356 "ifSourceGenerationMatch": {
2357 "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
2358 "format": "int64",
2359 "location": "query",
2360 "type": "string"
2361 },
2362 "ifSourceGenerationNotMatch": {
2363 "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
2364 "format": "int64",
2365 "location": "query",
2366 "type": "string"
2367 },
2368 "ifSourceMetagenerationMatch": {
2369 "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
2370 "format": "int64",
2371 "location": "query",
2372 "type": "string"
2373 },
2374 "ifSourceMetagenerationNotMatch": {
2375 "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
2376 "format": "int64",
2377 "location": "query",
2378 "type": "string"
2379 },
2380 "maxBytesRewrittenPerCall": {
2381 "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
2382 "format": "int64",
2383 "location": "query",
2384 "type": "string"
2385 },
2386 "projection": {
2387 "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
2388 "enum": [
2389 "full",
2390 "noAcl"
2391 ],
2392 "enumDescriptions": [
2393 "Include all properties.",
2394 "Omit the owner, acl property."
2395 ],
2396 "location": "query",
2397 "type": "string"
2398 },
2399 "rewriteToken": {
2400 "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
2401 "location": "query",
2402 "type": "string"
2403 },
2404 "sourceBucket": {
2405 "description": "Name of the bucket in which to find the source object.",
2406 "location": "path",
2407 "required": true,
2408 "type": "string"
2409 },
2410 "sourceGeneration": {
2411 "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
2412 "format": "int64",
2413 "location": "query",
2414 "type": "string"
2415 },
2416 "sourceObject": {
2417 "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
2418 "location": "path",
2419 "required": true,
2420 "type": "string"
2421 },
2422 "userProject": {
2423 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
2424 "location": "query",
2425 "type": "string"
2426 }
2427 },
2428 "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
2429 "request": {
2430 "$ref": "Object"
2431 },
2432 "response": {
2433 "$ref": "RewriteResponse"
2434 },
2435 "scopes": [
2436 "https://www.googleapis.com/auth/cloud-platform",
2437 "https://www.googleapis.com/auth/devstorage.full_control",
2438 "https://www.googleapis.com/auth/devstorage.read_write"
2439 ]
2440 },
2441 "setIamPolicy": {
2442 "description": "Updates an IAM policy for the specified object.",
2443 "httpMethod": "PUT",
2444 "id": "storage.objects.setIamPolicy",
2445 "parameterOrder": [
2446 "bucket",
2447 "object"
2448 ],
2449 "parameters": {
2450 "bucket": {
2451 "description": "Name of the bucket in which the object resides.",
2452 "location": "path",
2453 "required": true,
2454 "type": "string"
2455 },
2456 "generation": {
2457 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
2458 "format": "int64",
2459 "location": "query",
2460 "type": "string"
2461 },
2462 "object": {
2463 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
2464 "location": "path",
2465 "required": true,
2466 "type": "string"
2467 },
2468 "userProject": {
2469 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
2470 "location": "query",
2471 "type": "string"
2472 }
2473 },
2474 "path": "b/{bucket}/o/{object}/iam",
2475 "request": {
2476 "$ref": "Policy"
2477 },
2478 "response": {
2479 "$ref": "Policy"
2480 },
2481 "scopes": [
2482 "https://www.googleapis.com/auth/cloud-platform",
2483 "https://www.googleapis.com/auth/devstorage.full_control",
2484 "https://www.googleapis.com/auth/devstorage.read_write"
2485 ]
2486 },
2487 "testIamPermissions": {
2488 "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.",
2489 "httpMethod": "GET",
2490 "id": "storage.objects.testIamPermissions",
2491 "parameterOrder": [
2492 "bucket",
2493 "object",
2494 "permissions"
2495 ],
2496 "parameters": {
2497 "bucket": {
2498 "description": "Name of the bucket in which the object resides.",
2499 "location": "path",
2500 "required": true,
2501 "type": "string"
2502 },
2503 "generation": {
2504 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
2505 "format": "int64",
2506 "location": "query",
2507 "type": "string"
2508 },
2509 "object": {
2510 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
2511 "location": "path",
2512 "required": true,
2513 "type": "string"
2514 },
2515 "permissions": {
2516 "description": "Permissions to test.",
2517 "location": "query",
2518 "repeated": true,
2519 "required": true,
2520 "type": "string"
2521 },
2522 "userProject": {
2523 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
2524 "location": "query",
2525 "type": "string"
2526 }
2527 },
2528 "path": "b/{bucket}/o/{object}/iam/testPermissions",
2529 "response": {
2530 "$ref": "TestIamPermissionsResponse"
2531 },
2532 "scopes": [
2533 "https://www.googleapis.com/auth/cloud-platform",
2534 "https://www.googleapis.com/auth/cloud-platform.read-only",
2535 "https://www.googleapis.com/auth/devstorage.full_control",
2536 "https://www.googleapis.com/auth/devstorage.read_only",
2537 "https://www.googleapis.com/auth/devstorage.read_write"
2538 ]
2539 },
2540 "update": {
2541 "description": "Updates an object's metadata.",
2542 "httpMethod": "PUT",
2543 "id": "storage.objects.update",
2544 "parameterOrder": [
2545 "bucket",
2546 "object"
2547 ],
2548 "parameters": {
2549 "bucket": {
2550 "description": "Name of the bucket in which the object resides.",
2551 "location": "path",
2552 "required": true,
2553 "type": "string"
2554 },
2555 "generation": {
2556 "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
2557 "format": "int64",
2558 "location": "query",
2559 "type": "string"
2560 },
2561 "ifGenerationMatch": {
2562 "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
2563 "format": "int64",
2564 "location": "query",
2565 "type": "string"
2566 },
2567 "ifGenerationNotMatch": {
2568 "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
2569 "format": "int64",
2570 "location": "query",
2571 "type": "string"
2572 },
2573 "ifMetagenerationMatch": {
2574 "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
2575 "format": "int64",
2576 "location": "query",
2577 "type": "string"
2578 },
2579 "ifMetagenerationNotMatch": {
2580 "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
2581 "format": "int64",
2582 "location": "query",
2583 "type": "string"
2584 },
2585 "object": {
2586 "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
2587 "location": "path",
2588 "required": true,
2589 "type": "string"
2590 },
2591 "predefinedAcl": {
2592 "description": "Apply a predefined set of access controls to this object.",
2593 "enum": [
2594 "authenticatedRead",
2595 "bucketOwnerFullControl",
2596 "bucketOwnerRead",
2597 "private",
2598 "projectPrivate",
2599 "publicRead"
2600 ],
2601 "enumDescriptions": [
2602 "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
2603 "Object owner gets OWNER access, and project team owners get OWNER access.",
2604 "Object owner gets OWNER access, and project team owners get READER access.",
2605 "Object owner gets OWNER access.",
2606 "Object owner gets OWNER access, and project team members get access according to their roles.",
2607 "Object owner gets OWNER access, and allUsers get READER access."
2608 ],
2609 "location": "query",
2610 "type": "string"
2611 },
2612 "projection": {
2613 "description": "Set of properties to return. Defaults to full.",
2614 "enum": [
2615 "full",
2616 "noAcl"
2617 ],
2618 "enumDescriptions": [
2619 "Include all properties.",
2620 "Omit the owner, acl property."
2621 ],
2622 "location": "query",
2623 "type": "string"
2624 },
2625 "userProject": {
2626 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
2627 "location": "query",
2628 "type": "string"
2629 }
2630 },
2631 "path": "b/{bucket}/o/{object}",
2632 "request": {
2633 "$ref": "Object"
2634 },
2635 "response": {
2636 "$ref": "Object"
2637 },
2638 "scopes": [
2639 "https://www.googleapis.com/auth/cloud-platform",
2640 "https://www.googleapis.com/auth/devstorage.full_control"
2641 ]
2642 },
2643 "watchAll": {
2644 "description": "Watch for changes on all objects in a bucket.",
2645 "httpMethod": "POST",
2646 "id": "storage.objects.watchAll",
2647 "parameterOrder": [
2648 "bucket"
2649 ],
2650 "parameters": {
2651 "bucket": {
2652 "description": "Name of the bucket in which to look for objects.",
2653 "location": "path",
2654 "required": true,
2655 "type": "string"
2656 },
2657 "delimiter": {
2658 "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
2659 "location": "query",
2660 "type": "string"
2661 },
2662 "includeTrailingDelimiter": {
2663 "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
2664 "location": "query",
2665 "type": "boolean"
2666 },
2667 "maxResults": {
2668 "default": "1000",
2669 "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
2670 "format": "uint32",
2671 "location": "query",
2672 "minimum": "0",
2673 "type": "integer"
2674 },
2675 "pageToken": {
2676 "description": "A previously-returned page token representing part of the larger set of results to view.",
2677 "location": "query",
2678 "type": "string"
2679 },
2680 "prefix": {
2681 "description": "Filter results to objects whose names begin with this prefix.",
2682 "location": "query",
2683 "type": "string"
2684 },
2685 "projection": {
2686 "description": "Set of properties to return. Defaults to noAcl.",
2687 "enum": [
2688 "full",
2689 "noAcl"
2690 ],
2691 "enumDescriptions": [
2692 "Include all properties.",
2693 "Omit the owner, acl property."
2694 ],
2695 "location": "query",
2696 "type": "string"
2697 },
2698 "userProject": {
2699 "description": "The project to be billed for this request. Required for Requester Pays buckets.",
2700 "location": "query",
2701 "type": "string"
2702 },
2703 "versions": {
2704 "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
2705 "location": "query",
2706 "type": "boolean"
2707 }
2708 },
2709 "path": "b/{bucket}/o/watch",
2710 "request": {
2711 "$ref": "Channel",
2712 "parameterName": "resource"
2713 },
2714 "response": {
2715 "$ref": "Channel"
2716 },
2717 "scopes": [
2718 "https://www.googleapis.com/auth/cloud-platform",
2719 "https://www.googleapis.com/auth/cloud-platform.read-only",
2720 "https://www.googleapis.com/auth/devstorage.full_control",
2721 "https://www.googleapis.com/auth/devstorage.read_only",
2722 "https://www.googleapis.com/auth/devstorage.read_write"
2723 ],
2724 "supportsSubscription": true
2725 }
2726 }
2727 },
2728 "projects": {
2729 "resources": {
2730 "serviceAccount": {
2731 "methods": {
2732 "get": {
2733 "description": "Get the email address of this project's Google Cloud Storage service account.",
2734 "httpMethod": "GET",
2735 "id": "storage.projects.serviceAccount.get",
2736 "parameterOrder": [
2737 "projectId"
2738 ],
2739 "parameters": {
2740 "projectId": {
2741 "description": "Project ID",
2742 "location": "path",
2743 "required": true,
2744 "type": "string"
2745 },
2746 "userProject": {
2747 "description": "The project to be billed for this request.",
2748 "location": "query",
2749 "type": "string"
2750 }
2751 },
2752 "path": "projects/{projectId}/serviceAccount",
2753 "response": {
2754 "$ref": "ServiceAccount"
2755 },
2756 "scopes": [
2757 "https://www.googleapis.com/auth/cloud-platform",
2758 "https://www.googleapis.com/auth/cloud-platform.read-only",
2759 "https://www.googleapis.com/auth/devstorage.full_control",
2760 "https://www.googleapis.com/auth/devstorage.read_only",
2761 "https://www.googleapis.com/auth/devstorage.read_write"
2762 ]
2763 }
2764 }
2765 }
2766 }
2767 }
2768 },
2769 "revision": "20181109",
2770 "rootUrl": "https://www.googleapis.com/",
2771 "schemas": {
2772 "Bucket": {
2773 "description": "A bucket.",
2774 "id": "Bucket",
2775 "properties": {
2776 "acl": {
2777 "annotations": {
2778 "required": [
2779 "storage.buckets.update"
2780 ]
2781 },
2782 "description": "Access controls on the bucket.",
2783 "items": {
2784 "$ref": "BucketAccessControl"
2785 },
2786 "type": "array"
2787 },
2788 "billing": {
2789 "description": "The bucket's billing configuration.",
2790 "properties": {
2791 "requesterPays": {
2792 "description": "When set to true, Requester Pays is enabled for this bucket.",
2793 "type": "boolean"
2794 }
2795 },
2796 "type": "object"
2797 },
2798 "cors": {
2799 "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.",
2800 "items": {
2801 "properties": {
2802 "maxAgeSeconds": {
2803 "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.",
2804 "format": "int32",
2805 "type": "integer"
2806 },
2807 "method": {
2808                             "description": "The list of HTTP methods on which to include CORS response headers (GET, OPTIONS, POST, etc.). Note: \"*\" is permitted in the list of methods, and means \"any method\".",
2809 "items": {
2810 "type": "string"
2811 },
2812 "type": "array"
2813 },
2814 "origin": {
2815 "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".",
2816 "items": {
2817 "type": "string"
2818 },
2819 "type": "array"
2820 },
2821 "responseHeader": {
2822 "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.",
2823 "items": {
2824 "type": "string"
2825 },
2826 "type": "array"
2827 }
2828 },
2829 "type": "object"
2830 },
2831 "type": "array"
2832 },
2833 "defaultEventBasedHold": {
2834 "description": "The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.",
2835 "type": "boolean"
2836 },
2837 "defaultObjectAcl": {
2838 "description": "Default access controls to apply to new objects when no ACL is provided.",
2839 "items": {
2840 "$ref": "ObjectAccessControl"
2841 },
2842 "type": "array"
2843 },
2844 "encryption": {
2845 "description": "Encryption configuration for a bucket.",
2846 "properties": {
2847 "defaultKmsKeyName": {
2848 "description": "A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.",
2849 "type": "string"
2850 }
2851 },
2852 "type": "object"
2853 },
2854 "etag": {
2855 "description": "HTTP 1.1 Entity tag for the bucket.",
2856 "type": "string"
2857 },
2858 "iamConfiguration": {
2859 "description": "The bucket's IAM configuration.",
2860 "properties": {
2861 "bucketPolicyOnly": {
2862 "properties": {
2863 "enabled": {
2864 "description": "If set, access checks only use bucket-level IAM policies or above.",
2865 "type": "boolean"
2866 },
2867 "lockedTime": {
2868 "description": "The deadline time for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.",
2869 "format": "date-time",
2870 "type": "string"
2871 }
2872 },
2873 "type": "object"
2874 }
2875 },
2876 "type": "object"
2877 },
2878 "id": {
2879 "description": "The ID of the bucket. For buckets, the id and name properties are the same.",
2880 "type": "string"
2881 },
2882 "kind": {
2883 "default": "storage#bucket",
2884 "description": "The kind of item this is. For buckets, this is always storage#bucket.",
2885 "type": "string"
2886 },
2887 "labels": {
2888 "additionalProperties": {
2889 "description": "An individual label entry.",
2890 "type": "string"
2891 },
2892 "description": "User-provided labels, in key/value pairs.",
2893 "type": "object"
2894 },
2895 "lifecycle": {
2896 "description": "The bucket's lifecycle configuration. See lifecycle management for more information.",
2897 "properties": {
2898 "rule": {
2899 "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.",
2900 "items": {
2901 "properties": {
2902 "action": {
2903 "description": "The action to take.",
2904 "properties": {
2905 "storageClass": {
2906 "description": "Target storage class. Required iff the type of the action is SetStorageClass.",
2907 "type": "string"
2908 },
2909 "type": {
2910 "description": "Type of the action. Currently, only Delete and SetStorageClass are supported.",
2911 "type": "string"
2912 }
2913 },
2914 "type": "object"
2915 },
2916 "condition": {
2917 "description": "The condition(s) under which the action will be taken.",
2918 "properties": {
2919 "age": {
2920 "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.",
2921 "format": "int32",
2922 "type": "integer"
2923 },
2924 "createdBefore": {
2925 "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.",
2926 "format": "date",
2927 "type": "string"
2928 },
2929 "isLive": {
2930 "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.",
2931 "type": "boolean"
2932 },
2933 "matchesPattern": {
2934 "description": "A regular expression that satisfies the RE2 syntax. This condition is satisfied when the name of the object matches the RE2 pattern. Note: This feature is currently in the \"Early Access\" launch stage and is only available to a whitelisted set of users; that means that this feature may be changed in backward-incompatible ways and that it is not guaranteed to be released.",
2935 "type": "string"
2936 },
2937 "matchesStorageClass": {
2938 "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.",
2939 "items": {
2940 "type": "string"
2941 },
2942 "type": "array"
2943 },
2944 "numNewerVersions": {
2945 "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.",
2946 "format": "int32",
2947 "type": "integer"
2948 }
2949 },
2950 "type": "object"
2951 }
2952 },
2953 "type": "object"
2954 },
2955 "type": "array"
2956 }
2957 },
2958 "type": "object"
2959 },
2960 "location": {
2961 "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.",
2962 "type": "string"
2963 },
2964 "logging": {
2965 "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.",
2966 "properties": {
2967 "logBucket": {
2968 "description": "The destination bucket where the current bucket's logs should be placed.",
2969 "type": "string"
2970 },
2971 "logObjectPrefix": {
2972 "description": "A prefix for log object names.",
2973 "type": "string"
2974 }
2975 },
2976 "type": "object"
2977 },
2978 "metageneration": {
2979 "description": "The metadata generation of this bucket.",
2980 "format": "int64",
2981 "type": "string"
2982 },
2983 "name": {
2984 "annotations": {
2985 "required": [
2986 "storage.buckets.insert"
2987 ]
2988 },
2989 "description": "The name of the bucket.",
2990 "type": "string"
2991 },
2992 "owner": {
2993 "description": "The owner of the bucket. This is always the project team's owner group.",
2994 "properties": {
2995 "entity": {
2996 "description": "The entity, in the form project-owner-projectId.",
2997 "type": "string"
2998 },
2999 "entityId": {
3000 "description": "The ID for the entity.",
3001 "type": "string"
3002 }
3003 },
3004 "type": "object"
3005 },
3006 "projectNumber": {
3007 "description": "The project number of the project the bucket belongs to.",
3008 "format": "uint64",
3009 "type": "string"
3010 },
3011 "retentionPolicy": {
3012 "description": "The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.",
3013 "properties": {
3014 "effectiveTime": {
3015 "description": "Server-determined value that indicates the time from which policy was enforced and effective. This value is in RFC 3339 format.",
3016 "format": "date-time",
3017 "type": "string"
3018 },
3019 "isLocked": {
3020 "description": "Once locked, an object retention policy cannot be modified.",
3021 "type": "boolean"
3022 },
3023 "retentionPeriod": {
3024 "description": "The duration in seconds that objects need to be retained. Retention duration must be greater than zero and less than 100 years. Note that enforcement of retention periods less than a day is not guaranteed. Such periods should only be used for testing purposes.",
3025 "format": "int64",
3026 "type": "string"
3027 }
3028 },
3029 "type": "object"
3030 },
3031 "selfLink": {
3032 "description": "The URI of this bucket.",
3033 "type": "string"
3034 },
3035 "storageClass": {
3036 "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.",
3037 "type": "string"
3038 },
3039 "timeCreated": {
3040 "description": "The creation time of the bucket in RFC 3339 format.",
3041 "format": "date-time",
3042 "type": "string"
3043 },
3044 "updated": {
3045 "description": "The modification time of the bucket in RFC 3339 format.",
3046 "format": "date-time",
3047 "type": "string"
3048 },
3049 "versioning": {
3050 "description": "The bucket's versioning configuration.",
3051 "properties": {
3052 "enabled": {
3053 "description": "While set to true, versioning is fully enabled for this bucket.",
3054 "type": "boolean"
3055 }
3056 },
3057 "type": "object"
3058 },
3059 "website": {
3060 "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.",
3061 "properties": {
3062 "mainPageSuffix": {
3063 "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.",
3064 "type": "string"
3065 },
3066 "notFoundPage": {
3067 "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result.",
3068 "type": "string"
3069 }
3070 },
3071 "type": "object"
3072 }
3073 },
3074 "type": "object"
3075 },
3076 "BucketAccessControl": {
3077 "description": "An access-control entry.",
3078 "id": "BucketAccessControl",
3079 "properties": {
3080 "bucket": {
3081 "description": "The name of the bucket.",
3082 "type": "string"
3083 },
3084 "domain": {
3085 "description": "The domain associated with the entity, if any.",
3086 "type": "string"
3087 },
3088 "email": {
3089 "description": "The email address associated with the entity, if any.",
3090 "type": "string"
3091 },
3092 "entity": {
3093 "annotations": {
3094 "required": [
3095 "storage.bucketAccessControls.insert"
3096 ]
3097 },
3098 "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
3099 "type": "string"
3100 },
3101 "entityId": {
3102 "description": "The ID for the entity, if any.",
3103 "type": "string"
3104 },
3105 "etag": {
3106 "description": "HTTP 1.1 Entity tag for the access-control entry.",
3107 "type": "string"
3108 },
3109 "id": {
3110 "description": "The ID of the access-control entry.",
3111 "type": "string"
3112 },
3113 "kind": {
3114 "default": "storage#bucketAccessControl",
3115 "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.",
3116 "type": "string"
3117 },
3118 "projectTeam": {
3119 "description": "The project team associated with the entity, if any.",
3120 "properties": {
3121 "projectNumber": {
3122 "description": "The project number.",
3123 "type": "string"
3124 },
3125 "team": {
3126 "description": "The team.",
3127 "type": "string"
3128 }
3129 },
3130 "type": "object"
3131 },
3132 "role": {
3133 "annotations": {
3134 "required": [
3135 "storage.bucketAccessControls.insert"
3136 ]
3137 },
3138 "description": "The access permission for the entity.",
3139 "type": "string"
3140 },
3141 "selfLink": {
3142 "description": "The link to this access-control entry.",
3143 "type": "string"
3144 }
3145 },
3146 "type": "object"
3147 },
3148 "BucketAccessControls": {
3149 "description": "An access-control list.",
3150 "id": "BucketAccessControls",
3151 "properties": {
3152 "items": {
3153 "description": "The list of items.",
3154 "items": {
3155 "$ref": "BucketAccessControl"
3156 },
3157 "type": "array"
3158 },
3159 "kind": {
3160 "default": "storage#bucketAccessControls",
3161 "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.",
3162 "type": "string"
3163 }
3164 },
3165 "type": "object"
3166 },
3167 "Buckets": {
3168 "description": "A list of buckets.",
3169 "id": "Buckets",
3170 "properties": {
3171 "items": {
3172 "description": "The list of items.",
3173 "items": {
3174 "$ref": "Bucket"
3175 },
3176 "type": "array"
3177 },
3178 "kind": {
3179 "default": "storage#buckets",
3180 "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.",
3181 "type": "string"
3182 },
3183 "nextPageToken": {
3184 "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
3185 "type": "string"
3186 }
3187 },
3188 "type": "object"
3189 },
3190 "Channel": {
3191      "description": "A notification channel used to watch for resource changes.",
3192 "id": "Channel",
3193 "properties": {
3194 "address": {
3195 "description": "The address where notifications are delivered for this channel.",
3196 "type": "string"
3197 },
3198 "expiration": {
3199 "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.",
3200 "format": "int64",
3201 "type": "string"
3202 },
3203 "id": {
3204 "description": "A UUID or similar unique string that identifies this channel.",
3205 "type": "string"
3206 },
3207 "kind": {
3208 "default": "api#channel",
3209 "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".",
3210 "type": "string"
3211 },
3212 "params": {
3213 "additionalProperties": {
3214 "description": "Declares a new parameter by name.",
3215 "type": "string"
3216 },
3217 "description": "Additional parameters controlling delivery channel behavior. Optional.",
3218 "type": "object"
3219 },
3220 "payload": {
3221 "description": "A Boolean value to indicate whether payload is wanted. Optional.",
3222 "type": "boolean"
3223 },
3224 "resourceId": {
3225 "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions.",
3226 "type": "string"
3227 },
3228 "resourceUri": {
3229 "description": "A version-specific identifier for the watched resource.",
3230 "type": "string"
3231 },
3232 "token": {
3233 "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.",
3234 "type": "string"
3235 },
3236 "type": {
3237 "description": "The type of delivery mechanism used for this channel.",
3238 "type": "string"
3239 }
3240 },
3241 "type": "object"
3242 },
3243 "ComposeRequest": {
3244 "description": "A Compose request.",
3245 "id": "ComposeRequest",
3246 "properties": {
3247 "destination": {
3248 "$ref": "Object",
3249 "description": "Properties of the resulting object."
3250 },
3251 "kind": {
3252 "default": "storage#composeRequest",
3253 "description": "The kind of item this is.",
3254 "type": "string"
3255 },
3256 "sourceObjects": {
3257 "annotations": {
3258 "required": [
3259 "storage.objects.compose"
3260 ]
3261 },
3262 "description": "The list of source objects that will be concatenated into a single object.",
3263 "items": {
3264 "properties": {
3265 "generation": {
3266 "description": "The generation of this object to use as the source.",
3267 "format": "int64",
3268 "type": "string"
3269 },
3270 "name": {
3271 "annotations": {
3272 "required": [
3273 "storage.objects.compose"
3274 ]
3275 },
3276 "description": "The source object's name. All source objects must reside in the same bucket.",
3277 "type": "string"
3278 },
3279 "objectPreconditions": {
3280 "description": "Conditions that must be met for this operation to execute.",
3281 "properties": {
3282 "ifGenerationMatch": {
3283 "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.",
3284 "format": "int64",
3285 "type": "string"
3286 }
3287 },
3288 "type": "object"
3289 }
3290 },
3291 "type": "object"
3292 },
3293 "type": "array"
3294 }
3295 },
3296 "type": "object"
3297 },
3298 "Notification": {
3299 "description": "A subscription to receive Google PubSub notifications.",
3300 "id": "Notification",
3301 "properties": {
3302 "custom_attributes": {
3303 "additionalProperties": {
3304 "type": "string"
3305 },
3306 "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.",
3307 "type": "object"
3308 },
3309 "etag": {
3310 "description": "HTTP 1.1 Entity tag for this subscription notification.",
3311 "type": "string"
3312 },
3313 "event_types": {
3314          "description": "If present, only send notifications about listed event types. If empty, send notifications for all event types.",
3315 "items": {
3316 "type": "string"
3317 },
3318 "type": "array"
3319 },
3320 "id": {
3321 "description": "The ID of the notification.",
3322 "type": "string"
3323 },
3324 "kind": {
3325 "default": "storage#notification",
3326 "description": "The kind of item this is. For notifications, this is always storage#notification.",
3327 "type": "string"
3328 },
3329 "object_name_prefix": {
3330 "description": "If present, only apply this notification configuration to object names that begin with this prefix.",
3331 "type": "string"
3332 },
3333 "payload_format": {
3334 "annotations": {
3335 "required": [
3336 "storage.notifications.insert"
3337 ]
3338 },
3339 "default": "JSON_API_V1",
3340 "description": "The desired content of the Payload.",
3341 "type": "string"
3342 },
3343 "selfLink": {
3344 "description": "The canonical URL of this notification.",
3345 "type": "string"
3346 },
3347 "topic": {
3348 "annotations": {
3349 "required": [
3350 "storage.notifications.insert"
3351 ]
3352 },
3353 "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'",
3354 "type": "string"
3355 }
3356 },
3357 "type": "object"
3358 },
3359 "Notifications": {
3360 "description": "A list of notification subscriptions.",
3361 "id": "Notifications",
3362 "properties": {
3363 "items": {
3364 "description": "The list of items.",
3365 "items": {
3366 "$ref": "Notification"
3367 },
3368 "type": "array"
3369 },
3370 "kind": {
3371 "default": "storage#notifications",
3372 "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.",
3373 "type": "string"
3374 }
3375 },
3376 "type": "object"
3377 },
3378 "Object": {
3379 "description": "An object.",
3380 "id": "Object",
3381 "properties": {
3382 "acl": {
3383 "annotations": {
3384 "required": [
3385 "storage.objects.update"
3386 ]
3387 },
3388 "description": "Access controls on the object.",
3389 "items": {
3390 "$ref": "ObjectAccessControl"
3391 },
3392 "type": "array"
3393 },
3394 "bucket": {
3395 "description": "The name of the bucket containing this object.",
3396 "type": "string"
3397 },
3398 "cacheControl": {
3399 "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600.",
3400 "type": "string"
3401 },
3402 "componentCount": {
3403 "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.",
3404 "format": "int32",
3405 "type": "integer"
3406 },
3407 "contentDisposition": {
3408 "description": "Content-Disposition of the object data.",
3409 "type": "string"
3410 },
3411 "contentEncoding": {
3412 "description": "Content-Encoding of the object data.",
3413 "type": "string"
3414 },
3415 "contentLanguage": {
3416 "description": "Content-Language of the object data.",
3417 "type": "string"
3418 },
3419 "contentType": {
3420 "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream.",
3421 "type": "string"
3422 },
3423 "crc32c": {
3424 "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.",
3425 "type": "string"
3426 },
3427 "customerEncryption": {
3428 "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.",
3429 "properties": {
3430 "encryptionAlgorithm": {
3431 "description": "The encryption algorithm.",
3432 "type": "string"
3433 },
3434 "keySha256": {
3435 "description": "SHA256 hash value of the encryption key.",
3436 "type": "string"
3437 }
3438 },
3439 "type": "object"
3440 },
3441 "etag": {
3442 "description": "HTTP 1.1 Entity tag for the object.",
3443 "type": "string"
3444 },
3445 "eventBasedHold": {
3446 "description": "Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is the loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false.",
3447 "type": "boolean"
3448 },
3449 "generation": {
3450 "description": "The content generation of this object. Used for object versioning.",
3451 "format": "int64",
3452 "type": "string"
3453 },
3454 "id": {
3455 "description": "The ID of the object, including the bucket name, object name, and generation number.",
3456 "type": "string"
3457 },
3458 "kind": {
3459 "default": "storage#object",
3460 "description": "The kind of item this is. For objects, this is always storage#object.",
3461 "type": "string"
3462 },
3463 "kmsKeyName": {
3464 "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key.",
3465 "type": "string"
3466 },
3467 "md5Hash": {
3468 "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.",
3469 "type": "string"
3470 },
3471 "mediaLink": {
3472 "description": "Media download link.",
3473 "type": "string"
3474 },
3475 "metadata": {
3476 "additionalProperties": {
3477 "description": "An individual metadata entry.",
3478 "type": "string"
3479 },
3480 "description": "User-provided metadata, in key/value pairs.",
3481 "type": "object"
3482 },
3483 "metageneration": {
3484 "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.",
3485 "format": "int64",
3486 "type": "string"
3487 },
3488 "name": {
3489 "description": "The name of the object. Required if not specified by URL parameter.",
3490 "type": "string"
3491 },
3492 "owner": {
3493 "description": "The owner of the object. This will always be the uploader of the object.",
3494 "properties": {
3495 "entity": {
3496 "description": "The entity, in the form user-userId.",
3497 "type": "string"
3498 },
3499 "entityId": {
3500 "description": "The ID for the entity.",
3501 "type": "string"
3502 }
3503 },
3504 "type": "object"
3505 },
3506 "retentionExpirationTime": {
3507 "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).",
3508 "format": "date-time",
3509 "type": "string"
3510 },
3511 "selfLink": {
3512 "description": "The link to this object.",
3513 "type": "string"
3514 },
3515 "size": {
3516 "description": "Content-Length of the data in bytes.",
3517 "format": "uint64",
3518 "type": "string"
3519 },
3520 "storageClass": {
3521 "description": "Storage class of the object.",
3522 "type": "string"
3523 },
3524 "temporaryHold": {
3525 "description": "Whether an object is under temporary hold. While this flag is set to true, the object is protected against deletion and overwrites. A common use case of this flag is regulatory investigations where objects need to be retained while the investigation is ongoing. Note that unlike event-based hold, temporary hold does not impact retention expiration time of an object.",
3526 "type": "boolean"
3527 },
3528 "timeCreated": {
3529 "description": "The creation time of the object in RFC 3339 format.",
3530 "format": "date-time",
3531 "type": "string"
3532 },
3533 "timeDeleted": {
3534 "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
3535 "format": "date-time",
3536 "type": "string"
3537 },
3538 "timeStorageClassUpdated": {
3539 "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.",
3540 "format": "date-time",
3541 "type": "string"
3542 },
3543 "updated": {
3544 "description": "The modification time of the object metadata in RFC 3339 format.",
3545 "format": "date-time",
3546 "type": "string"
3547 }
3548 },
3549 "type": "object"
3550 },
3551 "ObjectAccessControl": {
3552 "description": "An access-control entry.",
3553 "id": "ObjectAccessControl",
3554 "properties": {
3555 "bucket": {
3556 "description": "The name of the bucket.",
3557 "type": "string"
3558 },
3559 "domain": {
3560 "description": "The domain associated with the entity, if any.",
3561 "type": "string"
3562 },
3563 "email": {
3564 "description": "The email address associated with the entity, if any.",
3565 "type": "string"
3566 },
3567 "entity": {
3568 "annotations": {
3569 "required": [
3570 "storage.defaultObjectAccessControls.insert",
3571 "storage.objectAccessControls.insert"
3572 ]
3573 },
3574 "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
3575 "type": "string"
3576 },
3577 "entityId": {
3578 "description": "The ID for the entity, if any.",
3579 "type": "string"
3580 },
3581 "etag": {
3582 "description": "HTTP 1.1 Entity tag for the access-control entry.",
3583 "type": "string"
3584 },
3585 "generation": {
3586 "description": "The content generation of the object, if applied to an object.",
3587 "format": "int64",
3588 "type": "string"
3589 },
3590 "id": {
3591 "description": "The ID of the access-control entry.",
3592 "type": "string"
3593 },
3594 "kind": {
3595 "default": "storage#objectAccessControl",
3596 "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.",
3597 "type": "string"
3598 },
3599 "object": {
3600 "description": "The name of the object, if applied to an object.",
3601 "type": "string"
3602 },
3603 "projectTeam": {
3604 "description": "The project team associated with the entity, if any.",
3605 "properties": {
3606 "projectNumber": {
3607 "description": "The project number.",
3608 "type": "string"
3609 },
3610 "team": {
3611 "description": "The team.",
3612 "type": "string"
3613 }
3614 },
3615 "type": "object"
3616 },
3617 "role": {
3618 "annotations": {
3619 "required": [
3620 "storage.defaultObjectAccessControls.insert",
3621 "storage.objectAccessControls.insert"
3622 ]
3623 },
3624 "description": "The access permission for the entity.",
3625 "type": "string"
3626 },
3627 "selfLink": {
3628 "description": "The link to this access-control entry.",
3629 "type": "string"
3630 }
3631 },
3632 "type": "object"
3633 },
3634 "ObjectAccessControls": {
3635 "description": "An access-control list.",
3636 "id": "ObjectAccessControls",
3637 "properties": {
3638 "items": {
3639 "description": "The list of items.",
3640 "items": {
3641 "$ref": "ObjectAccessControl"
3642 },
3643 "type": "array"
3644 },
3645 "kind": {
3646 "default": "storage#objectAccessControls",
3647 "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.",
3648 "type": "string"
3649 }
3650 },
3651 "type": "object"
3652 },
3653 "Objects": {
3654 "description": "A list of objects.",
3655 "id": "Objects",
3656 "properties": {
3657 "items": {
3658 "description": "The list of items.",
3659 "items": {
3660 "$ref": "Object"
3661 },
3662 "type": "array"
3663 },
3664 "kind": {
3665 "default": "storage#objects",
3666 "description": "The kind of item this is. For lists of objects, this is always storage#objects.",
3667 "type": "string"
3668 },
3669 "nextPageToken": {
3670 "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
3671 "type": "string"
3672 },
3673 "prefixes": {
3674 "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.",
3675 "items": {
3676 "type": "string"
3677 },
3678 "type": "array"
3679 }
3680 },
3681 "type": "object"
3682 },
3683 "Policy": {
3684 "description": "A bucket/object IAM policy.",
3685 "id": "Policy",
3686 "properties": {
3687 "bindings": {
3688 "annotations": {
3689 "required": [
3690 "storage.buckets.setIamPolicy",
3691 "storage.objects.setIamPolicy"
3692 ]
3693 },
3694 "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.",
3695 "items": {
3696 "properties": {
3697 "condition": {
3698 "type": "any"
3699 },
3700 "members": {
3701 "annotations": {
3702 "required": [
3703 "storage.buckets.setIamPolicy",
3704 "storage.objects.setIamPolicy"
3705 ]
3706 },
3707 "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project",
3708 "items": {
3709 "type": "string"
3710 },
3711 "type": "array"
3712 },
3713 "role": {
3714 "annotations": {
3715 "required": [
3716 "storage.buckets.setIamPolicy",
3717 "storage.objects.setIamPolicy"
3718 ]
3719 },
3720 "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.",
3721 "type": "string"
3722 }
3723 },
3724 "type": "object"
3725 },
3726 "type": "array"
3727 },
3728 "etag": {
3729 "description": "HTTP 1.1 Entity tag for the policy.",
3730 "format": "byte",
3731 "type": "string"
3732 },
3733 "kind": {
3734 "default": "storage#policy",
3735 "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.",
3736 "type": "string"
3737 },
3738 "resourceId": {
3739 "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.",
3740 "type": "string"
3741 }
3742 },
3743 "type": "object"
3744 },
3745 "RewriteResponse": {
3746 "description": "A rewrite response.",
3747 "id": "RewriteResponse",
3748 "properties": {
3749 "done": {
3750 "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response.",
3751 "type": "boolean"
3752 },
3753 "kind": {
3754 "default": "storage#rewriteResponse",
3755 "description": "The kind of item this is.",
3756 "type": "string"
3757 },
3758 "objectSize": {
3759 "description": "The total size of the object being copied in bytes. This property is always present in the response.",
3760 "format": "int64",
3761 "type": "string"
3762 },
3763 "resource": {
3764 "$ref": "Object",
3765 "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes."
3766 },
3767 "rewriteToken": {
3768 "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy.",
3769 "type": "string"
3770 },
3771 "totalBytesRewritten": {
3772 "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.",
3773 "format": "int64",
3774 "type": "string"
3775 }
3776 },
3777 "type": "object"
3778 },
3779 "ServiceAccount": {
3780      "description": "A Cloud Storage service account.",
3781 "id": "ServiceAccount",
3782 "properties": {
3783 "email_address": {
3784          "description": "The email address of the service account.",
3785 "type": "string"
3786 },
3787 "kind": {
3788 "default": "storage#serviceAccount",
3789          "description": "The kind of item this is. For service accounts, this is always storage#serviceAccount.",
3790 "type": "string"
3791 }
3792 },
3793 "type": "object"
3794 },
3795 "TestIamPermissionsResponse": {
3796 "description": "A storage.(buckets|objects).testIamPermissions response.",
3797 "id": "TestIamPermissionsResponse",
3798 "properties": {
3799 "kind": {
3800 "default": "storage#testIamPermissionsResponse",
3801 "description": "The kind of item this is.",
3802 "type": "string"
3803 },
3804 "permissions": {
3805 "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.",
3806 "items": {
3807 "type": "string"
3808 },
3809 "type": "array"
3810 }
3811 },
3812 "type": "object"
3813 }
3814 },
3815 "servicePath": "storage/v1/",
3816 "title": "Cloud Storage JSON API",
3817 "version": "v1"
3818} \ No newline at end of file
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
new file mode 100644
index 0000000..606686f
--- /dev/null
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -0,0 +1,11472 @@
1// Copyright 2018 Google Inc. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Code generated file. DO NOT EDIT.
6
7// Package storage provides access to the Cloud Storage JSON API.
8//
9// This package is DEPRECATED. Use package cloud.google.com/go/storage instead.
10//
11// See https://developers.google.com/storage/docs/json_api/
12//
13// Usage example:
14//
15// import "google.golang.org/api/storage/v1"
16// ...
17// storageService, err := storage.New(oauthHttpClient)
18package storage // import "google.golang.org/api/storage/v1"
19
20import (
21 "bytes"
22 "context"
23 "encoding/json"
24 "errors"
25 "fmt"
26 "io"
27 "net/http"
28 "net/url"
29 "strconv"
30 "strings"
31
32 gensupport "google.golang.org/api/gensupport"
33 googleapi "google.golang.org/api/googleapi"
34)
35
36// Always reference these packages, just in case the auto-generated code
37// below doesn't.
38var _ = bytes.NewBuffer
39var _ = strconv.Itoa
40var _ = fmt.Sprintf
41var _ = json.NewDecoder
42var _ = io.Copy
43var _ = url.Parse
44var _ = gensupport.MarshalJSON
45var _ = googleapi.Version
46var _ = errors.New
47var _ = strings.Replace
48var _ = context.Canceled
49
// Identity and endpoint of the Cloud Storage JSON API served by this
// package.
const (
	apiId      = "storage:v1"
	apiName    = "storage"
	apiVersion = "v1"
	basePath   = "https://www.googleapis.com/storage/v1/"
)
54
// OAuth2 scopes used by this API. Request only the narrowest scope the
// application actually needs.
const (
	// View and manage your data across Google Cloud Platform services
	CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"

	// View your data across Google Cloud Platform services
	CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"

	// Manage your data and permissions in Google Cloud Storage
	DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"

	// View your data in Google Cloud Storage
	DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"

	// Manage your data in Google Cloud Storage
	DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
)
72
73func New(client *http.Client) (*Service, error) {
74 if client == nil {
75 return nil, errors.New("client is nil")
76 }
77 s := &Service{client: client, BasePath: basePath}
78 s.BucketAccessControls = NewBucketAccessControlsService(s)
79 s.Buckets = NewBucketsService(s)
80 s.Channels = NewChannelsService(s)
81 s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
82 s.Notifications = NewNotificationsService(s)
83 s.ObjectAccessControls = NewObjectAccessControlsService(s)
84 s.Objects = NewObjectsService(s)
85 s.Projects = NewProjectsService(s)
86 return s, nil
87}
88
// Service is the root client for the Cloud Storage JSON API; construct
// it with New. Each exported service field provides access to one
// resource group of the API.
type Service struct {
	client    *http.Client // HTTP client used for every request
	BasePath  string       // API endpoint base URL
	UserAgent string       // optional additional User-Agent fragment

	BucketAccessControls *BucketAccessControlsService

	Buckets *BucketsService

	Channels *ChannelsService

	DefaultObjectAccessControls *DefaultObjectAccessControlsService

	Notifications *NotificationsService

	ObjectAccessControls *ObjectAccessControlsService

	Objects *ObjectsService

	Projects *ProjectsService
}
110
111func (s *Service) userAgent() string {
112 if s.UserAgent == "" {
113 return googleapi.UserAgent
114 }
115 return googleapi.UserAgent + " " + s.UserAgent
116}
117
118func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService {
119 rs := &BucketAccessControlsService{s: s}
120 return rs
121}
122
// BucketAccessControlsService provides the bucketAccessControls methods
// of the API.
type BucketAccessControlsService struct {
	s *Service // parent service; supplies the HTTP client and base path
}
126
127func NewBucketsService(s *Service) *BucketsService {
128 rs := &BucketsService{s: s}
129 return rs
130}
131
// BucketsService provides the buckets methods of the API.
type BucketsService struct {
	s *Service // parent service; supplies the HTTP client and base path
}
135
136func NewChannelsService(s *Service) *ChannelsService {
137 rs := &ChannelsService{s: s}
138 return rs
139}
140
// ChannelsService provides the channels methods of the API.
type ChannelsService struct {
	s *Service // parent service; supplies the HTTP client and base path
}
144
145func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService {
146 rs := &DefaultObjectAccessControlsService{s: s}
147 return rs
148}
149
// DefaultObjectAccessControlsService provides the
// defaultObjectAccessControls methods of the API.
type DefaultObjectAccessControlsService struct {
	s *Service // parent service; supplies the HTTP client and base path
}
153
154func NewNotificationsService(s *Service) *NotificationsService {
155 rs := &NotificationsService{s: s}
156 return rs
157}
158
// NotificationsService provides the notifications methods of the API.
type NotificationsService struct {
	s *Service // parent service; supplies the HTTP client and base path
}
162
163func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {
164 rs := &ObjectAccessControlsService{s: s}
165 return rs
166}
167
// ObjectAccessControlsService provides the objectAccessControls methods
// of the API.
type ObjectAccessControlsService struct {
	s *Service // parent service; supplies the HTTP client and base path
}
171
172func NewObjectsService(s *Service) *ObjectsService {
173 rs := &ObjectsService{s: s}
174 return rs
175}
176
// ObjectsService provides the objects methods of the API.
type ObjectsService struct {
	s *Service // parent service; supplies the HTTP client and base path
}
180
181func NewProjectsService(s *Service) *ProjectsService {
182 rs := &ProjectsService{s: s}
183 rs.ServiceAccount = NewProjectsServiceAccountService(s)
184 return rs
185}
186
// ProjectsService provides the projects methods of the API.
type ProjectsService struct {
	s *Service // parent service; supplies the HTTP client and base path

	ServiceAccount *ProjectsServiceAccountService
}
192
193func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService {
194 rs := &ProjectsServiceAccountService{s: s}
195 return rs
196}
197
// ProjectsServiceAccountService provides the projects.serviceAccount
// methods of the API.
type ProjectsServiceAccountService struct {
	s *Service // parent service; supplies the HTTP client and base path
}
201
// Bucket: A bucket.
type Bucket struct {
	// Acl: Access controls on the bucket.
	Acl []*BucketAccessControl `json:"acl,omitempty"`

	// Billing: The bucket's billing configuration.
	Billing *BucketBilling `json:"billing,omitempty"`

	// Cors: The bucket's Cross-Origin Resource Sharing (CORS)
	// configuration.
	Cors []*BucketCors `json:"cors,omitempty"`

	// DefaultEventBasedHold: The default value for event-based hold on
	// newly created objects in this bucket. Event-based hold is a way to
	// retain objects indefinitely until an event occurs, signified by the
	// hold's release. After being released, such objects will be subject to
	// bucket-level retention (if any). One sample use case of this flag is
	// for banks to hold loan documents for at least 3 years after loan is
	// paid in full. Here, bucket-level retention is 3 years and the event
	// is loan being paid in full. In this example, these objects will be
	// held intact for any number of years until the event has occurred
	// (event-based hold on the object is released) and then 3 more years
	// after that. That means retention duration of the objects begins from
	// the moment event-based hold transitioned from true to false. Objects
	// under event-based hold cannot be deleted, overwritten or archived
	// until the hold is removed.
	DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"`

	// DefaultObjectAcl: Default access controls to apply to new objects
	// when no ACL is provided.
	DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`

	// Encryption: Encryption configuration for a bucket.
	Encryption *BucketEncryption `json:"encryption,omitempty"`

	// Etag: HTTP 1.1 Entity tag for the bucket.
	Etag string `json:"etag,omitempty"`

	// IamConfiguration: The bucket's IAM configuration.
	IamConfiguration *BucketIamConfiguration `json:"iamConfiguration,omitempty"`

	// Id: The ID of the bucket. For buckets, the id and name properties are
	// the same.
	Id string `json:"id,omitempty"`

	// Kind: The kind of item this is. For buckets, this is always
	// storage#bucket.
	Kind string `json:"kind,omitempty"`

	// Labels: User-provided labels, in key/value pairs.
	Labels map[string]string `json:"labels,omitempty"`

	// Lifecycle: The bucket's lifecycle configuration. See lifecycle
	// management for more information.
	Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"`

	// Location: The location of the bucket. Object data for objects in the
	// bucket resides in physical storage within this region. Defaults to
	// US. See the developer's guide for the authoritative list.
	Location string `json:"location,omitempty"`

	// Logging: The bucket's logging configuration, which defines the
	// destination bucket and optional name prefix for the current bucket's
	// logs.
	Logging *BucketLogging `json:"logging,omitempty"`

	// Metageneration: The metadata generation of this bucket.
	// (Encoded as a JSON string on the wire; see the ",string" tag.)
	Metageneration int64 `json:"metageneration,omitempty,string"`

	// Name: The name of the bucket.
	Name string `json:"name,omitempty"`

	// Owner: The owner of the bucket. This is always the project team's
	// owner group.
	Owner *BucketOwner `json:"owner,omitempty"`

	// ProjectNumber: The project number of the project the bucket belongs
	// to. (Encoded as a JSON string on the wire.)
	ProjectNumber uint64 `json:"projectNumber,omitempty,string"`

	// RetentionPolicy: The bucket's retention policy. The retention policy
	// enforces a minimum retention time for all objects contained in the
	// bucket, based on their creation time. Any attempt to overwrite or
	// delete objects younger than the retention period will result in a
	// PERMISSION_DENIED error. An unlocked retention policy can be modified
	// or removed from the bucket via a storage.buckets.update operation. A
	// locked retention policy cannot be removed or shortened in duration
	// for the lifetime of the bucket. Attempting to remove or decrease
	// period of a locked retention policy will result in a
	// PERMISSION_DENIED error.
	RetentionPolicy *BucketRetentionPolicy `json:"retentionPolicy,omitempty"`

	// SelfLink: The URI of this bucket.
	SelfLink string `json:"selfLink,omitempty"`

	// StorageClass: The bucket's default storage class, used whenever no
	// storageClass is specified for a newly-created object. This defines
	// how objects in the bucket are stored and determines the SLA and the
	// cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD,
	// NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value
	// is not specified when the bucket is created, it will default to
	// STANDARD. For more information, see storage classes.
	StorageClass string `json:"storageClass,omitempty"`

	// TimeCreated: The creation time of the bucket in RFC 3339 format.
	TimeCreated string `json:"timeCreated,omitempty"`

	// Updated: The modification time of the bucket in RFC 3339 format.
	Updated string `json:"updated,omitempty"`

	// Versioning: The bucket's versioning configuration.
	Versioning *BucketVersioning `json:"versioning,omitempty"`

	// Website: The bucket's website configuration, controlling how the
	// service behaves when accessing bucket contents as a web site. See the
	// Static Website Examples for more information.
	Website *BucketWebsite `json:"website,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields lists field names (e.g. "Acl") to include in API
	// requests even when empty; by default empty fields are omitted.
	// Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "Acl") to send as JSON null in
	// API requests; each listed field must have an empty value. Useful
	// for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
340
341func (s *Bucket) MarshalJSON() ([]byte, error) {
342 type NoMethod Bucket
343 raw := NoMethod(*s)
344 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
345}
346
// BucketBilling: The bucket's billing configuration.
type BucketBilling struct {
	// RequesterPays: When set to true, Requester Pays is enabled for this
	// bucket.
	RequesterPays bool `json:"requesterPays,omitempty"`

	// ForceSendFields lists field names (e.g. "RequesterPays") to include
	// in API requests even when empty; by default empty fields are
	// omitted. Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "RequesterPays") to send as JSON
	// null in API requests; each listed field must have an empty value.
	// Useful for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
369
370func (s *BucketBilling) MarshalJSON() ([]byte, error) {
371 type NoMethod BucketBilling
372 raw := NoMethod(*s)
373 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
374}
375
// BucketCors: One entry in the bucket's Cross-Origin Resource Sharing
// (CORS) configuration.
type BucketCors struct {
	// MaxAgeSeconds: The value, in seconds, to return in the
	// Access-Control-Max-Age header used in preflight responses.
	MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"`

	// Method: The list of HTTP methods on which to include CORS response
	// headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
	// of methods, and means "any method".
	Method []string `json:"method,omitempty"`

	// Origin: The list of Origins eligible to receive CORS response
	// headers. Note: "*" is permitted in the list of origins, and means
	// "any Origin".
	Origin []string `json:"origin,omitempty"`

	// ResponseHeader: The list of HTTP headers other than the simple
	// response headers to give permission for the user-agent to share
	// across domains.
	ResponseHeader []string `json:"responseHeader,omitempty"`

	// ForceSendFields lists field names (e.g. "MaxAgeSeconds") to include
	// in API requests even when empty; by default empty fields are
	// omitted. Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "MaxAgeSeconds") to send as JSON
	// null in API requests; each listed field must have an empty value.
	// Useful for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
412
413func (s *BucketCors) MarshalJSON() ([]byte, error) {
414 type NoMethod BucketCors
415 raw := NoMethod(*s)
416 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
417}
418
// BucketEncryption: Encryption configuration for a bucket.
type BucketEncryption struct {
	// DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt
	// objects inserted into this bucket, if no encryption method is
	// specified.
	DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"`

	// ForceSendFields lists field names (e.g. "DefaultKmsKeyName") to
	// include in API requests even when empty; by default empty fields
	// are omitted. Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "DefaultKmsKeyName") to send as
	// JSON null in API requests; each listed field must have an empty
	// value. Useful for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
443
444func (s *BucketEncryption) MarshalJSON() ([]byte, error) {
445 type NoMethod BucketEncryption
446 raw := NoMethod(*s)
447 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
448}
449
// BucketIamConfiguration: The bucket's IAM configuration.
type BucketIamConfiguration struct {
	// BucketPolicyOnly: The bucket's Bucket Policy Only configuration; see
	// BucketIamConfigurationBucketPolicyOnly.
	BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"`

	// ForceSendFields lists field names (e.g. "BucketPolicyOnly") to
	// include in API requests even when empty; by default empty fields
	// are omitted. Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "BucketPolicyOnly") to send as
	// JSON null in API requests; each listed field must have an empty
	// value. Useful for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
471
472func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) {
473 type NoMethod BucketIamConfiguration
474 raw := NoMethod(*s)
475 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
476}
477
// BucketIamConfigurationBucketPolicyOnly: The bucket's Bucket Policy
// Only settings within its IAM configuration.
type BucketIamConfigurationBucketPolicyOnly struct {
	// Enabled: If set, access checks only use bucket-level IAM policies or
	// above.
	Enabled bool `json:"enabled,omitempty"`

	// LockedTime: The deadline time for changing
	// iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC
	// 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed
	// from true to false until the locked time, after which the field is
	// immutable.
	LockedTime string `json:"lockedTime,omitempty"`

	// ForceSendFields lists field names (e.g. "Enabled") to include in
	// API requests even when empty; by default empty fields are omitted.
	// Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "Enabled") to send as JSON null
	// in API requests; each listed field must have an empty value. Useful
	// for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
506
507func (s *BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) {
508 type NoMethod BucketIamConfigurationBucketPolicyOnly
509 raw := NoMethod(*s)
510 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
511}
512
// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle
// management for more information.
type BucketLifecycle struct {
	// Rule: A lifecycle management rule, which is made of an action to take
	// and the condition(s) under which the action will be taken.
	Rule []*BucketLifecycleRule `json:"rule,omitempty"`

	// ForceSendFields lists field names (e.g. "Rule") to include in API
	// requests even when empty; by default empty fields are omitted.
	// Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "Rule") to send as JSON null in
	// API requests; each listed field must have an empty value. Useful
	// for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
536
537func (s *BucketLifecycle) MarshalJSON() ([]byte, error) {
538 type NoMethod BucketLifecycle
539 raw := NoMethod(*s)
540 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
541}
542
// BucketLifecycleRule: A single lifecycle management rule: an action to
// take and the condition(s) under which it will be taken.
type BucketLifecycleRule struct {
	// Action: The action to take.
	Action *BucketLifecycleRuleAction `json:"action,omitempty"`

	// Condition: The condition(s) under which the action will be taken.
	Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"`

	// ForceSendFields lists field names (e.g. "Action") to include in API
	// requests even when empty; by default empty fields are omitted.
	// Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "Action") to send as JSON null
	// in API requests; each listed field must have an empty value. Useful
	// for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
566
567func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) {
568 type NoMethod BucketLifecycleRule
569 raw := NoMethod(*s)
570 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
571}
572
// BucketLifecycleRuleAction: The action to take.
type BucketLifecycleRuleAction struct {
	// StorageClass: Target storage class. Required iff the type of the
	// action is SetStorageClass.
	StorageClass string `json:"storageClass,omitempty"`

	// Type: Type of the action. Currently, only Delete and SetStorageClass
	// are supported.
	Type string `json:"type,omitempty"`

	// ForceSendFields lists field names (e.g. "StorageClass") to include
	// in API requests even when empty; by default empty fields are
	// omitted. Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "StorageClass") to send as JSON
	// null in API requests; each listed field must have an empty value.
	// Useful for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
599
600func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) {
601 type NoMethod BucketLifecycleRuleAction
602 raw := NoMethod(*s)
603 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
604}
605
// BucketLifecycleRuleCondition: The condition(s) under which the action
// will be taken.
type BucketLifecycleRuleCondition struct {
	// Age: Age of an object (in days). This condition is satisfied when an
	// object reaches the specified age.
	Age int64 `json:"age,omitempty"`

	// CreatedBefore: A date in RFC 3339 format with only the date part (for
	// instance, "2013-01-15"). This condition is satisfied when an object
	// is created before midnight of the specified date in UTC.
	CreatedBefore string `json:"createdBefore,omitempty"`

	// IsLive: Relevant only for versioned objects. If the value is true,
	// this condition matches live objects; if the value is false, it
	// matches archived objects. (Pointer type distinguishes "unset" from
	// an explicit false.)
	IsLive *bool `json:"isLive,omitempty"`

	// MatchesPattern: A regular expression that satisfies the RE2 syntax.
	// This condition is satisfied when the name of the object matches the
	// RE2 pattern. Note: This feature is currently in the "Early Access"
	// launch stage and is only available to a whitelisted set of users;
	// that means that this feature may be changed in backward-incompatible
	// ways and that it is not guaranteed to be released.
	MatchesPattern string `json:"matchesPattern,omitempty"`

	// MatchesStorageClass: Objects having any of the storage classes
	// specified by this condition will be matched. Values include
	// MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and
	// DURABLE_REDUCED_AVAILABILITY.
	MatchesStorageClass []string `json:"matchesStorageClass,omitempty"`

	// NumNewerVersions: Relevant only for versioned objects. If the value
	// is N, this condition is satisfied when there are at least N versions
	// (including the live version) newer than this version of the object.
	NumNewerVersions int64 `json:"numNewerVersions,omitempty"`

	// ForceSendFields lists field names (e.g. "Age") to include in API
	// requests even when empty; by default empty fields are omitted.
	// Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "Age") to send as JSON null in
	// API requests; each listed field must have an empty value. Useful
	// for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
658
659func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) {
660 type NoMethod BucketLifecycleRuleCondition
661 raw := NoMethod(*s)
662 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
663}
664
// BucketLogging: The bucket's logging configuration, which defines the
// destination bucket and optional name prefix for the current bucket's
// logs.
type BucketLogging struct {
	// LogBucket: The destination bucket where the current bucket's logs
	// should be placed.
	LogBucket string `json:"logBucket,omitempty"`

	// LogObjectPrefix: A prefix for log object names.
	LogObjectPrefix string `json:"logObjectPrefix,omitempty"`

	// ForceSendFields lists field names (e.g. "LogBucket") to include in
	// API requests even when empty; by default empty fields are omitted.
	// Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "LogBucket") to send as JSON
	// null in API requests; each listed field must have an empty value.
	// Useful for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
692
693func (s *BucketLogging) MarshalJSON() ([]byte, error) {
694 type NoMethod BucketLogging
695 raw := NoMethod(*s)
696 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
697}
698
// BucketOwner: The owner of the bucket. This is always the project
// team's owner group.
type BucketOwner struct {
	// Entity: The entity, in the form project-owner-projectId.
	Entity string `json:"entity,omitempty"`

	// EntityId: The ID for the entity.
	EntityId string `json:"entityId,omitempty"`

	// ForceSendFields lists field names (e.g. "Entity") to include in API
	// requests even when empty; by default empty fields are omitted.
	// Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "Entity") to send as JSON null
	// in API requests; each listed field must have an empty value. Useful
	// for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
724
725func (s *BucketOwner) MarshalJSON() ([]byte, error) {
726 type NoMethod BucketOwner
727 raw := NoMethod(*s)
728 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
729}
730
// BucketRetentionPolicy: The bucket's retention policy. The retention
// policy enforces a minimum retention time for all objects contained in
// the bucket, based on their creation time. Any attempt to overwrite or
// delete objects younger than the retention period will result in a
// PERMISSION_DENIED error. An unlocked retention policy can be modified
// or removed from the bucket via a storage.buckets.update operation. A
// locked retention policy cannot be removed or shortened in duration
// for the lifetime of the bucket. Attempting to remove or decrease
// period of a locked retention policy will result in a
// PERMISSION_DENIED error.
type BucketRetentionPolicy struct {
	// EffectiveTime: Server-determined value that indicates the time from
	// which policy was enforced and effective. This value is in RFC 3339
	// format.
	EffectiveTime string `json:"effectiveTime,omitempty"`

	// IsLocked: Once locked, an object retention policy cannot be modified.
	IsLocked bool `json:"isLocked,omitempty"`

	// RetentionPeriod: The duration in seconds that objects need to be
	// retained. Retention duration must be greater than zero and less than
	// 100 years. Note that enforcement of retention periods less than a day
	// is not guaranteed. Such periods should only be used for testing
	// purposes. (Encoded as a JSON string on the wire; see the ",string"
	// tag.)
	RetentionPeriod int64 `json:"retentionPeriod,omitempty,string"`

	// ForceSendFields lists field names (e.g. "EffectiveTime") to include
	// in API requests even when empty; by default empty fields are
	// omitted. Useful for including empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields lists field names (e.g. "EffectiveTime") to send as JSON
	// null in API requests; each listed field must have an empty value.
	// Useful for including null fields in Patch requests.
	NullFields []string `json:"-"`
}
773
774func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) {
775 type NoMethod BucketRetentionPolicy
776 raw := NoMethod(*s)
777 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
778}
779
// BucketVersioning: The bucket's versioning configuration.
type BucketVersioning struct {
	// Enabled: While set to true, versioning is fully enabled for this
	// bucket.
	Enabled bool `json:"enabled,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Enabled") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Enabled") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *BucketVersioning) MarshalJSON() ([]byte, error) {
	// NoMethod shares BucketVersioning's fields but has no methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod BucketVersioning
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
808
// BucketWebsite: The bucket's website configuration, controlling how
// the service behaves when accessing bucket contents as a web site. See
// the Static Website Examples for more information.
type BucketWebsite struct {
	// MainPageSuffix: If the requested object path is missing, the service
	// will ensure the path has a trailing '/', append this suffix, and
	// attempt to retrieve the resulting object. This allows the creation of
	// index.html objects to represent directory pages.
	MainPageSuffix string `json:"mainPageSuffix,omitempty"`

	// NotFoundPage: If the requested object path is missing, and any
	// mainPageSuffix object is missing, if applicable, the service will
	// return the named object from this bucket as the content for a 404 Not
	// Found result.
	NotFoundPage string `json:"notFoundPage,omitempty"`

	// ForceSendFields is a list of field names (e.g. "MainPageSuffix") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "MainPageSuffix") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *BucketWebsite) MarshalJSON() ([]byte, error) {
	// NoMethod shares BucketWebsite's fields but has no methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod BucketWebsite
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
848
// BucketAccessControl: An access-control entry.
type BucketAccessControl struct {
	// Bucket: The name of the bucket.
	Bucket string `json:"bucket,omitempty"`

	// Domain: The domain associated with the entity, if any.
	Domain string `json:"domain,omitempty"`

	// Email: The email address associated with the entity, if any.
	Email string `json:"email,omitempty"`

	// Entity: The entity holding the permission, in one of the following
	// forms:
	// - user-userId
	// - user-email
	// - group-groupId
	// - group-email
	// - domain-domain
	// - project-team-projectId
	// - allUsers
	// - allAuthenticatedUsers Examples:
	// - The user liz@example.com would be user-liz@example.com.
	// - The group example@googlegroups.com would be
	// group-example@googlegroups.com.
	// - To refer to all members of the Google Apps for Business domain
	// example.com, the entity would be domain-example.com.
	Entity string `json:"entity,omitempty"`

	// EntityId: The ID for the entity, if any.
	EntityId string `json:"entityId,omitempty"`

	// Etag: HTTP 1.1 Entity tag for the access-control entry.
	Etag string `json:"etag,omitempty"`

	// Id: The ID of the access-control entry.
	Id string `json:"id,omitempty"`

	// Kind: The kind of item this is. For bucket access control entries,
	// this is always storage#bucketAccessControl.
	Kind string `json:"kind,omitempty"`

	// ProjectTeam: The project team associated with the entity, if any.
	ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"`

	// Role: The access permission for the entity.
	Role string `json:"role,omitempty"`

	// SelfLink: The link to this access-control entry.
	SelfLink string `json:"selfLink,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Bucket") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Bucket") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *BucketAccessControl) MarshalJSON() ([]byte, error) {
	// NoMethod shares BucketAccessControl's fields but has no methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod BucketAccessControl
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
925
// BucketAccessControlProjectTeam: The project team associated with the
// entity, if any.
type BucketAccessControlProjectTeam struct {
	// ProjectNumber: The project number.
	ProjectNumber string `json:"projectNumber,omitempty"`

	// Team: The team.
	Team string `json:"team,omitempty"`

	// ForceSendFields is a list of field names (e.g. "ProjectNumber") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "ProjectNumber") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
	// NoMethod shares BucketAccessControlProjectTeam's fields but has no
	// methods, so marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod BucketAccessControlProjectTeam
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
957
// BucketAccessControls: An access-control list.
type BucketAccessControls struct {
	// Items: The list of items.
	Items []*BucketAccessControl `json:"items,omitempty"`

	// Kind: The kind of item this is. For lists of bucket access control
	// entries, this is always storage#bucketAccessControls.
	Kind string `json:"kind,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Items") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Items") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *BucketAccessControls) MarshalJSON() ([]byte, error) {
	// NoMethod shares BucketAccessControls' fields but has no methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod BucketAccessControls
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
993
// Buckets: A list of buckets.
type Buckets struct {
	// Items: The list of items.
	Items []*Bucket `json:"items,omitempty"`

	// Kind: The kind of item this is. For lists of buckets, this is always
	// storage#buckets.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: The continuation token, used to page through large
	// result sets. Provide this value in a subsequent request to return the
	// next page of results.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Items") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Items") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *Buckets) MarshalJSON() ([]byte, error) {
	// NoMethod shares Buckets' fields but has no methods, so marshaling
	// it cannot recurse back into this MarshalJSON.
	type NoMethod Buckets
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
1034
// Channel: A notification channel used to watch for resource changes.
type Channel struct {
	// Address: The address where notifications are delivered for this
	// channel.
	Address string `json:"address,omitempty"`

	// Expiration: Date and time of notification channel expiration,
	// expressed as a Unix timestamp, in milliseconds. Optional.
	Expiration int64 `json:"expiration,omitempty,string"`

	// Id: A UUID or similar unique string that identifies this channel.
	Id string `json:"id,omitempty"`

	// Kind: Identifies this as a notification channel used to watch for
	// changes to a resource. Value: the fixed string "api#channel".
	Kind string `json:"kind,omitempty"`

	// Params: Additional parameters controlling delivery channel behavior.
	// Optional.
	Params map[string]string `json:"params,omitempty"`

	// Payload: A Boolean value to indicate whether payload is wanted.
	// Optional.
	Payload bool `json:"payload,omitempty"`

	// ResourceId: An opaque ID that identifies the resource being watched
	// on this channel. Stable across different API versions.
	ResourceId string `json:"resourceId,omitempty"`

	// ResourceUri: A version-specific identifier for the watched resource.
	ResourceUri string `json:"resourceUri,omitempty"`

	// Token: An arbitrary string delivered to the target address with each
	// notification delivered over this channel. Optional.
	Token string `json:"token,omitempty"`

	// Type: The type of delivery mechanism used for this channel.
	Type string `json:"type,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Address") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Address") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *Channel) MarshalJSON() ([]byte, error) {
	// NoMethod shares Channel's fields but has no methods, so marshaling
	// it cannot recurse back into this MarshalJSON.
	type NoMethod Channel
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
1100
// ComposeRequest: A Compose request.
type ComposeRequest struct {
	// Destination: Properties of the resulting object.
	Destination *Object `json:"destination,omitempty"`

	// Kind: The kind of item this is.
	Kind string `json:"kind,omitempty"`

	// SourceObjects: The list of source objects that will be concatenated
	// into a single object.
	SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Destination") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Destination") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *ComposeRequest) MarshalJSON() ([]byte, error) {
	// NoMethod shares ComposeRequest's fields but has no methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod ComposeRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
1135
// ComposeRequestSourceObjects: A single source object for a Compose
// request, naming the object (and optionally a specific generation)
// plus any preconditions on it.
type ComposeRequestSourceObjects struct {
	// Generation: The generation of this object to use as the source.
	Generation int64 `json:"generation,omitempty,string"`

	// Name: The source object's name. All source objects must reside in the
	// same bucket.
	Name string `json:"name,omitempty"`

	// ObjectPreconditions: Conditions that must be met for this operation
	// to execute.
	ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Generation") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Generation") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) {
	// NoMethod shares ComposeRequestSourceObjects' fields but has no
	// methods, so marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod ComposeRequestSourceObjects
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
1170
// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must
// be met for this operation to execute.
type ComposeRequestSourceObjectsObjectPreconditions struct {
	// IfGenerationMatch: Only perform the composition if the generation of
	// the source object that would be used matches this value. If this
	// value and a generation are both specified, they must be the same
	// value or the call will fail.
	IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"`

	// ForceSendFields is a list of field names (e.g. "IfGenerationMatch")
	// to unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "IfGenerationMatch") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) {
	// NoMethod shares the receiver type's fields but has no methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod ComposeRequestSourceObjectsObjectPreconditions
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
1203
// Notification: A subscription to receive Google PubSub notifications.
type Notification struct {
	// CustomAttributes: An optional list of additional attributes to attach
	// to each Cloud PubSub message published for this notification
	// subscription.
	CustomAttributes map[string]string `json:"custom_attributes,omitempty"`

	// Etag: HTTP 1.1 Entity tag for this subscription notification.
	Etag string `json:"etag,omitempty"`

	// EventTypes: If present, only send notifications about listed event
	// types. If empty, send notifications for all event types.
	EventTypes []string `json:"event_types,omitempty"`

	// Id: The ID of the notification.
	Id string `json:"id,omitempty"`

	// Kind: The kind of item this is. For notifications, this is always
	// storage#notification.
	Kind string `json:"kind,omitempty"`

	// ObjectNamePrefix: If present, only apply this notification
	// configuration to object names that begin with this prefix.
	ObjectNamePrefix string `json:"object_name_prefix,omitempty"`

	// PayloadFormat: The desired content of the Payload.
	PayloadFormat string `json:"payload_format,omitempty"`

	// SelfLink: The canonical URL of this notification.
	SelfLink string `json:"selfLink,omitempty"`

	// Topic: The Cloud PubSub topic to which this subscription publishes.
	// Formatted as:
	// '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topi
	// c}'
	Topic string `json:"topic,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "CustomAttributes") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "CustomAttributes") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *Notification) MarshalJSON() ([]byte, error) {
	// NoMethod shares Notification's fields but has no methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod Notification
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
1268
// Notifications: A list of notification subscriptions.
type Notifications struct {
	// Items: The list of items.
	Items []*Notification `json:"items,omitempty"`

	// Kind: The kind of item this is. For lists of notifications, this is
	// always storage#notifications.
	Kind string `json:"kind,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Items") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Items") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *Notifications) MarshalJSON() ([]byte, error) {
	// NoMethod shares Notifications' fields but has no methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type NoMethod Notifications
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
1304
// Object: An object.
type Object struct {
	// Acl: Access controls on the object.
	Acl []*ObjectAccessControl `json:"acl,omitempty"`

	// Bucket: The name of the bucket containing this object.
	Bucket string `json:"bucket,omitempty"`

	// CacheControl: Cache-Control directive for the object data. If
	// omitted, and the object is accessible to all anonymous users, the
	// default will be public, max-age=3600.
	CacheControl string `json:"cacheControl,omitempty"`

	// ComponentCount: Number of underlying components that make up this
	// object. Components are accumulated by compose operations.
	ComponentCount int64 `json:"componentCount,omitempty"`

	// ContentDisposition: Content-Disposition of the object data.
	ContentDisposition string `json:"contentDisposition,omitempty"`

	// ContentEncoding: Content-Encoding of the object data.
	ContentEncoding string `json:"contentEncoding,omitempty"`

	// ContentLanguage: Content-Language of the object data.
	ContentLanguage string `json:"contentLanguage,omitempty"`

	// ContentType: Content-Type of the object data. If an object is stored
	// without a Content-Type, it is served as application/octet-stream.
	ContentType string `json:"contentType,omitempty"`

	// Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B;
	// encoded using base64 in big-endian byte order. For more information
	// about using the CRC32c checksum, see Hashes and ETags: Best
	// Practices.
	Crc32c string `json:"crc32c,omitempty"`

	// CustomerEncryption: Metadata of customer-supplied encryption key, if
	// the object is encrypted by such a key.
	CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"`

	// Etag: HTTP 1.1 Entity tag for the object.
	Etag string `json:"etag,omitempty"`

	// EventBasedHold: Whether an object is under event-based hold.
	// Event-based hold is a way to retain objects until an event occurs,
	// which is signified by the hold's release (i.e. this value is set to
	// false). After being released (set to false), such objects will be
	// subject to bucket-level retention (if any). One sample use case of
	// this flag is for banks to hold loan documents for at least 3 years
	// after loan is paid in full. Here, bucket-level retention is 3 years
	// and the event is the loan being paid in full. In this example, these
	// objects will be held intact for any number of years until the event
	// has occurred (event-based hold on the object is released) and then 3
	// more years after that. That means retention duration of the objects
	// begins from the moment event-based hold transitioned from true to
	// false.
	EventBasedHold bool `json:"eventBasedHold,omitempty"`

	// Generation: The content generation of this object. Used for object
	// versioning.
	Generation int64 `json:"generation,omitempty,string"`

	// Id: The ID of the object, including the bucket name, object name, and
	// generation number.
	Id string `json:"id,omitempty"`

	// Kind: The kind of item this is. For objects, this is always
	// storage#object.
	Kind string `json:"kind,omitempty"`

	// KmsKeyName: Cloud KMS Key used to encrypt this object, if the object
	// is encrypted by such a key.
	KmsKeyName string `json:"kmsKeyName,omitempty"`

	// Md5Hash: MD5 hash of the data; encoded using base64. For more
	// information about using the MD5 hash, see Hashes and ETags: Best
	// Practices.
	Md5Hash string `json:"md5Hash,omitempty"`

	// MediaLink: Media download link.
	MediaLink string `json:"mediaLink,omitempty"`

	// Metadata: User-provided metadata, in key/value pairs.
	Metadata map[string]string `json:"metadata,omitempty"`

	// Metageneration: The version of the metadata for this object at this
	// generation. Used for preconditions and for detecting changes in
	// metadata. A metageneration number is only meaningful in the context
	// of a particular generation of a particular object.
	Metageneration int64 `json:"metageneration,omitempty,string"`

	// Name: The name of the object. Required if not specified by URL
	// parameter.
	Name string `json:"name,omitempty"`

	// Owner: The owner of the object. This will always be the uploader of
	// the object.
	Owner *ObjectOwner `json:"owner,omitempty"`

	// RetentionExpirationTime: A server-determined value that specifies the
	// earliest time that the object's retention period expires. This value
	// is in RFC 3339 format. Note 1: This field is not provided for objects
	// with an active event-based hold, since retention expiration is
	// unknown until the hold is removed. Note 2: This value can be provided
	// even when temporary hold is set (so that the user can reason about
	// policy without having to first unset the temporary hold).
	RetentionExpirationTime string `json:"retentionExpirationTime,omitempty"`

	// SelfLink: The link to this object.
	SelfLink string `json:"selfLink,omitempty"`

	// Size: Content-Length of the data in bytes.
	Size uint64 `json:"size,omitempty,string"`

	// StorageClass: Storage class of the object.
	StorageClass string `json:"storageClass,omitempty"`

	// TemporaryHold: Whether an object is under temporary hold. While this
	// flag is set to true, the object is protected against deletion and
	// overwrites. A common use case of this flag is regulatory
	// investigations where objects need to be retained while the
	// investigation is ongoing. Note that unlike event-based hold,
	// temporary hold does not impact retention expiration time of an
	// object.
	TemporaryHold bool `json:"temporaryHold,omitempty"`

	// TimeCreated: The creation time of the object in RFC 3339 format.
	TimeCreated string `json:"timeCreated,omitempty"`

	// TimeDeleted: The deletion time of the object in RFC 3339 format. Will
	// be returned if and only if this version of the object has been
	// deleted.
	TimeDeleted string `json:"timeDeleted,omitempty"`

	// TimeStorageClassUpdated: The time at which the object's storage class
	// was last changed. When the object is initially created, it will be
	// set to timeCreated.
	TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"`

	// Updated: The modification time of the object metadata in RFC 3339
	// format.
	Updated string `json:"updated,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Acl") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Acl") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler. Empty fields named in
// s.ForceSendFields are serialized anyway, and fields named in
// s.NullFields are serialized as JSON null.
func (s *Object) MarshalJSON() ([]byte, error) {
	// NoMethod shares Object's fields but has no methods, so marshaling
	// it cannot recurse back into this MarshalJSON.
	type NoMethod Object
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
1474
// ObjectCustomerEncryption: Metadata of the customer-supplied
// encryption key, if the object is encrypted by such a key. Only the
// algorithm and a hash of the key are recorded, never the key itself.
type ObjectCustomerEncryption struct {
	// EncryptionAlgorithm: The encryption algorithm.
	EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"`

	// KeySha256: SHA256 hash value of the encryption key. Lets a caller
	// check which key was used without the key being stored server-side.
	KeySha256 string `json:"keySha256,omitempty"`

	// ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm")
	// to unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EncryptionAlgorithm") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
1501
1502func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) {
1503 type NoMethod ObjectCustomerEncryption
1504 raw := NoMethod(*s)
1505 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1506}
1507
// ObjectOwner: The owner of the object. This will always be the
// uploader of the object. Read-only: populated by the server.
type ObjectOwner struct {
	// Entity: The entity, in the form user-userId.
	Entity string `json:"entity,omitempty"`

	// EntityId: The ID for the entity.
	EntityId string `json:"entityId,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Entity") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Entity") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1533
1534func (s *ObjectOwner) MarshalJSON() ([]byte, error) {
1535 type NoMethod ObjectOwner
1536 raw := NoMethod(*s)
1537 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1538}
1539
// ObjectAccessControl: A single access-control (ACL) entry granting a
// Role to an Entity on an object.
type ObjectAccessControl struct {
	// Bucket: The name of the bucket.
	Bucket string `json:"bucket,omitempty"`

	// Domain: The domain associated with the entity, if any.
	Domain string `json:"domain,omitempty"`

	// Email: The email address associated with the entity, if any.
	Email string `json:"email,omitempty"`

	// Entity: The entity holding the permission, in one of the following
	// forms:
	// - user-userId
	// - user-email
	// - group-groupId
	// - group-email
	// - domain-domain
	// - project-team-projectId
	// - allUsers
	// - allAuthenticatedUsers Examples:
	// - The user liz@example.com would be user-liz@example.com.
	// - The group example@googlegroups.com would be
	// group-example@googlegroups.com.
	// - To refer to all members of the Google Apps for Business domain
	// example.com, the entity would be domain-example.com.
	Entity string `json:"entity,omitempty"`

	// EntityId: The ID for the entity, if any.
	EntityId string `json:"entityId,omitempty"`

	// Etag: HTTP 1.1 Entity tag for the access-control entry.
	Etag string `json:"etag,omitempty"`

	// Generation: The content generation of the object, if applied to an
	// object. Carried as a decimal string on the wire (note the ",string"
	// tag) to avoid JSON number precision loss.
	Generation int64 `json:"generation,omitempty,string"`

	// Id: The ID of the access-control entry.
	Id string `json:"id,omitempty"`

	// Kind: The kind of item this is. For object access control entries,
	// this is always storage#objectAccessControl.
	Kind string `json:"kind,omitempty"`

	// Object: The name of the object, if applied to an object.
	Object string `json:"object,omitempty"`

	// ProjectTeam: The project team associated with the entity, if any.
	ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"`

	// Role: The access permission for the entity.
	Role string `json:"role,omitempty"`

	// SelfLink: The link to this access-control entry.
	SelfLink string `json:"selfLink,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Bucket") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Bucket") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1617
1618func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) {
1619 type NoMethod ObjectAccessControl
1620 raw := NoMethod(*s)
1621 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1622}
1623
// ObjectAccessControlProjectTeam: The project team associated with the
// entity, if any. Identifies a project-number/team pair for
// project-team-* entities.
type ObjectAccessControlProjectTeam struct {
	// ProjectNumber: The project number.
	ProjectNumber string `json:"projectNumber,omitempty"`

	// Team: The team.
	Team string `json:"team,omitempty"`

	// ForceSendFields is a list of field names (e.g. "ProjectNumber") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "ProjectNumber") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1649
1650func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
1651 type NoMethod ObjectAccessControlProjectTeam
1652 raw := NoMethod(*s)
1653 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1654}
1655
// ObjectAccessControls: An access-control list: the collection of ACL
// entries attached to a single object.
type ObjectAccessControls struct {
	// Items: The list of items.
	Items []*ObjectAccessControl `json:"items,omitempty"`

	// Kind: The kind of item this is. For lists of object access control
	// entries, this is always storage#objectAccessControls.
	Kind string `json:"kind,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Items") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Items") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1685
1686func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) {
1687 type NoMethod ObjectAccessControls
1688 raw := NoMethod(*s)
1689 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1690}
1691
// Objects: One page of an object listing, including the continuation
// token needed to fetch the next page.
type Objects struct {
	// Items: The list of items.
	Items []*Object `json:"items,omitempty"`

	// Kind: The kind of item this is. For lists of objects, this is always
	// storage#objects.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: The continuation token, used to page through large
	// result sets. Provide this value in a subsequent request to return the
	// next page of results. Empty when there are no further pages.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Prefixes: The list of prefixes of objects matching-but-not-listed up
	// to and including the requested delimiter.
	Prefixes []string `json:"prefixes,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Items") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Items") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1730
1731func (s *Objects) MarshalJSON() ([]byte, error) {
1732 type NoMethod Objects
1733 raw := NoMethod(*s)
1734 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1735}
1736
// Policy: A bucket/object IAM policy: the set of role bindings attached
// to a single bucket or object resource.
type Policy struct {
	// Bindings: An association between a role, which comes with a set of
	// permissions, and members who may assume that role.
	Bindings []*PolicyBindings `json:"bindings,omitempty"`

	// Etag: HTTP 1.1 Entity tag for the policy.
	Etag string `json:"etag,omitempty"`

	// Kind: The kind of item this is. For policies, this is always
	// storage#policy. This field is ignored on input.
	Kind string `json:"kind,omitempty"`

	// ResourceId: The ID of the resource to which this policy belongs. Will
	// be of the form projects/_/buckets/bucket for buckets, and
	// projects/_/buckets/bucket/objects/object for objects. A specific
	// generation may be specified by appending #generationNumber to the end
	// of the object name, e.g.
	// projects/_/buckets/my-bucket/objects/data.txt#17. The current
	// generation can be denoted with #0. This field is ignored on input.
	ResourceId string `json:"resourceId,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Bindings") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Bindings") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1779
1780func (s *Policy) MarshalJSON() ([]byte, error) {
1781 type NoMethod Policy
1782 raw := NoMethod(*s)
1783 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1784}
1785
// PolicyBindings: A single binding in an IAM Policy, associating a role
// with the collection of members who may assume that role.
type PolicyBindings struct {
	// Condition: untyped in the discovery document; left as interface{}
	// by the generator. NOTE(review): presumably an IAM condition
	// expression object — verify against the discovery doc before
	// relying on its shape.
	Condition interface{} `json:"condition,omitempty"`

	// Members: A collection of identifiers for members who may assume the
	// provided role. Recognized identifiers are as follows:
	// - allUsers — A special identifier that represents anyone on the
	// internet; with or without a Google account.
	// - allAuthenticatedUsers — A special identifier that represents
	// anyone who is authenticated with a Google account or a service
	// account.
	// - user:emailid — An email address that represents a specific
	// account. For example, user:alice@gmail.com or user:joe@example.com.
	//
	// - serviceAccount:emailid — An email address that represents a
	// service account. For example,
	// serviceAccount:my-other-app@appspot.gserviceaccount.com .
	// - group:emailid — An email address that represents a Google group.
	// For example, group:admins@example.com.
	// - domain:domain — A Google Apps domain name that represents all the
	// users of that domain. For example, domain:google.com or
	// domain:example.com.
	// - projectOwner:projectid — Owners of the given project. For
	// example, projectOwner:my-example-project
	// - projectEditor:projectid — Editors of the given project. For
	// example, projectEditor:my-example-project
	// - projectViewer:projectid — Viewers of the given project. For
	// example, projectViewer:my-example-project
	Members []string `json:"members,omitempty"`

	// Role: The role to which members belong. Two types of roles are
	// supported: new IAM roles, which grant permissions that do not map
	// directly to those provided by ACLs, and legacy IAM roles, which do
	// map directly to ACL permissions. All roles are of the format
	// roles/storage.specificRole.
	// The new IAM roles are:
	// - roles/storage.admin — Full control of Google Cloud Storage
	// resources.
	// - roles/storage.objectViewer — Read-Only access to Google Cloud
	// Storage objects.
	// - roles/storage.objectCreator — Access to create objects in Google
	// Cloud Storage.
	// - roles/storage.objectAdmin — Full control of Google Cloud Storage
	// objects. The legacy IAM roles are:
	// - roles/storage.legacyObjectReader — Read-only access to objects
	// without listing. Equivalent to an ACL entry on an object with the
	// READER role.
	// - roles/storage.legacyObjectOwner — Read/write access to existing
	// objects without listing. Equivalent to an ACL entry on an object with
	// the OWNER role.
	// - roles/storage.legacyBucketReader — Read access to buckets with
	// object listing. Equivalent to an ACL entry on a bucket with the
	// READER role.
	// - roles/storage.legacyBucketWriter — Read access to buckets with
	// object listing/creation/deletion. Equivalent to an ACL entry on a
	// bucket with the WRITER role.
	// - roles/storage.legacyBucketOwner — Read and write access to
	// existing buckets with object listing/creation/deletion. Equivalent to
	// an ACL entry on a bucket with the OWNER role.
	Role string `json:"role,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Condition") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Condition") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1862
1863func (s *PolicyBindings) MarshalJSON() ([]byte, error) {
1864 type NoMethod PolicyBindings
1865 raw := NoMethod(*s)
1866 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1867}
1868
// RewriteResponse: A rewrite response, reporting the progress of an
// object copy that may span multiple requests (continue via
// RewriteToken until Done is true).
type RewriteResponse struct {
	// Done: true if the copy is finished; otherwise, false if the copy is
	// in progress. This property is always present in the response.
	Done bool `json:"done,omitempty"`

	// Kind: The kind of item this is.
	Kind string `json:"kind,omitempty"`

	// ObjectSize: The total size of the object being copied in bytes. This
	// property is always present in the response. Carried as a decimal
	// string on the wire (note the ",string" tag).
	ObjectSize int64 `json:"objectSize,omitempty,string"`

	// Resource: A resource containing the metadata for the copied-to
	// object. This property is present in the response only when copying
	// completes.
	Resource *Object `json:"resource,omitempty"`

	// RewriteToken: A token to use in subsequent requests to continue
	// copying data. This token is present in the response only when there
	// is more data to copy.
	RewriteToken string `json:"rewriteToken,omitempty"`

	// TotalBytesRewritten: The total bytes written so far, which can be
	// used to provide a waiting user with a progress indicator. This
	// property is always present in the response. Carried as a decimal
	// string on the wire (note the ",string" tag).
	TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Done") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Done") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1917
1918func (s *RewriteResponse) MarshalJSON() ([]byte, error) {
1919 type NoMethod RewriteResponse
1920 raw := NoMethod(*s)
1921 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1922}
1923
// ServiceAccount: A service account, identified by its email address.
// NOTE(review): the generated doc comments on this type read "A
// subscription to receive Google PubSub notifications" / "The ID of
// the notification" — apparently copy-pasted from the Notification
// schema in the discovery document; the field names and JSON tags
// below describe a service-account email. Verify against the
// discovery doc.
type ServiceAccount struct {
	// EmailAddress: The email address of the service account.
	EmailAddress string `json:"email_address,omitempty"`

	// Kind: The kind of item this is. NOTE(review): presumably
	// storage#serviceAccount, not storage#notification as the original
	// generated comment claimed — confirm against the API reference.
	Kind string `json:"kind,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "EmailAddress") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EmailAddress") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
1954
1955func (s *ServiceAccount) MarshalJSON() ([]byte, error) {
1956 type NoMethod ServiceAccount
1957 raw := NoMethod(*s)
1958 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
1959}
1960
// TestIamPermissionsResponse: A
// storage.(buckets|objects).testIamPermissions response, listing which
// of the requested permissions the caller actually holds.
type TestIamPermissionsResponse struct {
	// Kind: The kind of item this is.
	Kind string `json:"kind,omitempty"`

	// Permissions: The permissions held by the caller. Permissions are
	// always of the format storage.resource.capability, where resource is
	// one of buckets or objects. The supported permissions are as follows:
	//
	// - storage.buckets.delete — Delete bucket.
	// - storage.buckets.get — Read bucket metadata.
	// - storage.buckets.getIamPolicy — Read bucket IAM policy.
	// - storage.buckets.create — Create bucket.
	// - storage.buckets.list — List buckets.
	// - storage.buckets.setIamPolicy — Update bucket IAM policy.
	// - storage.buckets.update — Update bucket metadata.
	// - storage.objects.delete — Delete object.
	// - storage.objects.get — Read object data and metadata.
	// - storage.objects.getIamPolicy — Read object IAM policy.
	// - storage.objects.create — Create object.
	// - storage.objects.list — List objects.
	// - storage.objects.setIamPolicy — Update object IAM policy.
	// - storage.objects.update — Update object metadata.
	Permissions []string `json:"permissions,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Kind") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Kind") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
2007
2008func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
2009 type NoMethod TestIamPermissionsResponse
2010 raw := NoMethod(*s)
2011 return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
2012}
2013
2014// method id "storage.bucketAccessControls.delete":
2015
// BucketAccessControlsDeleteCall holds the state of a pending
// storage.bucketAccessControls.delete request. Build it with
// BucketAccessControlsService.Delete, configure it with the chainable
// setters, and execute it with Do.
type BucketAccessControlsDeleteCall struct {
	s          *Service             // parent service (client, base path, user agent)
	bucket     string               // bucket whose ACL entry is to be deleted
	entity     string               // entity whose ACL entry is to be deleted
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_       context.Context      // optional context; set via Context
	header_    http.Header          // extra request headers; lazily created by Header
}
2024
2025// Delete: Permanently deletes the ACL entry for the specified entity on
2026// the specified bucket.
2027func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall {
2028 c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2029 c.bucket = bucket
2030 c.entity = entity
2031 return c
2032}
2033
2034// UserProject sets the optional parameter "userProject": The project to
2035// be billed for this request. Required for Requester Pays buckets.
2036func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall {
2037 c.urlParams_.Set("userProject", userProject)
2038 return c
2039}
2040
2041// Fields allows partial responses to be retrieved. See
2042// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
2043// for more information.
2044func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall {
2045 c.urlParams_.Set("fields", googleapi.CombineFields(s))
2046 return c
2047}
2048
2049// Context sets the context to be used in this call's Do method. Any
2050// pending HTTP request will be aborted if the provided context is
2051// canceled.
2052func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall {
2053 c.ctx_ = ctx
2054 return c
2055}
2056
2057// Header returns an http.Header that can be modified by the caller to
2058// add HTTP headers to the request.
2059func (c *BucketAccessControlsDeleteCall) Header() http.Header {
2060 if c.header_ == nil {
2061 c.header_ = make(http.Header)
2062 }
2063 return c.header_
2064}
2065
2066func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
2067 reqHeaders := make(http.Header)
2068 for k, v := range c.header_ {
2069 reqHeaders[k] = v
2070 }
2071 reqHeaders.Set("User-Agent", c.s.userAgent())
2072 var body io.Reader = nil
2073 c.urlParams_.Set("alt", alt)
2074 c.urlParams_.Set("prettyPrint", "false")
2075 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
2076 urls += "?" + c.urlParams_.Encode()
2077 req, err := http.NewRequest("DELETE", urls, body)
2078 if err != nil {
2079 return nil, err
2080 }
2081 req.Header = reqHeaders
2082 googleapi.Expand(req.URL, map[string]string{
2083 "bucket": c.bucket,
2084 "entity": c.entity,
2085 })
2086 return gensupport.SendRequest(c.ctx_, c.s.client, req)
2087}
2088
2089// Do executes the "storage.bucketAccessControls.delete" call.
2090func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
2091 gensupport.SetOptions(c.urlParams_, opts...)
2092 res, err := c.doRequest("json")
2093 if err != nil {
2094 return err
2095 }
2096 defer googleapi.CloseBody(res)
2097 if err := googleapi.CheckResponse(res); err != nil {
2098 return err
2099 }
2100 return nil
2101 // {
2102 // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
2103 // "httpMethod": "DELETE",
2104 // "id": "storage.bucketAccessControls.delete",
2105 // "parameterOrder": [
2106 // "bucket",
2107 // "entity"
2108 // ],
2109 // "parameters": {
2110 // "bucket": {
2111 // "description": "Name of a bucket.",
2112 // "location": "path",
2113 // "required": true,
2114 // "type": "string"
2115 // },
2116 // "entity": {
2117 // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
2118 // "location": "path",
2119 // "required": true,
2120 // "type": "string"
2121 // },
2122 // "userProject": {
2123 // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
2124 // "location": "query",
2125 // "type": "string"
2126 // }
2127 // },
2128 // "path": "b/{bucket}/acl/{entity}",
2129 // "scopes": [
2130 // "https://www.googleapis.com/auth/cloud-platform",
2131 // "https://www.googleapis.com/auth/devstorage.full_control"
2132 // ]
2133 // }
2134
2135}
2136
2137// method id "storage.bucketAccessControls.get":
2138
// BucketAccessControlsGetCall holds the state of a pending
// storage.bucketAccessControls.get request. Build it with
// BucketAccessControlsService.Get, configure it with the chainable
// setters, and execute it with Do.
type BucketAccessControlsGetCall struct {
	s            *Service             // parent service (client, base path, user agent)
	bucket       string               // bucket whose ACL entry is fetched
	entity       string               // entity whose ACL entry is fetched
	urlParams_   gensupport.URLParams // accumulated query parameters
	ifNoneMatch_ string               // If-None-Match ETag; set via IfNoneMatch
	ctx_         context.Context      // optional context; set via Context
	header_      http.Header          // extra request headers; lazily created by Header
}
2148
2149// Get: Returns the ACL entry for the specified entity on the specified
2150// bucket.
2151func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
2152 c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2153 c.bucket = bucket
2154 c.entity = entity
2155 return c
2156}
2157
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to allow call chaining.
func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
2164
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined field list is stored in the "fields" URL query parameter.
func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
2172
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
2182
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It returns c to allow call chaining.
func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
	c.ctx_ = ctx
	return c
}
2190
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The map is created lazily on first
// use.
func (c *BucketAccessControlsGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}
2199
2200func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
2201 reqHeaders := make(http.Header)
2202 for k, v := range c.header_ {
2203 reqHeaders[k] = v
2204 }
2205 reqHeaders.Set("User-Agent", c.s.userAgent())
2206 if c.ifNoneMatch_ != "" {
2207 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
2208 }
2209 var body io.Reader = nil
2210 c.urlParams_.Set("alt", alt)
2211 c.urlParams_.Set("prettyPrint", "false")
2212 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
2213 urls += "?" + c.urlParams_.Encode()
2214 req, err := http.NewRequest("GET", urls, body)
2215 if err != nil {
2216 return nil, err
2217 }
2218 req.Header = reqHeaders
2219 googleapi.Expand(req.URL, map[string]string{
2220 "bucket": c.bucket,
2221 "entity": c.entity,
2222 })
2223 return gensupport.SendRequest(c.ctx_, c.s.client, req)
2224}
2225
// Do executes the "storage.bucketAccessControls.get" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Surface 304 Not Modified as a *googleapi.Error before the generic
	// error path, so callers can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Capture response headers/status on the returned value, then decode
	// the body into it.
	ret := &BucketAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Method metadata from the API discovery document (kept by the generator):
	// {
	//   "description": "Returns the ACL entry for the specified entity on the specified bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.bucketAccessControls.get",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl/{entity}",
	//   "response": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
2301
2302// method id "storage.bucketAccessControls.insert":
2303
// BucketAccessControlsInsertCall holds the state for a
// "storage.bucketAccessControls.insert" request built via
// BucketAccessControlsService.Insert.
type BucketAccessControlsInsertCall struct {
	s                   *Service
	bucket              string               // "bucket" path parameter
	bucketaccesscontrol *BucketAccessControl // JSON request body
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}
2312
2313// Insert: Creates a new ACL entry on the specified bucket.
2314func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall {
2315 c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2316 c.bucket = bucket
2317 c.bucketaccesscontrol = bucketaccesscontrol
2318 return c
2319}
2320
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to allow call chaining.
func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
2327
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined field list is stored in the "fields" URL query parameter.
func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
2335
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It returns c to allow call chaining.
func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall {
	c.ctx_ = ctx
	return c
}
2343
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The map is created lazily on first
// use.
func (c *BucketAccessControlsInsertCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}
2352
2353func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
2354 reqHeaders := make(http.Header)
2355 for k, v := range c.header_ {
2356 reqHeaders[k] = v
2357 }
2358 reqHeaders.Set("User-Agent", c.s.userAgent())
2359 var body io.Reader = nil
2360 body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
2361 if err != nil {
2362 return nil, err
2363 }
2364 reqHeaders.Set("Content-Type", "application/json")
2365 c.urlParams_.Set("alt", alt)
2366 c.urlParams_.Set("prettyPrint", "false")
2367 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
2368 urls += "?" + c.urlParams_.Encode()
2369 req, err := http.NewRequest("POST", urls, body)
2370 if err != nil {
2371 return nil, err
2372 }
2373 req.Header = reqHeaders
2374 googleapi.Expand(req.URL, map[string]string{
2375 "bucket": c.bucket,
2376 })
2377 return gensupport.SendRequest(c.ctx_, c.s.client, req)
2378}
2379
// Do executes the "storage.bucketAccessControls.insert" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Surface 304 Not Modified as a *googleapi.Error before the generic
	// error path, so callers can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Capture response headers/status on the returned value, then decode
	// the body into it.
	ret := &BucketAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Method metadata from the API discovery document (kept by the generator):
	// {
	//   "description": "Creates a new ACL entry on the specified bucket.",
	//   "httpMethod": "POST",
	//   "id": "storage.bucketAccessControls.insert",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl",
	//   "request": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "response": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
2451
2452// method id "storage.bucketAccessControls.list":
2453
// BucketAccessControlsListCall holds the state for a
// "storage.bucketAccessControls.list" request built via
// BucketAccessControlsService.List.
type BucketAccessControlsListCall struct {
	s            *Service
	bucket       string // "bucket" path parameter
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string // value sent as the If-None-Match header, if non-empty
	ctx_         context.Context
	header_      http.Header
}
2462
2463// List: Retrieves ACL entries on the specified bucket.
2464func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
2465 c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2466 c.bucket = bucket
2467 return c
2468}
2469
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to allow call chaining.
func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
2476
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined field list is stored in the "fields" URL query parameter.
func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
2484
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
2494
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It returns c to allow call chaining.
func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall {
	c.ctx_ = ctx
	return c
}
2502
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The map is created lazily on first
// use.
func (c *BucketAccessControlsListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}
2511
2512func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
2513 reqHeaders := make(http.Header)
2514 for k, v := range c.header_ {
2515 reqHeaders[k] = v
2516 }
2517 reqHeaders.Set("User-Agent", c.s.userAgent())
2518 if c.ifNoneMatch_ != "" {
2519 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
2520 }
2521 var body io.Reader = nil
2522 c.urlParams_.Set("alt", alt)
2523 c.urlParams_.Set("prettyPrint", "false")
2524 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
2525 urls += "?" + c.urlParams_.Encode()
2526 req, err := http.NewRequest("GET", urls, body)
2527 if err != nil {
2528 return nil, err
2529 }
2530 req.Header = reqHeaders
2531 googleapi.Expand(req.URL, map[string]string{
2532 "bucket": c.bucket,
2533 })
2534 return gensupport.SendRequest(c.ctx_, c.s.client, req)
2535}
2536
// Do executes the "storage.bucketAccessControls.list" call.
// Exactly one of *BucketAccessControls or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControls.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Surface 304 Not Modified as a *googleapi.Error before the generic
	// error path, so callers can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Capture response headers/status on the returned value, then decode
	// the body into it.
	ret := &BucketAccessControls{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Method metadata from the API discovery document (kept by the generator):
	// {
	//   "description": "Retrieves ACL entries on the specified bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.bucketAccessControls.list",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl",
	//   "response": {
	//     "$ref": "BucketAccessControls"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
2605
2606// method id "storage.bucketAccessControls.patch":
2607
// BucketAccessControlsPatchCall holds the state for a
// "storage.bucketAccessControls.patch" request built via
// BucketAccessControlsService.Patch.
type BucketAccessControlsPatchCall struct {
	s                   *Service
	bucket              string               // "bucket" path parameter
	entity              string               // "entity" path parameter
	bucketaccesscontrol *BucketAccessControl // JSON request body
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}
2617
2618// Patch: Patches an ACL entry on the specified bucket.
2619func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall {
2620 c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2621 c.bucket = bucket
2622 c.entity = entity
2623 c.bucketaccesscontrol = bucketaccesscontrol
2624 return c
2625}
2626
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to allow call chaining.
func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
2633
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined field list is stored in the "fields" URL query parameter.
func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
2641
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It returns c to allow call chaining.
func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall {
	c.ctx_ = ctx
	return c
}
2649
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The map is created lazily on first
// use.
func (c *BucketAccessControlsPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}
2658
2659func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
2660 reqHeaders := make(http.Header)
2661 for k, v := range c.header_ {
2662 reqHeaders[k] = v
2663 }
2664 reqHeaders.Set("User-Agent", c.s.userAgent())
2665 var body io.Reader = nil
2666 body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
2667 if err != nil {
2668 return nil, err
2669 }
2670 reqHeaders.Set("Content-Type", "application/json")
2671 c.urlParams_.Set("alt", alt)
2672 c.urlParams_.Set("prettyPrint", "false")
2673 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
2674 urls += "?" + c.urlParams_.Encode()
2675 req, err := http.NewRequest("PATCH", urls, body)
2676 if err != nil {
2677 return nil, err
2678 }
2679 req.Header = reqHeaders
2680 googleapi.Expand(req.URL, map[string]string{
2681 "bucket": c.bucket,
2682 "entity": c.entity,
2683 })
2684 return gensupport.SendRequest(c.ctx_, c.s.client, req)
2685}
2686
// Do executes the "storage.bucketAccessControls.patch" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Surface 304 Not Modified as a *googleapi.Error before the generic
	// error path, so callers can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Capture response headers/status on the returned value, then decode
	// the body into it.
	ret := &BucketAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Method metadata from the API discovery document (kept by the generator):
	// {
	//   "description": "Patches an ACL entry on the specified bucket.",
	//   "httpMethod": "PATCH",
	//   "id": "storage.bucketAccessControls.patch",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl/{entity}",
	//   "request": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "response": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
2765
2766// method id "storage.bucketAccessControls.update":
2767
// BucketAccessControlsUpdateCall holds the state for a
// "storage.bucketAccessControls.update" request built via
// BucketAccessControlsService.Update.
type BucketAccessControlsUpdateCall struct {
	s                   *Service
	bucket              string               // "bucket" path parameter
	entity              string               // "entity" path parameter
	bucketaccesscontrol *BucketAccessControl // JSON request body
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}
2777
2778// Update: Updates an ACL entry on the specified bucket.
2779func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall {
2780 c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2781 c.bucket = bucket
2782 c.entity = entity
2783 c.bucketaccesscontrol = bucketaccesscontrol
2784 return c
2785}
2786
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to allow call chaining.
func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
2793
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined field list is stored in the "fields" URL query parameter.
func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
2801
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It returns c to allow call chaining.
func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall {
	c.ctx_ = ctx
	return c
}
2809
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The map is created lazily on first
// use.
func (c *BucketAccessControlsUpdateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}
2818
2819func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
2820 reqHeaders := make(http.Header)
2821 for k, v := range c.header_ {
2822 reqHeaders[k] = v
2823 }
2824 reqHeaders.Set("User-Agent", c.s.userAgent())
2825 var body io.Reader = nil
2826 body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
2827 if err != nil {
2828 return nil, err
2829 }
2830 reqHeaders.Set("Content-Type", "application/json")
2831 c.urlParams_.Set("alt", alt)
2832 c.urlParams_.Set("prettyPrint", "false")
2833 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
2834 urls += "?" + c.urlParams_.Encode()
2835 req, err := http.NewRequest("PUT", urls, body)
2836 if err != nil {
2837 return nil, err
2838 }
2839 req.Header = reqHeaders
2840 googleapi.Expand(req.URL, map[string]string{
2841 "bucket": c.bucket,
2842 "entity": c.entity,
2843 })
2844 return gensupport.SendRequest(c.ctx_, c.s.client, req)
2845}
2846
// Do executes the "storage.bucketAccessControls.update" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Surface 304 Not Modified as a *googleapi.Error before the generic
	// error path, so callers can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Capture response headers/status on the returned value, then decode
	// the body into it.
	ret := &BucketAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Method metadata from the API discovery document (kept by the generator):
	// {
	//   "description": "Updates an ACL entry on the specified bucket.",
	//   "httpMethod": "PUT",
	//   "id": "storage.bucketAccessControls.update",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/acl/{entity}",
	//   "request": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "response": {
	//     "$ref": "BucketAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
2925
2926// method id "storage.buckets.delete":
2927
// BucketsDeleteCall holds the state for a "storage.buckets.delete"
// request built via BucketsService.Delete.
type BucketsDeleteCall struct {
	s          *Service
	bucket     string // "bucket" path parameter
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}
2935
2936// Delete: Permanently deletes an empty bucket.
2937func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall {
2938 c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
2939 c.bucket = bucket
2940 return c
2941}
2942
// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": If set, only deletes the bucket if its
// metageneration matches this value.
// The int64 value is sent as its decimal string representation.
func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall {
	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
	return c
}
2950
// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": If set, only deletes the bucket if its
// metageneration does not match this value.
// The int64 value is sent as its decimal string representation.
func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall {
	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
	return c
}
2958
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to allow call chaining.
func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
2965
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined field list is stored in the "fields" URL query parameter.
func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
2973
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. It returns c to allow call chaining.
func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall {
	c.ctx_ = ctx
	return c
}
2981
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The map is created lazily on first
// use.
func (c *BucketsDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}
2990
2991func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
2992 reqHeaders := make(http.Header)
2993 for k, v := range c.header_ {
2994 reqHeaders[k] = v
2995 }
2996 reqHeaders.Set("User-Agent", c.s.userAgent())
2997 var body io.Reader = nil
2998 c.urlParams_.Set("alt", alt)
2999 c.urlParams_.Set("prettyPrint", "false")
3000 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
3001 urls += "?" + c.urlParams_.Encode()
3002 req, err := http.NewRequest("DELETE", urls, body)
3003 if err != nil {
3004 return nil, err
3005 }
3006 req.Header = reqHeaders
3007 googleapi.Expand(req.URL, map[string]string{
3008 "bucket": c.bucket,
3009 })
3010 return gensupport.SendRequest(c.ctx_, c.s.client, req)
3011}
3012
// Do executes the "storage.buckets.delete" call.
// A nil return value means the server responded with a 2xx status;
// any non-2xx status code is returned as a *googleapi.Error.
func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	// Always close the response body so the underlying connection can be reused.
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// Method metadata from the API discovery document (kept by the generator):
	// {
	//   "description": "Permanently deletes an empty bucket.",
	//   "httpMethod": "DELETE",
	//   "id": "storage.buckets.delete",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "If set, only deletes the bucket if its metageneration matches this value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "If set, only deletes the bucket if its metageneration does not match this value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
3066
3067// method id "storage.buckets.get":
3068
// BucketsGetCall holds the state for a "storage.buckets.get" request.
type BucketsGetCall struct {
	s            *Service             // service through which the call is issued
	bucket       string               // bucket name; expanded into the request path
	urlParams_   gensupport.URLParams // accumulated query parameters
	ifNoneMatch_ string               // optional ETag sent as If-None-Match
	ctx_         context.Context      // optional context controlling the request
	header_      http.Header          // extra headers added via Header()
}
3077
3078// Get: Returns metadata for the specified bucket.
3079func (r *BucketsService) Get(bucket string) *BucketsGetCall {
3080 c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
3081 c.bucket = bucket
3082 return c
3083}
3084
3085// IfMetagenerationMatch sets the optional parameter
3086// "ifMetagenerationMatch": Makes the return of the bucket metadata
3087// conditional on whether the bucket's current metageneration matches
3088// the given value.
3089func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall {
3090 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
3091 return c
3092}
3093
3094// IfMetagenerationNotMatch sets the optional parameter
3095// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
3096// conditional on whether the bucket's current metageneration does not
3097// match the given value.
3098func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall {
3099 c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
3100 return c
3101}
3102
3103// Projection sets the optional parameter "projection": Set of
3104// properties to return. Defaults to noAcl.
3105//
3106// Possible values:
3107// "full" - Include all properties.
3108// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
3109func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {
3110 c.urlParams_.Set("projection", projection)
3111 return c
3112}
3113
3114// UserProject sets the optional parameter "userProject": The project to
3115// be billed for this request. Required for Requester Pays buckets.
3116func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall {
3117 c.urlParams_.Set("userProject", userProject)
3118 return c
3119}
3120
3121// Fields allows partial responses to be retrieved. See
3122// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
3123// for more information.
3124func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
3125 c.urlParams_.Set("fields", googleapi.CombineFields(s))
3126 return c
3127}
3128
3129// IfNoneMatch sets the optional parameter which makes the operation
3130// fail if the object's ETag matches the given value. This is useful for
3131// getting updates only after the object has changed since the last
3132// request. Use googleapi.IsNotModified to check whether the response
3133// error from Do is the result of In-None-Match.
3134func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
3135 c.ifNoneMatch_ = entityTag
3136 return c
3137}
3138
3139// Context sets the context to be used in this call's Do method. Any
3140// pending HTTP request will be aborted if the provided context is
3141// canceled.
3142func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
3143 c.ctx_ = ctx
3144 return c
3145}
3146
3147// Header returns an http.Header that can be modified by the caller to
3148// add HTTP headers to the request.
3149func (c *BucketsGetCall) Header() http.Header {
3150 if c.header_ == nil {
3151 c.header_ = make(http.Header)
3152 }
3153 return c.header_
3154}
3155
3156func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
3157 reqHeaders := make(http.Header)
3158 for k, v := range c.header_ {
3159 reqHeaders[k] = v
3160 }
3161 reqHeaders.Set("User-Agent", c.s.userAgent())
3162 if c.ifNoneMatch_ != "" {
3163 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
3164 }
3165 var body io.Reader = nil
3166 c.urlParams_.Set("alt", alt)
3167 c.urlParams_.Set("prettyPrint", "false")
3168 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
3169 urls += "?" + c.urlParams_.Encode()
3170 req, err := http.NewRequest("GET", urls, body)
3171 if err != nil {
3172 return nil, err
3173 }
3174 req.Header = reqHeaders
3175 googleapi.Expand(req.URL, map[string]string{
3176 "bucket": c.bucket,
3177 })
3178 return gensupport.SendRequest(c.ctx_, c.s.client, req)
3179}
3180
// Do executes the "storage.buckets.get" call.
// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Bucket.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (triggered via IfNoneMatch) is surfaced as a *googleapi.Error
	// before the generic error check so IsNotModified can detect it.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	// Drain and close the body so the connection can be reused.
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Bucket{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns metadata for the specified bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.buckets.get",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit owner, acl and defaultObjectAcl properties."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}",
	//   "response": {
	//     "$ref": "Bucket"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
3277
3278// method id "storage.buckets.getIamPolicy":
3279
// BucketsGetIamPolicyCall holds the state for a
// "storage.buckets.getIamPolicy" request.
type BucketsGetIamPolicyCall struct {
	s            *Service             // service through which the call is issued
	bucket       string               // bucket name; expanded into the request path
	urlParams_   gensupport.URLParams // accumulated query parameters
	ifNoneMatch_ string               // optional ETag sent as If-None-Match
	ctx_         context.Context      // optional context controlling the request
	header_      http.Header          // extra headers added via Header()
}
3288
3289// GetIamPolicy: Returns an IAM policy for the specified bucket.
3290func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall {
3291 c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
3292 c.bucket = bucket
3293 return c
3294}
3295
3296// UserProject sets the optional parameter "userProject": The project to
3297// be billed for this request. Required for Requester Pays buckets.
3298func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall {
3299 c.urlParams_.Set("userProject", userProject)
3300 return c
3301}
3302
3303// Fields allows partial responses to be retrieved. See
3304// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
3305// for more information.
3306func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall {
3307 c.urlParams_.Set("fields", googleapi.CombineFields(s))
3308 return c
3309}
3310
3311// IfNoneMatch sets the optional parameter which makes the operation
3312// fail if the object's ETag matches the given value. This is useful for
3313// getting updates only after the object has changed since the last
3314// request. Use googleapi.IsNotModified to check whether the response
3315// error from Do is the result of In-None-Match.
3316func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall {
3317 c.ifNoneMatch_ = entityTag
3318 return c
3319}
3320
3321// Context sets the context to be used in this call's Do method. Any
3322// pending HTTP request will be aborted if the provided context is
3323// canceled.
3324func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall {
3325 c.ctx_ = ctx
3326 return c
3327}
3328
3329// Header returns an http.Header that can be modified by the caller to
3330// add HTTP headers to the request.
3331func (c *BucketsGetIamPolicyCall) Header() http.Header {
3332 if c.header_ == nil {
3333 c.header_ = make(http.Header)
3334 }
3335 return c.header_
3336}
3337
3338func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
3339 reqHeaders := make(http.Header)
3340 for k, v := range c.header_ {
3341 reqHeaders[k] = v
3342 }
3343 reqHeaders.Set("User-Agent", c.s.userAgent())
3344 if c.ifNoneMatch_ != "" {
3345 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
3346 }
3347 var body io.Reader = nil
3348 c.urlParams_.Set("alt", alt)
3349 c.urlParams_.Set("prettyPrint", "false")
3350 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
3351 urls += "?" + c.urlParams_.Encode()
3352 req, err := http.NewRequest("GET", urls, body)
3353 if err != nil {
3354 return nil, err
3355 }
3356 req.Header = reqHeaders
3357 googleapi.Expand(req.URL, map[string]string{
3358 "bucket": c.bucket,
3359 })
3360 return gensupport.SendRequest(c.ctx_, c.s.client, req)
3361}
3362
// Do executes the "storage.buckets.getIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (triggered via IfNoneMatch) is surfaced as a *googleapi.Error
	// before the generic error check so IsNotModified can detect it.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	// Drain and close the body so the connection can be reused.
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Policy{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns an IAM policy for the specified bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.buckets.getIamPolicy",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/iam",
	//   "response": {
	//     "$ref": "Policy"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
3434
3435// method id "storage.buckets.insert":
3436
// BucketsInsertCall holds the state for a "storage.buckets.insert"
// request.
type BucketsInsertCall struct {
	s          *Service             // service through which the call is issued
	bucket     *Bucket              // bucket resource sent as the JSON request body
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_       context.Context      // optional context controlling the request
	header_    http.Header          // extra headers added via Header()
}
3444
3445// Insert: Creates a new bucket.
3446func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall {
3447 c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
3448 c.urlParams_.Set("project", projectid)
3449 c.bucket = bucket
3450 return c
3451}
3452
3453// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
3454// predefined set of access controls to this bucket.
3455//
3456// Possible values:
3457// "authenticatedRead" - Project team owners get OWNER access, and
3458// allAuthenticatedUsers get READER access.
3459// "private" - Project team owners get OWNER access.
3460// "projectPrivate" - Project team members get access according to
3461// their roles.
3462// "publicRead" - Project team owners get OWNER access, and allUsers
3463// get READER access.
3464// "publicReadWrite" - Project team owners get OWNER access, and
3465// allUsers get WRITER access.
3466func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall {
3467 c.urlParams_.Set("predefinedAcl", predefinedAcl)
3468 return c
3469}
3470
3471// PredefinedDefaultObjectAcl sets the optional parameter
3472// "predefinedDefaultObjectAcl": Apply a predefined set of default
3473// object access controls to this bucket.
3474//
3475// Possible values:
3476// "authenticatedRead" - Object owner gets OWNER access, and
3477// allAuthenticatedUsers get READER access.
3478// "bucketOwnerFullControl" - Object owner gets OWNER access, and
3479// project team owners get OWNER access.
3480// "bucketOwnerRead" - Object owner gets OWNER access, and project
3481// team owners get READER access.
3482// "private" - Object owner gets OWNER access.
3483// "projectPrivate" - Object owner gets OWNER access, and project team
3484// members get access according to their roles.
3485// "publicRead" - Object owner gets OWNER access, and allUsers get
3486// READER access.
3487func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall {
3488 c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
3489 return c
3490}
3491
3492// Projection sets the optional parameter "projection": Set of
3493// properties to return. Defaults to noAcl, unless the bucket resource
3494// specifies acl or defaultObjectAcl properties, when it defaults to
3495// full.
3496//
3497// Possible values:
3498// "full" - Include all properties.
3499// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
3500func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall {
3501 c.urlParams_.Set("projection", projection)
3502 return c
3503}
3504
3505// UserProject sets the optional parameter "userProject": The project to
3506// be billed for this request.
3507func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall {
3508 c.urlParams_.Set("userProject", userProject)
3509 return c
3510}
3511
3512// Fields allows partial responses to be retrieved. See
3513// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
3514// for more information.
3515func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall {
3516 c.urlParams_.Set("fields", googleapi.CombineFields(s))
3517 return c
3518}
3519
3520// Context sets the context to be used in this call's Do method. Any
3521// pending HTTP request will be aborted if the provided context is
3522// canceled.
3523func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall {
3524 c.ctx_ = ctx
3525 return c
3526}
3527
3528// Header returns an http.Header that can be modified by the caller to
3529// add HTTP headers to the request.
3530func (c *BucketsInsertCall) Header() http.Header {
3531 if c.header_ == nil {
3532 c.header_ = make(http.Header)
3533 }
3534 return c.header_
3535}
3536
3537func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
3538 reqHeaders := make(http.Header)
3539 for k, v := range c.header_ {
3540 reqHeaders[k] = v
3541 }
3542 reqHeaders.Set("User-Agent", c.s.userAgent())
3543 var body io.Reader = nil
3544 body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)
3545 if err != nil {
3546 return nil, err
3547 }
3548 reqHeaders.Set("Content-Type", "application/json")
3549 c.urlParams_.Set("alt", alt)
3550 c.urlParams_.Set("prettyPrint", "false")
3551 urls := googleapi.ResolveRelative(c.s.BasePath, "b")
3552 urls += "?" + c.urlParams_.Encode()
3553 req, err := http.NewRequest("POST", urls, body)
3554 if err != nil {
3555 return nil, err
3556 }
3557 req.Header = reqHeaders
3558 return gensupport.SendRequest(c.ctx_, c.s.client, req)
3559}
3560
// Do executes the "storage.buckets.insert" call.
// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Bucket.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 is surfaced as a *googleapi.Error before the generic error
	// check so IsNotModified can detect it.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	// Drain and close the body so the connection can be reused.
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Bucket{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new bucket.",
	//   "httpMethod": "POST",
	//   "id": "storage.buckets.insert",
	//   "parameterOrder": [
	//     "project"
	//   ],
	//   "parameters": {
	//     "predefinedAcl": {
	//       "description": "Apply a predefined set of access controls to this bucket.",
	//       "enum": [
	//         "authenticatedRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead",
	//         "publicReadWrite"
	//       ],
	//       "enumDescriptions": [
	//         "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Project team owners get OWNER access.",
	//         "Project team members get access according to their roles.",
	//         "Project team owners get OWNER access, and allUsers get READER access.",
	//         "Project team owners get OWNER access, and allUsers get WRITER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "predefinedDefaultObjectAcl": {
	//       "description": "Apply a predefined set of default object access controls to this bucket.",
	//       "enum": [
	//         "authenticatedRead",
	//         "bucketOwnerFullControl",
	//         "bucketOwnerRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead"
	//       ],
	//       "enumDescriptions": [
	//         "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Object owner gets OWNER access, and project team owners get OWNER access.",
	//         "Object owner gets OWNER access, and project team owners get READER access.",
	//         "Object owner gets OWNER access.",
	//         "Object owner gets OWNER access, and project team members get access according to their roles.",
	//         "Object owner gets OWNER access, and allUsers get READER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "A valid API project identifier.",
	//       "location": "query",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit owner, acl and defaultObjectAcl properties."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b",
	//   "request": {
	//     "$ref": "Bucket"
	//   },
	//   "response": {
	//     "$ref": "Bucket"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
3686
3687// method id "storage.buckets.list":
3688
// BucketsListCall holds the state for a "storage.buckets.list" request.
type BucketsListCall struct {
	s            *Service             // service through which the call is issued
	urlParams_   gensupport.URLParams // accumulated query parameters (incl. project)
	ifNoneMatch_ string               // optional ETag sent as If-None-Match
	ctx_         context.Context      // optional context controlling the request
	header_      http.Header          // extra headers added via Header()
}
3696
3697// List: Retrieves a list of buckets for a given project.
3698func (r *BucketsService) List(projectid string) *BucketsListCall {
3699 c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
3700 c.urlParams_.Set("project", projectid)
3701 return c
3702}
3703
3704// MaxResults sets the optional parameter "maxResults": Maximum number
3705// of buckets to return in a single response. The service will use this
3706// parameter or 1,000 items, whichever is smaller.
3707func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall {
3708 c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
3709 return c
3710}
3711
3712// PageToken sets the optional parameter "pageToken": A
3713// previously-returned page token representing part of the larger set of
3714// results to view.
3715func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall {
3716 c.urlParams_.Set("pageToken", pageToken)
3717 return c
3718}
3719
3720// Prefix sets the optional parameter "prefix": Filter results to
3721// buckets whose names begin with this prefix.
3722func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall {
3723 c.urlParams_.Set("prefix", prefix)
3724 return c
3725}
3726
3727// Projection sets the optional parameter "projection": Set of
3728// properties to return. Defaults to noAcl.
3729//
3730// Possible values:
3731// "full" - Include all properties.
3732// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
3733func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
3734 c.urlParams_.Set("projection", projection)
3735 return c
3736}
3737
3738// UserProject sets the optional parameter "userProject": The project to
3739// be billed for this request.
3740func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall {
3741 c.urlParams_.Set("userProject", userProject)
3742 return c
3743}
3744
3745// Fields allows partial responses to be retrieved. See
3746// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
3747// for more information.
3748func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
3749 c.urlParams_.Set("fields", googleapi.CombineFields(s))
3750 return c
3751}
3752
3753// IfNoneMatch sets the optional parameter which makes the operation
3754// fail if the object's ETag matches the given value. This is useful for
3755// getting updates only after the object has changed since the last
3756// request. Use googleapi.IsNotModified to check whether the response
3757// error from Do is the result of In-None-Match.
3758func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
3759 c.ifNoneMatch_ = entityTag
3760 return c
3761}
3762
3763// Context sets the context to be used in this call's Do method. Any
3764// pending HTTP request will be aborted if the provided context is
3765// canceled.
3766func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
3767 c.ctx_ = ctx
3768 return c
3769}
3770
3771// Header returns an http.Header that can be modified by the caller to
3772// add HTTP headers to the request.
3773func (c *BucketsListCall) Header() http.Header {
3774 if c.header_ == nil {
3775 c.header_ = make(http.Header)
3776 }
3777 return c.header_
3778}
3779
3780func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
3781 reqHeaders := make(http.Header)
3782 for k, v := range c.header_ {
3783 reqHeaders[k] = v
3784 }
3785 reqHeaders.Set("User-Agent", c.s.userAgent())
3786 if c.ifNoneMatch_ != "" {
3787 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
3788 }
3789 var body io.Reader = nil
3790 c.urlParams_.Set("alt", alt)
3791 c.urlParams_.Set("prettyPrint", "false")
3792 urls := googleapi.ResolveRelative(c.s.BasePath, "b")
3793 urls += "?" + c.urlParams_.Encode()
3794 req, err := http.NewRequest("GET", urls, body)
3795 if err != nil {
3796 return nil, err
3797 }
3798 req.Header = reqHeaders
3799 return gensupport.SendRequest(c.ctx_, c.s.client, req)
3800}
3801
// Do executes the "storage.buckets.list" call.
// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Buckets.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (triggered via IfNoneMatch) is surfaced as a *googleapi.Error
	// before the generic error check so IsNotModified can detect it.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	// Drain and close the body so the connection can be reused.
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Buckets{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Retrieves a list of buckets for a given project.",
	//   "httpMethod": "GET",
	//   "id": "storage.buckets.list",
	//   "parameterOrder": [
	//     "project"
	//   ],
	//   "parameters": {
	//     "maxResults": {
	//       "default": "1000",
	//       "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.",
	//       "format": "uint32",
	//       "location": "query",
	//       "minimum": "0",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A previously-returned page token representing part of the larger set of results to view.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "prefix": {
	//       "description": "Filter results to buckets whose names begin with this prefix.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "A valid API project identifier.",
	//       "location": "query",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit owner, acl and defaultObjectAcl properties."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b",
	//   "response": {
	//     "$ref": "Buckets"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
3904
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error {
	c.ctx_ = ctx
	// Note: the defer's argument is evaluated here, capturing the caller's
	// original page token so the call object is restored after iteration.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty NextPageToken signals the final page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
3925
3926// method id "storage.buckets.lockRetentionPolicy":
3927
// BucketsLockRetentionPolicyCall holds the state for a
// "storage.buckets.lockRetentionPolicy" request.
type BucketsLockRetentionPolicyCall struct {
	s          *Service             // service through which the call is issued
	bucket     string               // bucket name; expanded into the request path
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_       context.Context      // optional context controlling the request
	header_    http.Header          // extra headers added via Header()
}
3935
3936// LockRetentionPolicy: Locks retention policy on a bucket.
3937func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall {
3938 c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
3939 c.bucket = bucket
3940 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
3941 return c
3942}
3943
3944// UserProject sets the optional parameter "userProject": The project to
3945// be billed for this request. Required for Requester Pays buckets.
3946func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall {
3947 c.urlParams_.Set("userProject", userProject)
3948 return c
3949}
3950
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// It returns c so calls can be chained.
func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
3958
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// It returns c so calls can be chained.
func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall {
	c.ctx_ = ctx
	return c
}
3966
3967// Header returns an http.Header that can be modified by the caller to
3968// add HTTP headers to the request.
3969func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
3970 if c.header_ == nil {
3971 c.header_ = make(http.Header)
3972 }
3973 return c.header_
3974}
3975
3976func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
3977 reqHeaders := make(http.Header)
3978 for k, v := range c.header_ {
3979 reqHeaders[k] = v
3980 }
3981 reqHeaders.Set("User-Agent", c.s.userAgent())
3982 var body io.Reader = nil
3983 c.urlParams_.Set("alt", alt)
3984 c.urlParams_.Set("prettyPrint", "false")
3985 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy")
3986 urls += "?" + c.urlParams_.Encode()
3987 req, err := http.NewRequest("POST", urls, body)
3988 if err != nil {
3989 return nil, err
3990 }
3991 req.Header = reqHeaders
3992 googleapi.Expand(req.URL, map[string]string{
3993 "bucket": c.bucket,
3994 })
3995 return gensupport.SendRequest(c.ctx_, c.s.client, req)
3996}
3997
3998// Do executes the "storage.buckets.lockRetentionPolicy" call.
3999// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
4000// code is an error. Response headers are in either
4001// *Bucket.ServerResponse.Header or (if a response was returned at all)
4002// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
4003// check whether the returned error was because http.StatusNotModified
4004// was returned.
4005func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
4006 gensupport.SetOptions(c.urlParams_, opts...)
4007 res, err := c.doRequest("json")
4008 if res != nil && res.StatusCode == http.StatusNotModified {
4009 if res.Body != nil {
4010 res.Body.Close()
4011 }
4012 return nil, &googleapi.Error{
4013 Code: res.StatusCode,
4014 Header: res.Header,
4015 }
4016 }
4017 if err != nil {
4018 return nil, err
4019 }
4020 defer googleapi.CloseBody(res)
4021 if err := googleapi.CheckResponse(res); err != nil {
4022 return nil, err
4023 }
4024 ret := &Bucket{
4025 ServerResponse: googleapi.ServerResponse{
4026 Header: res.Header,
4027 HTTPStatusCode: res.StatusCode,
4028 },
4029 }
4030 target := &ret
4031 if err := gensupport.DecodeResponse(target, res); err != nil {
4032 return nil, err
4033 }
4034 return ret, nil
4035 // {
4036 // "description": "Locks retention policy on a bucket.",
4037 // "httpMethod": "POST",
4038 // "id": "storage.buckets.lockRetentionPolicy",
4039 // "parameterOrder": [
4040 // "bucket",
4041 // "ifMetagenerationMatch"
4042 // ],
4043 // "parameters": {
4044 // "bucket": {
4045 // "description": "Name of a bucket.",
4046 // "location": "path",
4047 // "required": true,
4048 // "type": "string"
4049 // },
4050 // "ifMetagenerationMatch": {
4051 // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.",
4052 // "format": "int64",
4053 // "location": "query",
4054 // "required": true,
4055 // "type": "string"
4056 // },
4057 // "userProject": {
4058 // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
4059 // "location": "query",
4060 // "type": "string"
4061 // }
4062 // },
4063 // "path": "b/{bucket}/lockRetentionPolicy",
4064 // "response": {
4065 // "$ref": "Bucket"
4066 // },
4067 // "scopes": [
4068 // "https://www.googleapis.com/auth/cloud-platform",
4069 // "https://www.googleapis.com/auth/devstorage.full_control",
4070 // "https://www.googleapis.com/auth/devstorage.read_write"
4071 // ]
4072 // }
4073
4074}
4075
4076// method id "storage.buckets.patch":
4077
// BucketsPatchCall holds the state of a pending "storage.buckets.patch"
// request, built by BucketsService.Patch and executed by its Do method.
type BucketsPatchCall struct {
	s          *Service
	bucket     string
	bucket2    *Bucket
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}
4086
4087// Patch: Patches a bucket. Changes to the bucket will be readable
4088// immediately after writing, but configuration changes may take time to
4089// propagate.
4090func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall {
4091 c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
4092 c.bucket = bucket
4093 c.bucket2 = bucket2
4094 return c
4095}
4096
// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration matches
// the given value.
// It returns c so calls can be chained.
func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall {
	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
	return c
}
4105
// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration does not
// match the given value.
// It returns c so calls can be chained.
func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall {
	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
	return c
}
4114
// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
// predefined set of access controls to this bucket.
//
// Possible values:
//   "authenticatedRead" - Project team owners get OWNER access, and
// allAuthenticatedUsers get READER access.
//   "private" - Project team owners get OWNER access.
//   "projectPrivate" - Project team members get access according to
// their roles.
//   "publicRead" - Project team owners get OWNER access, and allUsers
// get READER access.
//   "publicReadWrite" - Project team owners get OWNER access, and
// allUsers get WRITER access.
//
// It returns c so calls can be chained.
func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall {
	c.urlParams_.Set("predefinedAcl", predefinedAcl)
	return c
}
4132
// PredefinedDefaultObjectAcl sets the optional parameter
// "predefinedDefaultObjectAcl": Apply a predefined set of default
// object access controls to this bucket.
//
// Possible values:
//   "authenticatedRead" - Object owner gets OWNER access, and
// allAuthenticatedUsers get READER access.
//   "bucketOwnerFullControl" - Object owner gets OWNER access, and
// project team owners get OWNER access.
//   "bucketOwnerRead" - Object owner gets OWNER access, and project
// team owners get READER access.
//   "private" - Object owner gets OWNER access.
//   "projectPrivate" - Object owner gets OWNER access, and project team
// members get access according to their roles.
//   "publicRead" - Object owner gets OWNER access, and allUsers get
// READER access.
//
// It returns c so calls can be chained.
func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall {
	c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
	return c
}
4153
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
//   "full" - Include all properties.
//   "noAcl" - Omit owner, acl and defaultObjectAcl properties.
//
// It returns c so calls can be chained.
func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall {
	c.urlParams_.Set("projection", projection)
	return c
}
4164
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c so calls can be chained.
func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
4171
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// It returns c so calls can be chained.
func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
4179
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// It returns c so calls can be chained.
func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall {
	c.ctx_ = ctx
	return c
}
4187
4188// Header returns an http.Header that can be modified by the caller to
4189// add HTTP headers to the request.
4190func (c *BucketsPatchCall) Header() http.Header {
4191 if c.header_ == nil {
4192 c.header_ = make(http.Header)
4193 }
4194 return c.header_
4195}
4196
4197func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
4198 reqHeaders := make(http.Header)
4199 for k, v := range c.header_ {
4200 reqHeaders[k] = v
4201 }
4202 reqHeaders.Set("User-Agent", c.s.userAgent())
4203 var body io.Reader = nil
4204 body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
4205 if err != nil {
4206 return nil, err
4207 }
4208 reqHeaders.Set("Content-Type", "application/json")
4209 c.urlParams_.Set("alt", alt)
4210 c.urlParams_.Set("prettyPrint", "false")
4211 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
4212 urls += "?" + c.urlParams_.Encode()
4213 req, err := http.NewRequest("PATCH", urls, body)
4214 if err != nil {
4215 return nil, err
4216 }
4217 req.Header = reqHeaders
4218 googleapi.Expand(req.URL, map[string]string{
4219 "bucket": c.bucket,
4220 })
4221 return gensupport.SendRequest(c.ctx_, c.s.client, req)
4222}
4223
4224// Do executes the "storage.buckets.patch" call.
4225// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
4226// code is an error. Response headers are in either
4227// *Bucket.ServerResponse.Header or (if a response was returned at all)
4228// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
4229// check whether the returned error was because http.StatusNotModified
4230// was returned.
4231func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
4232 gensupport.SetOptions(c.urlParams_, opts...)
4233 res, err := c.doRequest("json")
4234 if res != nil && res.StatusCode == http.StatusNotModified {
4235 if res.Body != nil {
4236 res.Body.Close()
4237 }
4238 return nil, &googleapi.Error{
4239 Code: res.StatusCode,
4240 Header: res.Header,
4241 }
4242 }
4243 if err != nil {
4244 return nil, err
4245 }
4246 defer googleapi.CloseBody(res)
4247 if err := googleapi.CheckResponse(res); err != nil {
4248 return nil, err
4249 }
4250 ret := &Bucket{
4251 ServerResponse: googleapi.ServerResponse{
4252 Header: res.Header,
4253 HTTPStatusCode: res.StatusCode,
4254 },
4255 }
4256 target := &ret
4257 if err := gensupport.DecodeResponse(target, res); err != nil {
4258 return nil, err
4259 }
4260 return ret, nil
4261 // {
4262 // "description": "Patches a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
4263 // "httpMethod": "PATCH",
4264 // "id": "storage.buckets.patch",
4265 // "parameterOrder": [
4266 // "bucket"
4267 // ],
4268 // "parameters": {
4269 // "bucket": {
4270 // "description": "Name of a bucket.",
4271 // "location": "path",
4272 // "required": true,
4273 // "type": "string"
4274 // },
4275 // "ifMetagenerationMatch": {
4276 // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
4277 // "format": "int64",
4278 // "location": "query",
4279 // "type": "string"
4280 // },
4281 // "ifMetagenerationNotMatch": {
4282 // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
4283 // "format": "int64",
4284 // "location": "query",
4285 // "type": "string"
4286 // },
4287 // "predefinedAcl": {
4288 // "description": "Apply a predefined set of access controls to this bucket.",
4289 // "enum": [
4290 // "authenticatedRead",
4291 // "private",
4292 // "projectPrivate",
4293 // "publicRead",
4294 // "publicReadWrite"
4295 // ],
4296 // "enumDescriptions": [
4297 // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
4298 // "Project team owners get OWNER access.",
4299 // "Project team members get access according to their roles.",
4300 // "Project team owners get OWNER access, and allUsers get READER access.",
4301 // "Project team owners get OWNER access, and allUsers get WRITER access."
4302 // ],
4303 // "location": "query",
4304 // "type": "string"
4305 // },
4306 // "predefinedDefaultObjectAcl": {
4307 // "description": "Apply a predefined set of default object access controls to this bucket.",
4308 // "enum": [
4309 // "authenticatedRead",
4310 // "bucketOwnerFullControl",
4311 // "bucketOwnerRead",
4312 // "private",
4313 // "projectPrivate",
4314 // "publicRead"
4315 // ],
4316 // "enumDescriptions": [
4317 // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
4318 // "Object owner gets OWNER access, and project team owners get OWNER access.",
4319 // "Object owner gets OWNER access, and project team owners get READER access.",
4320 // "Object owner gets OWNER access.",
4321 // "Object owner gets OWNER access, and project team members get access according to their roles.",
4322 // "Object owner gets OWNER access, and allUsers get READER access."
4323 // ],
4324 // "location": "query",
4325 // "type": "string"
4326 // },
4327 // "projection": {
4328 // "description": "Set of properties to return. Defaults to full.",
4329 // "enum": [
4330 // "full",
4331 // "noAcl"
4332 // ],
4333 // "enumDescriptions": [
4334 // "Include all properties.",
4335 // "Omit owner, acl and defaultObjectAcl properties."
4336 // ],
4337 // "location": "query",
4338 // "type": "string"
4339 // },
4340 // "userProject": {
4341 // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
4342 // "location": "query",
4343 // "type": "string"
4344 // }
4345 // },
4346 // "path": "b/{bucket}",
4347 // "request": {
4348 // "$ref": "Bucket"
4349 // },
4350 // "response": {
4351 // "$ref": "Bucket"
4352 // },
4353 // "scopes": [
4354 // "https://www.googleapis.com/auth/cloud-platform",
4355 // "https://www.googleapis.com/auth/devstorage.full_control"
4356 // ]
4357 // }
4358
4359}
4360
4361// method id "storage.buckets.setIamPolicy":
4362
// BucketsSetIamPolicyCall holds the state of a pending
// "storage.buckets.setIamPolicy" request, built by
// BucketsService.SetIamPolicy and executed by its Do method.
type BucketsSetIamPolicyCall struct {
	s          *Service
	bucket     string
	policy     *Policy
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}
4371
4372// SetIamPolicy: Updates an IAM policy for the specified bucket.
4373func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall {
4374 c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
4375 c.bucket = bucket
4376 c.policy = policy
4377 return c
4378}
4379
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c so calls can be chained.
func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
4386
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// It returns c so calls can be chained.
func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
4394
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// It returns c so calls can be chained.
func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall {
	c.ctx_ = ctx
	return c
}
4402
4403// Header returns an http.Header that can be modified by the caller to
4404// add HTTP headers to the request.
4405func (c *BucketsSetIamPolicyCall) Header() http.Header {
4406 if c.header_ == nil {
4407 c.header_ = make(http.Header)
4408 }
4409 return c.header_
4410}
4411
4412func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
4413 reqHeaders := make(http.Header)
4414 for k, v := range c.header_ {
4415 reqHeaders[k] = v
4416 }
4417 reqHeaders.Set("User-Agent", c.s.userAgent())
4418 var body io.Reader = nil
4419 body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy)
4420 if err != nil {
4421 return nil, err
4422 }
4423 reqHeaders.Set("Content-Type", "application/json")
4424 c.urlParams_.Set("alt", alt)
4425 c.urlParams_.Set("prettyPrint", "false")
4426 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
4427 urls += "?" + c.urlParams_.Encode()
4428 req, err := http.NewRequest("PUT", urls, body)
4429 if err != nil {
4430 return nil, err
4431 }
4432 req.Header = reqHeaders
4433 googleapi.Expand(req.URL, map[string]string{
4434 "bucket": c.bucket,
4435 })
4436 return gensupport.SendRequest(c.ctx_, c.s.client, req)
4437}
4438
4439// Do executes the "storage.buckets.setIamPolicy" call.
4440// Exactly one of *Policy or error will be non-nil. Any non-2xx status
4441// code is an error. Response headers are in either
4442// *Policy.ServerResponse.Header or (if a response was returned at all)
4443// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
4444// check whether the returned error was because http.StatusNotModified
4445// was returned.
4446func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
4447 gensupport.SetOptions(c.urlParams_, opts...)
4448 res, err := c.doRequest("json")
4449 if res != nil && res.StatusCode == http.StatusNotModified {
4450 if res.Body != nil {
4451 res.Body.Close()
4452 }
4453 return nil, &googleapi.Error{
4454 Code: res.StatusCode,
4455 Header: res.Header,
4456 }
4457 }
4458 if err != nil {
4459 return nil, err
4460 }
4461 defer googleapi.CloseBody(res)
4462 if err := googleapi.CheckResponse(res); err != nil {
4463 return nil, err
4464 }
4465 ret := &Policy{
4466 ServerResponse: googleapi.ServerResponse{
4467 Header: res.Header,
4468 HTTPStatusCode: res.StatusCode,
4469 },
4470 }
4471 target := &ret
4472 if err := gensupport.DecodeResponse(target, res); err != nil {
4473 return nil, err
4474 }
4475 return ret, nil
4476 // {
4477 // "description": "Updates an IAM policy for the specified bucket.",
4478 // "httpMethod": "PUT",
4479 // "id": "storage.buckets.setIamPolicy",
4480 // "parameterOrder": [
4481 // "bucket"
4482 // ],
4483 // "parameters": {
4484 // "bucket": {
4485 // "description": "Name of a bucket.",
4486 // "location": "path",
4487 // "required": true,
4488 // "type": "string"
4489 // },
4490 // "userProject": {
4491 // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
4492 // "location": "query",
4493 // "type": "string"
4494 // }
4495 // },
4496 // "path": "b/{bucket}/iam",
4497 // "request": {
4498 // "$ref": "Policy"
4499 // },
4500 // "response": {
4501 // "$ref": "Policy"
4502 // },
4503 // "scopes": [
4504 // "https://www.googleapis.com/auth/cloud-platform",
4505 // "https://www.googleapis.com/auth/devstorage.full_control",
4506 // "https://www.googleapis.com/auth/devstorage.read_write"
4507 // ]
4508 // }
4509
4510}
4511
4512// method id "storage.buckets.testIamPermissions":
4513
// BucketsTestIamPermissionsCall holds the state of a pending
// "storage.buckets.testIamPermissions" request, built by
// BucketsService.TestIamPermissions and executed by its Do method.
type BucketsTestIamPermissionsCall struct {
	s            *Service
	bucket       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}
4522
4523// TestIamPermissions: Tests a set of permissions on the given bucket to
4524// see which, if any, are held by the caller.
4525func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall {
4526 c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
4527 c.bucket = bucket
4528 c.urlParams_.SetMulti("permissions", append([]string{}, permissions...))
4529 return c
4530}
4531
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c so calls can be chained.
func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
4538
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// It returns c so calls can be chained.
func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
4546
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
// It returns c so calls can be chained.
func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall {
	c.ifNoneMatch_ = entityTag
	return c
}
4556
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// It returns c so calls can be chained.
func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall {
	c.ctx_ = ctx
	return c
}
4564
4565// Header returns an http.Header that can be modified by the caller to
4566// add HTTP headers to the request.
4567func (c *BucketsTestIamPermissionsCall) Header() http.Header {
4568 if c.header_ == nil {
4569 c.header_ = make(http.Header)
4570 }
4571 return c.header_
4572}
4573
4574func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
4575 reqHeaders := make(http.Header)
4576 for k, v := range c.header_ {
4577 reqHeaders[k] = v
4578 }
4579 reqHeaders.Set("User-Agent", c.s.userAgent())
4580 if c.ifNoneMatch_ != "" {
4581 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
4582 }
4583 var body io.Reader = nil
4584 c.urlParams_.Set("alt", alt)
4585 c.urlParams_.Set("prettyPrint", "false")
4586 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions")
4587 urls += "?" + c.urlParams_.Encode()
4588 req, err := http.NewRequest("GET", urls, body)
4589 if err != nil {
4590 return nil, err
4591 }
4592 req.Header = reqHeaders
4593 googleapi.Expand(req.URL, map[string]string{
4594 "bucket": c.bucket,
4595 })
4596 return gensupport.SendRequest(c.ctx_, c.s.client, req)
4597}
4598
4599// Do executes the "storage.buckets.testIamPermissions" call.
4600// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
4601// Any non-2xx status code is an error. Response headers are in either
4602// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
4603// was returned at all) in error.(*googleapi.Error).Header. Use
4604// googleapi.IsNotModified to check whether the returned error was
4605// because http.StatusNotModified was returned.
4606func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
4607 gensupport.SetOptions(c.urlParams_, opts...)
4608 res, err := c.doRequest("json")
4609 if res != nil && res.StatusCode == http.StatusNotModified {
4610 if res.Body != nil {
4611 res.Body.Close()
4612 }
4613 return nil, &googleapi.Error{
4614 Code: res.StatusCode,
4615 Header: res.Header,
4616 }
4617 }
4618 if err != nil {
4619 return nil, err
4620 }
4621 defer googleapi.CloseBody(res)
4622 if err := googleapi.CheckResponse(res); err != nil {
4623 return nil, err
4624 }
4625 ret := &TestIamPermissionsResponse{
4626 ServerResponse: googleapi.ServerResponse{
4627 Header: res.Header,
4628 HTTPStatusCode: res.StatusCode,
4629 },
4630 }
4631 target := &ret
4632 if err := gensupport.DecodeResponse(target, res); err != nil {
4633 return nil, err
4634 }
4635 return ret, nil
4636 // {
4637 // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.",
4638 // "httpMethod": "GET",
4639 // "id": "storage.buckets.testIamPermissions",
4640 // "parameterOrder": [
4641 // "bucket",
4642 // "permissions"
4643 // ],
4644 // "parameters": {
4645 // "bucket": {
4646 // "description": "Name of a bucket.",
4647 // "location": "path",
4648 // "required": true,
4649 // "type": "string"
4650 // },
4651 // "permissions": {
4652 // "description": "Permissions to test.",
4653 // "location": "query",
4654 // "repeated": true,
4655 // "required": true,
4656 // "type": "string"
4657 // },
4658 // "userProject": {
4659 // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
4660 // "location": "query",
4661 // "type": "string"
4662 // }
4663 // },
4664 // "path": "b/{bucket}/iam/testPermissions",
4665 // "response": {
4666 // "$ref": "TestIamPermissionsResponse"
4667 // },
4668 // "scopes": [
4669 // "https://www.googleapis.com/auth/cloud-platform",
4670 // "https://www.googleapis.com/auth/cloud-platform.read-only",
4671 // "https://www.googleapis.com/auth/devstorage.full_control",
4672 // "https://www.googleapis.com/auth/devstorage.read_only",
4673 // "https://www.googleapis.com/auth/devstorage.read_write"
4674 // ]
4675 // }
4676
4677}
4678
4679// method id "storage.buckets.update":
4680
// BucketsUpdateCall holds the state of a pending
// "storage.buckets.update" request, built by BucketsService.Update and
// executed by its Do method.
type BucketsUpdateCall struct {
	s          *Service
	bucket     string
	bucket2    *Bucket
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}
4689
4690// Update: Updates a bucket. Changes to the bucket will be readable
4691// immediately after writing, but configuration changes may take time to
4692// propagate.
4693func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall {
4694 c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
4695 c.bucket = bucket
4696 c.bucket2 = bucket2
4697 return c
4698}
4699
// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration matches
// the given value.
// It returns c so calls can be chained.
func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall {
	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
	return c
}
4708
// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
// conditional on whether the bucket's current metageneration does not
// match the given value.
// It returns c so calls can be chained.
func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall {
	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
	return c
}
4717
// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
// predefined set of access controls to this bucket.
//
// Possible values:
//   "authenticatedRead" - Project team owners get OWNER access, and
// allAuthenticatedUsers get READER access.
//   "private" - Project team owners get OWNER access.
//   "projectPrivate" - Project team members get access according to
// their roles.
//   "publicRead" - Project team owners get OWNER access, and allUsers
// get READER access.
//   "publicReadWrite" - Project team owners get OWNER access, and
// allUsers get WRITER access.
//
// It returns c so calls can be chained.
func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall {
	c.urlParams_.Set("predefinedAcl", predefinedAcl)
	return c
}
4735
// PredefinedDefaultObjectAcl sets the optional parameter
// "predefinedDefaultObjectAcl": Apply a predefined set of default
// object access controls to this bucket.
//
// Possible values:
//   "authenticatedRead" - Object owner gets OWNER access, and
// allAuthenticatedUsers get READER access.
//   "bucketOwnerFullControl" - Object owner gets OWNER access, and
// project team owners get OWNER access.
//   "bucketOwnerRead" - Object owner gets OWNER access, and project
// team owners get READER access.
//   "private" - Object owner gets OWNER access.
//   "projectPrivate" - Object owner gets OWNER access, and project team
// members get access according to their roles.
//   "publicRead" - Object owner gets OWNER access, and allUsers get
// READER access.
//
// It returns c so calls can be chained.
func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall {
	c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
	return c
}
4756
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
//   "full" - Include all properties.
//   "noAcl" - Omit owner, acl and defaultObjectAcl properties.
//
// It returns c so calls can be chained.
func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall {
	c.urlParams_.Set("projection", projection)
	return c
}
4767
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c so calls can be chained.
func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
4774
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// It returns c so calls can be chained.
func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
4782
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// It returns c so calls can be chained.
func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall {
	c.ctx_ = ctx
	return c
}
4790
4791// Header returns an http.Header that can be modified by the caller to
4792// add HTTP headers to the request.
4793func (c *BucketsUpdateCall) Header() http.Header {
4794 if c.header_ == nil {
4795 c.header_ = make(http.Header)
4796 }
4797 return c.header_
4798}
4799
4800func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
4801 reqHeaders := make(http.Header)
4802 for k, v := range c.header_ {
4803 reqHeaders[k] = v
4804 }
4805 reqHeaders.Set("User-Agent", c.s.userAgent())
4806 var body io.Reader = nil
4807 body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
4808 if err != nil {
4809 return nil, err
4810 }
4811 reqHeaders.Set("Content-Type", "application/json")
4812 c.urlParams_.Set("alt", alt)
4813 c.urlParams_.Set("prettyPrint", "false")
4814 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
4815 urls += "?" + c.urlParams_.Encode()
4816 req, err := http.NewRequest("PUT", urls, body)
4817 if err != nil {
4818 return nil, err
4819 }
4820 req.Header = reqHeaders
4821 googleapi.Expand(req.URL, map[string]string{
4822 "bucket": c.bucket,
4823 })
4824 return gensupport.SendRequest(c.ctx_, c.s.client, req)
4825}
4826
// Do executes the "storage.buckets.update" call.
// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Bucket.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 is checked before err (doRequest can return both a non-nil
	// response and a non-nil error) and surfaced as a *googleapi.Error so
	// callers can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Bucket{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// Generated calling convention: DecodeResponse is handed a **Bucket.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
	//   "httpMethod": "PUT",
	//   "id": "storage.buckets.update",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "predefinedAcl": {
	//       "description": "Apply a predefined set of access controls to this bucket.",
	//       "enum": [
	//         "authenticatedRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead",
	//         "publicReadWrite"
	//       ],
	//       "enumDescriptions": [
	//         "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Project team owners get OWNER access.",
	//         "Project team members get access according to their roles.",
	//         "Project team owners get OWNER access, and allUsers get READER access.",
	//         "Project team owners get OWNER access, and allUsers get WRITER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "predefinedDefaultObjectAcl": {
	//       "description": "Apply a predefined set of default object access controls to this bucket.",
	//       "enum": [
	//         "authenticatedRead",
	//         "bucketOwnerFullControl",
	//         "bucketOwnerRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead"
	//       ],
	//       "enumDescriptions": [
	//         "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Object owner gets OWNER access, and project team owners get OWNER access.",
	//         "Object owner gets OWNER access, and project team owners get READER access.",
	//         "Object owner gets OWNER access.",
	//         "Object owner gets OWNER access, and project team members get access according to their roles.",
	//         "Object owner gets OWNER access, and allUsers get READER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit owner, acl and defaultObjectAcl properties."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}",
	//   "request": {
	//     "$ref": "Bucket"
	//   },
	//   "response": {
	//     "$ref": "Bucket"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
4963
4964// method id "storage.channels.stop":
4965
// ChannelsStopCall holds the state of a "storage.channels.stop" call
// built by ChannelsService.Stop.
type ChannelsStopCall struct {
	s          *Service
	channel    *Channel             // request body, JSON-serialized by doRequest
	urlParams_ gensupport.URLParams // query parameters appended to the URL
	ctx_       context.Context      // optional; set via Context
	header_    http.Header          // extra request headers; see Header
}
4973
4974// Stop: Stop watching resources through this channel
4975func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall {
4976 c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)}
4977 c.channel = channel
4978 return c
4979}
4980
4981// Fields allows partial responses to be retrieved. See
4982// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
4983// for more information.
4984func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall {
4985 c.urlParams_.Set("fields", googleapi.CombineFields(s))
4986 return c
4987}
4988
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
//
// The context is stored on the call and passed to
// gensupport.SendRequest when Do issues the HTTP request.
func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall {
	c.ctx_ = ctx
	return c
}
4996
4997// Header returns an http.Header that can be modified by the caller to
4998// add HTTP headers to the request.
4999func (c *ChannelsStopCall) Header() http.Header {
5000 if c.header_ == nil {
5001 c.header_ = make(http.Header)
5002 }
5003 return c.header_
5004}
5005
5006func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
5007 reqHeaders := make(http.Header)
5008 for k, v := range c.header_ {
5009 reqHeaders[k] = v
5010 }
5011 reqHeaders.Set("User-Agent", c.s.userAgent())
5012 var body io.Reader = nil
5013 body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
5014 if err != nil {
5015 return nil, err
5016 }
5017 reqHeaders.Set("Content-Type", "application/json")
5018 c.urlParams_.Set("alt", alt)
5019 c.urlParams_.Set("prettyPrint", "false")
5020 urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop")
5021 urls += "?" + c.urlParams_.Encode()
5022 req, err := http.NewRequest("POST", urls, body)
5023 if err != nil {
5024 return nil, err
5025 }
5026 req.Header = reqHeaders
5027 return gensupport.SendRequest(c.ctx_, c.s.client, req)
5028}
5029
// Do executes the "storage.channels.stop" call.
func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	// Any non-2xx status is converted to an error by CheckResponse; a
	// successful stop call has no response payload to decode.
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Stop watching resources through this channel",
	//   "httpMethod": "POST",
	//   "id": "storage.channels.stop",
	//   "path": "channels/stop",
	//   "request": {
	//     "$ref": "Channel",
	//     "parameterName": "resource"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
5061
5062// method id "storage.defaultObjectAccessControls.delete":
5063
// DefaultObjectAccessControlsDeleteCall holds the state of a
// "storage.defaultObjectAccessControls.delete" call built by
// DefaultObjectAccessControlsService.Delete.
type DefaultObjectAccessControlsDeleteCall struct {
	s          *Service
	bucket     string // expands {bucket} in the request path
	entity     string // expands {entity} in the request path
	urlParams_ gensupport.URLParams // query parameters appended to the URL
	ctx_       context.Context      // optional; set via Context
	header_    http.Header          // extra request headers; see Header
}
5072
5073// Delete: Permanently deletes the default object ACL entry for the
5074// specified entity on the specified bucket.
5075func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall {
5076 c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
5077 c.bucket = bucket
5078 c.entity = entity
5079 return c
5080}
5081
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
//
// It returns the call to allow chaining.
func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
5088
5089// Fields allows partial responses to be retrieved. See
5090// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
5091// for more information.
5092func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall {
5093 c.urlParams_.Set("fields", googleapi.CombineFields(s))
5094 return c
5095}
5096
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
//
// The context is stored on the call and passed to
// gensupport.SendRequest when Do issues the HTTP request.
func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall {
	c.ctx_ = ctx
	return c
}
5104
5105// Header returns an http.Header that can be modified by the caller to
5106// add HTTP headers to the request.
5107func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
5108 if c.header_ == nil {
5109 c.header_ = make(http.Header)
5110 }
5111 return c.header_
5112}
5113
5114func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
5115 reqHeaders := make(http.Header)
5116 for k, v := range c.header_ {
5117 reqHeaders[k] = v
5118 }
5119 reqHeaders.Set("User-Agent", c.s.userAgent())
5120 var body io.Reader = nil
5121 c.urlParams_.Set("alt", alt)
5122 c.urlParams_.Set("prettyPrint", "false")
5123 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
5124 urls += "?" + c.urlParams_.Encode()
5125 req, err := http.NewRequest("DELETE", urls, body)
5126 if err != nil {
5127 return nil, err
5128 }
5129 req.Header = reqHeaders
5130 googleapi.Expand(req.URL, map[string]string{
5131 "bucket": c.bucket,
5132 "entity": c.entity,
5133 })
5134 return gensupport.SendRequest(c.ctx_, c.s.client, req)
5135}
5136
// Do executes the "storage.defaultObjectAccessControls.delete" call.
func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	// Any non-2xx status is converted to an error by CheckResponse; a
	// successful delete has no response payload to decode.
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
	//   "httpMethod": "DELETE",
	//   "id": "storage.defaultObjectAccessControls.delete",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl/{entity}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
5184
5185// method id "storage.defaultObjectAccessControls.get":
5186
// DefaultObjectAccessControlsGetCall holds the state of a
// "storage.defaultObjectAccessControls.get" call built by
// DefaultObjectAccessControlsService.Get.
type DefaultObjectAccessControlsGetCall struct {
	s            *Service
	bucket       string // expands {bucket} in the request path
	entity       string // expands {entity} in the request path
	urlParams_   gensupport.URLParams // query parameters appended to the URL
	ifNoneMatch_ string               // sent as the If-None-Match header; see IfNoneMatch
	ctx_         context.Context      // optional; set via Context
	header_      http.Header          // extra request headers; see Header
}
5196
5197// Get: Returns the default object ACL entry for the specified entity on
5198// the specified bucket.
5199func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall {
5200 c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
5201 c.bucket = bucket
5202 c.entity = entity
5203 return c
5204}
5205
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
//
// It returns the call to allow chaining.
func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
5212
5213// Fields allows partial responses to be retrieved. See
5214// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
5215// for more information.
5216func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall {
5217 c.urlParams_.Set("fields", googleapi.CombineFields(s))
5218 return c
5219}
5220
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
//
// The value is sent as the If-None-Match request header by doRequest.
func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
5230
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
//
// The context is stored on the call and passed to
// gensupport.SendRequest when Do issues the HTTP request.
func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall {
	c.ctx_ = ctx
	return c
}
5238
5239// Header returns an http.Header that can be modified by the caller to
5240// add HTTP headers to the request.
5241func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
5242 if c.header_ == nil {
5243 c.header_ = make(http.Header)
5244 }
5245 return c.header_
5246}
5247
5248func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
5249 reqHeaders := make(http.Header)
5250 for k, v := range c.header_ {
5251 reqHeaders[k] = v
5252 }
5253 reqHeaders.Set("User-Agent", c.s.userAgent())
5254 if c.ifNoneMatch_ != "" {
5255 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
5256 }
5257 var body io.Reader = nil
5258 c.urlParams_.Set("alt", alt)
5259 c.urlParams_.Set("prettyPrint", "false")
5260 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
5261 urls += "?" + c.urlParams_.Encode()
5262 req, err := http.NewRequest("GET", urls, body)
5263 if err != nil {
5264 return nil, err
5265 }
5266 req.Header = reqHeaders
5267 googleapi.Expand(req.URL, map[string]string{
5268 "bucket": c.bucket,
5269 "entity": c.entity,
5270 })
5271 return gensupport.SendRequest(c.ctx_, c.s.client, req)
5272}
5273
// Do executes the "storage.defaultObjectAccessControls.get" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (from IfNoneMatch) is checked before err and surfaced as a
	// *googleapi.Error so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// Generated calling convention: DecodeResponse is handed a double
	// pointer to the result.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.defaultObjectAccessControls.get",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl/{entity}",
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
5349
5350// method id "storage.defaultObjectAccessControls.insert":
5351
// DefaultObjectAccessControlsInsertCall holds the state of a
// "storage.defaultObjectAccessControls.insert" call built by
// DefaultObjectAccessControlsService.Insert.
type DefaultObjectAccessControlsInsertCall struct {
	s                   *Service
	bucket              string               // expands {bucket} in the request path
	objectaccesscontrol *ObjectAccessControl // request body, JSON-serialized by doRequest
	urlParams_          gensupport.URLParams // query parameters appended to the URL
	ctx_                context.Context      // optional; set via Context
	header_             http.Header          // extra request headers; see Header
}
5360
5361// Insert: Creates a new default object ACL entry on the specified
5362// bucket.
5363func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall {
5364 c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
5365 c.bucket = bucket
5366 c.objectaccesscontrol = objectaccesscontrol
5367 return c
5368}
5369
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
//
// It returns the call to allow chaining.
func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
5376
5377// Fields allows partial responses to be retrieved. See
5378// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
5379// for more information.
5380func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall {
5381 c.urlParams_.Set("fields", googleapi.CombineFields(s))
5382 return c
5383}
5384
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
//
// The context is stored on the call and passed to
// gensupport.SendRequest when Do issues the HTTP request.
func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall {
	c.ctx_ = ctx
	return c
}
5392
5393// Header returns an http.Header that can be modified by the caller to
5394// add HTTP headers to the request.
5395func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
5396 if c.header_ == nil {
5397 c.header_ = make(http.Header)
5398 }
5399 return c.header_
5400}
5401
5402func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
5403 reqHeaders := make(http.Header)
5404 for k, v := range c.header_ {
5405 reqHeaders[k] = v
5406 }
5407 reqHeaders.Set("User-Agent", c.s.userAgent())
5408 var body io.Reader = nil
5409 body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
5410 if err != nil {
5411 return nil, err
5412 }
5413 reqHeaders.Set("Content-Type", "application/json")
5414 c.urlParams_.Set("alt", alt)
5415 c.urlParams_.Set("prettyPrint", "false")
5416 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
5417 urls += "?" + c.urlParams_.Encode()
5418 req, err := http.NewRequest("POST", urls, body)
5419 if err != nil {
5420 return nil, err
5421 }
5422 req.Header = reqHeaders
5423 googleapi.Expand(req.URL, map[string]string{
5424 "bucket": c.bucket,
5425 })
5426 return gensupport.SendRequest(c.ctx_, c.s.client, req)
5427}
5428
// Do executes the "storage.defaultObjectAccessControls.insert" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 is checked before err and surfaced as a *googleapi.Error so
	// callers can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// Generated calling convention: DecodeResponse is handed a double
	// pointer to the result.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new default object ACL entry on the specified bucket.",
	//   "httpMethod": "POST",
	//   "id": "storage.defaultObjectAccessControls.insert",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl",
	//   "request": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
5500
5501// method id "storage.defaultObjectAccessControls.list":
5502
// DefaultObjectAccessControlsListCall holds the state of a
// "storage.defaultObjectAccessControls.list" call built by
// DefaultObjectAccessControlsService.List.
type DefaultObjectAccessControlsListCall struct {
	s            *Service
	bucket       string // expands {bucket} in the request path
	urlParams_   gensupport.URLParams // query parameters appended to the URL
	ifNoneMatch_ string               // sent as the If-None-Match header; see IfNoneMatch
	ctx_         context.Context      // optional; set via Context
	header_      http.Header          // extra request headers; see Header
}
5511
5512// List: Retrieves default object ACL entries on the specified bucket.
5513func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall {
5514 c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
5515 c.bucket = bucket
5516 return c
5517}
5518
5519// IfMetagenerationMatch sets the optional parameter
5520// "ifMetagenerationMatch": If present, only return default ACL listing
5521// if the bucket's current metageneration matches this value.
5522func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall {
5523 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
5524 return c
5525}
5526
5527// IfMetagenerationNotMatch sets the optional parameter
5528// "ifMetagenerationNotMatch": If present, only return default ACL
5529// listing if the bucket's current metageneration does not match the
5530// given value.
5531func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall {
5532 c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
5533 return c
5534}
5535
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
//
// It returns the call to allow chaining.
func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
5542
5543// Fields allows partial responses to be retrieved. See
5544// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
5545// for more information.
5546func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall {
5547 c.urlParams_.Set("fields", googleapi.CombineFields(s))
5548 return c
5549}
5550
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
//
// The value is sent as the If-None-Match request header by doRequest.
func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
5560
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
//
// The context is stored on the call and passed to
// gensupport.SendRequest when Do issues the HTTP request.
func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall {
	c.ctx_ = ctx
	return c
}
5568
5569// Header returns an http.Header that can be modified by the caller to
5570// add HTTP headers to the request.
5571func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
5572 if c.header_ == nil {
5573 c.header_ = make(http.Header)
5574 }
5575 return c.header_
5576}
5577
5578func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
5579 reqHeaders := make(http.Header)
5580 for k, v := range c.header_ {
5581 reqHeaders[k] = v
5582 }
5583 reqHeaders.Set("User-Agent", c.s.userAgent())
5584 if c.ifNoneMatch_ != "" {
5585 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
5586 }
5587 var body io.Reader = nil
5588 c.urlParams_.Set("alt", alt)
5589 c.urlParams_.Set("prettyPrint", "false")
5590 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
5591 urls += "?" + c.urlParams_.Encode()
5592 req, err := http.NewRequest("GET", urls, body)
5593 if err != nil {
5594 return nil, err
5595 }
5596 req.Header = reqHeaders
5597 googleapi.Expand(req.URL, map[string]string{
5598 "bucket": c.bucket,
5599 })
5600 return gensupport.SendRequest(c.ctx_, c.s.client, req)
5601}
5602
// Do executes the "storage.defaultObjectAccessControls.list" call.
// Exactly one of *ObjectAccessControls or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControls.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (from IfNoneMatch) is checked before err and surfaced as a
	// *googleapi.Error so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControls{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// Generated calling convention: DecodeResponse is handed a double
	// pointer to the result.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Retrieves default object ACL entries on the specified bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.defaultObjectAccessControls.list",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl",
	//   "response": {
	//     "$ref": "ObjectAccessControls"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
5683
5684// method id "storage.defaultObjectAccessControls.patch":
5685
// DefaultObjectAccessControlsPatchCall holds the state of a
// "storage.defaultObjectAccessControls.patch" call built by
// DefaultObjectAccessControlsService.Patch.
type DefaultObjectAccessControlsPatchCall struct {
	s                   *Service
	bucket              string               // expands {bucket} in the request path
	entity              string               // expands {entity} in the request path
	objectaccesscontrol *ObjectAccessControl // request body, JSON-serialized by doRequest
	urlParams_          gensupport.URLParams // query parameters appended to the URL
	ctx_                context.Context      // optional; set via Context
	header_             http.Header          // extra request headers; see Header
}
5695
5696// Patch: Patches a default object ACL entry on the specified bucket.
5697func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall {
5698 c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
5699 c.bucket = bucket
5700 c.entity = entity
5701 c.objectaccesscontrol = objectaccesscontrol
5702 return c
5703}
5704
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
//
// It returns the call to allow chaining.
func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
5711
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined selector is sent as the "fields" query parameter.
func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
5719
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is consulted only when Do sends the request.
func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall {
	c.ctx_ = ctx
	return c
}
5727
5728// Header returns an http.Header that can be modified by the caller to
5729// add HTTP headers to the request.
5730func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
5731 if c.header_ == nil {
5732 c.header_ = make(http.Header)
5733 }
5734 return c.header_
5735}
5736
5737func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
5738 reqHeaders := make(http.Header)
5739 for k, v := range c.header_ {
5740 reqHeaders[k] = v
5741 }
5742 reqHeaders.Set("User-Agent", c.s.userAgent())
5743 var body io.Reader = nil
5744 body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
5745 if err != nil {
5746 return nil, err
5747 }
5748 reqHeaders.Set("Content-Type", "application/json")
5749 c.urlParams_.Set("alt", alt)
5750 c.urlParams_.Set("prettyPrint", "false")
5751 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
5752 urls += "?" + c.urlParams_.Encode()
5753 req, err := http.NewRequest("PATCH", urls, body)
5754 if err != nil {
5755 return nil, err
5756 }
5757 req.Header = reqHeaders
5758 googleapi.Expand(req.URL, map[string]string{
5759 "bucket": c.bucket,
5760 "entity": c.entity,
5761 })
5762 return gensupport.SendRequest(c.ctx_, c.s.client, req)
5763}
5764
// Do executes the "storage.defaultObjectAccessControls.patch" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Convert a 304 Not Modified into a *googleapi.Error before the
	// generic error check so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed a pointer to ret so it can populate the
	// struct allocated above.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Patches a default object ACL entry on the specified bucket.",
	//   "httpMethod": "PATCH",
	//   "id": "storage.defaultObjectAccessControls.patch",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl/{entity}",
	//   "request": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
5843
5844// method id "storage.defaultObjectAccessControls.update":
5845
// DefaultObjectAccessControlsUpdateCall represents a pending
// "storage.defaultObjectAccessControls.update" request. Build it with
// DefaultObjectAccessControlsService.Update and execute it with Do.
type DefaultObjectAccessControlsUpdateCall struct {
	s                   *Service             // API service this call is bound to
	bucket              string               // required path parameter {bucket}
	entity              string               // required path parameter {entity}
	objectaccesscontrol *ObjectAccessControl // request body
	urlParams_          gensupport.URLParams // accumulated query parameters
	ctx_                context.Context      // optional context; set via Context
	header_             http.Header          // extra request headers; set via Header
}
5855
5856// Update: Updates a default object ACL entry on the specified bucket.
5857func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall {
5858 c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
5859 c.bucket = bucket
5860 c.entity = entity
5861 c.objectaccesscontrol = objectaccesscontrol
5862 return c
5863}
5864
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to permit call chaining.
func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
5871
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined selector is sent as the "fields" query parameter.
func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
5879
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is consulted only when Do sends the request.
func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall {
	c.ctx_ = ctx
	return c
}
5887
5888// Header returns an http.Header that can be modified by the caller to
5889// add HTTP headers to the request.
5890func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header {
5891 if c.header_ == nil {
5892 c.header_ = make(http.Header)
5893 }
5894 return c.header_
5895}
5896
5897func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
5898 reqHeaders := make(http.Header)
5899 for k, v := range c.header_ {
5900 reqHeaders[k] = v
5901 }
5902 reqHeaders.Set("User-Agent", c.s.userAgent())
5903 var body io.Reader = nil
5904 body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
5905 if err != nil {
5906 return nil, err
5907 }
5908 reqHeaders.Set("Content-Type", "application/json")
5909 c.urlParams_.Set("alt", alt)
5910 c.urlParams_.Set("prettyPrint", "false")
5911 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
5912 urls += "?" + c.urlParams_.Encode()
5913 req, err := http.NewRequest("PUT", urls, body)
5914 if err != nil {
5915 return nil, err
5916 }
5917 req.Header = reqHeaders
5918 googleapi.Expand(req.URL, map[string]string{
5919 "bucket": c.bucket,
5920 "entity": c.entity,
5921 })
5922 return gensupport.SendRequest(c.ctx_, c.s.client, req)
5923}
5924
// Do executes the "storage.defaultObjectAccessControls.update" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Convert a 304 Not Modified into a *googleapi.Error before the
	// generic error check so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed a pointer to ret so it can populate the
	// struct allocated above.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates a default object ACL entry on the specified bucket.",
	//   "httpMethod": "PUT",
	//   "id": "storage.defaultObjectAccessControls.update",
	//   "parameterOrder": [
	//     "bucket",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/defaultObjectAcl/{entity}",
	//   "request": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
6003
6004// method id "storage.notifications.delete":
6005
// NotificationsDeleteCall represents a pending
// "storage.notifications.delete" request. Build it with
// NotificationsService.Delete and execute it with Do.
type NotificationsDeleteCall struct {
	s            *Service             // API service this call is bound to
	bucket       string               // required path parameter {bucket}
	notification string               // required path parameter {notification}
	urlParams_   gensupport.URLParams // accumulated query parameters
	ctx_         context.Context      // optional context; set via Context
	header_      http.Header          // extra request headers; set via Header
}
6014
6015// Delete: Permanently deletes a notification subscription.
6016func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall {
6017 c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
6018 c.bucket = bucket
6019 c.notification = notification
6020 return c
6021}
6022
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to permit call chaining.
func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
6029
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined selector is sent as the "fields" query parameter.
func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
6037
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is consulted only when Do sends the request.
func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall {
	c.ctx_ = ctx
	return c
}
6045
6046// Header returns an http.Header that can be modified by the caller to
6047// add HTTP headers to the request.
6048func (c *NotificationsDeleteCall) Header() http.Header {
6049 if c.header_ == nil {
6050 c.header_ = make(http.Header)
6051 }
6052 return c.header_
6053}
6054
6055func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) {
6056 reqHeaders := make(http.Header)
6057 for k, v := range c.header_ {
6058 reqHeaders[k] = v
6059 }
6060 reqHeaders.Set("User-Agent", c.s.userAgent())
6061 var body io.Reader = nil
6062 c.urlParams_.Set("alt", alt)
6063 c.urlParams_.Set("prettyPrint", "false")
6064 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
6065 urls += "?" + c.urlParams_.Encode()
6066 req, err := http.NewRequest("DELETE", urls, body)
6067 if err != nil {
6068 return nil, err
6069 }
6070 req.Header = reqHeaders
6071 googleapi.Expand(req.URL, map[string]string{
6072 "bucket": c.bucket,
6073 "notification": c.notification,
6074 })
6075 return gensupport.SendRequest(c.ctx_, c.s.client, req)
6076}
6077
// Do executes the "storage.notifications.delete" call.
// A nil return means the notification was deleted; any non-2xx status
// code is surfaced as an error by googleapi.CheckResponse.
func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Permanently deletes a notification subscription.",
	//   "httpMethod": "DELETE",
	//   "id": "storage.notifications.delete",
	//   "parameterOrder": [
	//     "bucket",
	//     "notification"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "The parent bucket of the notification.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "notification": {
	//       "description": "ID of the notification to delete.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/notificationConfigs/{notification}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
6126
6127// method id "storage.notifications.get":
6128
// NotificationsGetCall represents a pending "storage.notifications.get"
// request. Build it with NotificationsService.Get and execute it with
// Do.
type NotificationsGetCall struct {
	s            *Service             // API service this call is bound to
	bucket       string               // required path parameter {bucket}
	notification string               // required path parameter {notification}
	urlParams_   gensupport.URLParams // accumulated query parameters
	ifNoneMatch_ string               // optional If-None-Match ETag; set via IfNoneMatch
	ctx_         context.Context      // optional context; set via Context
	header_      http.Header          // extra request headers; set via Header
}
6138
6139// Get: View a notification configuration.
6140func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall {
6141 c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
6142 c.bucket = bucket
6143 c.notification = notification
6144 return c
6145}
6146
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to permit call chaining.
func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
6153
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined selector is sent as the "fields" query parameter.
func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
6161
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
6171
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is consulted only when Do sends the request.
func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall {
	c.ctx_ = ctx
	return c
}
6179
6180// Header returns an http.Header that can be modified by the caller to
6181// add HTTP headers to the request.
6182func (c *NotificationsGetCall) Header() http.Header {
6183 if c.header_ == nil {
6184 c.header_ = make(http.Header)
6185 }
6186 return c.header_
6187}
6188
6189func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
6190 reqHeaders := make(http.Header)
6191 for k, v := range c.header_ {
6192 reqHeaders[k] = v
6193 }
6194 reqHeaders.Set("User-Agent", c.s.userAgent())
6195 if c.ifNoneMatch_ != "" {
6196 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
6197 }
6198 var body io.Reader = nil
6199 c.urlParams_.Set("alt", alt)
6200 c.urlParams_.Set("prettyPrint", "false")
6201 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
6202 urls += "?" + c.urlParams_.Encode()
6203 req, err := http.NewRequest("GET", urls, body)
6204 if err != nil {
6205 return nil, err
6206 }
6207 req.Header = reqHeaders
6208 googleapi.Expand(req.URL, map[string]string{
6209 "bucket": c.bucket,
6210 "notification": c.notification,
6211 })
6212 return gensupport.SendRequest(c.ctx_, c.s.client, req)
6213}
6214
// Do executes the "storage.notifications.get" call.
// Exactly one of *Notification or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Notification.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Convert a 304 Not Modified into a *googleapi.Error before the
	// generic error check so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Notification{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed a pointer to ret so it can populate the
	// struct allocated above.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "View a notification configuration.",
	//   "httpMethod": "GET",
	//   "id": "storage.notifications.get",
	//   "parameterOrder": [
	//     "bucket",
	//     "notification"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "The parent bucket of the notification.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "notification": {
	//       "description": "Notification ID",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/notificationConfigs/{notification}",
	//   "response": {
	//     "$ref": "Notification"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
6293
6294// method id "storage.notifications.insert":
6295
// NotificationsInsertCall represents a pending
// "storage.notifications.insert" request. Build it with
// NotificationsService.Insert and execute it with Do.
type NotificationsInsertCall struct {
	s            *Service             // API service this call is bound to
	bucket       string               // required path parameter {bucket}
	notification *Notification        // request body
	urlParams_   gensupport.URLParams // accumulated query parameters
	ctx_         context.Context      // optional context; set via Context
	header_      http.Header          // extra request headers; set via Header
}
6304
6305// Insert: Creates a notification subscription for a given bucket.
6306func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall {
6307 c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
6308 c.bucket = bucket
6309 c.notification = notification
6310 return c
6311}
6312
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to permit call chaining.
func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
6319
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined selector is sent as the "fields" query parameter.
func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
6327
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is consulted only when Do sends the request.
func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall {
	c.ctx_ = ctx
	return c
}
6335
6336// Header returns an http.Header that can be modified by the caller to
6337// add HTTP headers to the request.
6338func (c *NotificationsInsertCall) Header() http.Header {
6339 if c.header_ == nil {
6340 c.header_ = make(http.Header)
6341 }
6342 return c.header_
6343}
6344
6345func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) {
6346 reqHeaders := make(http.Header)
6347 for k, v := range c.header_ {
6348 reqHeaders[k] = v
6349 }
6350 reqHeaders.Set("User-Agent", c.s.userAgent())
6351 var body io.Reader = nil
6352 body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification)
6353 if err != nil {
6354 return nil, err
6355 }
6356 reqHeaders.Set("Content-Type", "application/json")
6357 c.urlParams_.Set("alt", alt)
6358 c.urlParams_.Set("prettyPrint", "false")
6359 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
6360 urls += "?" + c.urlParams_.Encode()
6361 req, err := http.NewRequest("POST", urls, body)
6362 if err != nil {
6363 return nil, err
6364 }
6365 req.Header = reqHeaders
6366 googleapi.Expand(req.URL, map[string]string{
6367 "bucket": c.bucket,
6368 })
6369 return gensupport.SendRequest(c.ctx_, c.s.client, req)
6370}
6371
// Do executes the "storage.notifications.insert" call.
// Exactly one of *Notification or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Notification.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Convert a 304 Not Modified into a *googleapi.Error before the
	// generic error check so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Notification{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed a pointer to ret so it can populate the
	// struct allocated above.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a notification subscription for a given bucket.",
	//   "httpMethod": "POST",
	//   "id": "storage.notifications.insert",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "The parent bucket of the notification.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/notificationConfigs",
	//   "request": {
	//     "$ref": "Notification"
	//   },
	//   "response": {
	//     "$ref": "Notification"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
6444
6445// method id "storage.notifications.list":
6446
// NotificationsListCall represents a pending
// "storage.notifications.list" request. Build it with
// NotificationsService.List and execute it with Do.
type NotificationsListCall struct {
	s            *Service             // API service this call is bound to
	bucket       string               // required path parameter {bucket}
	urlParams_   gensupport.URLParams // accumulated query parameters
	ifNoneMatch_ string               // optional If-None-Match ETag; set via IfNoneMatch
	ctx_         context.Context      // optional context; set via Context
	header_      http.Header          // extra request headers; set via Header
}
6455
6456// List: Retrieves a list of notification subscriptions for a given
6457// bucket.
6458func (r *NotificationsService) List(bucket string) *NotificationsListCall {
6459 c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
6460 c.bucket = bucket
6461 return c
6462}
6463
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// It returns c to permit call chaining.
func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
6470
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// The combined selector is sent as the "fields" query parameter.
func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
6478
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
6488
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is consulted only when Do sends the request.
func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall {
	c.ctx_ = ctx
	return c
}
6496
6497// Header returns an http.Header that can be modified by the caller to
6498// add HTTP headers to the request.
6499func (c *NotificationsListCall) Header() http.Header {
6500 if c.header_ == nil {
6501 c.header_ = make(http.Header)
6502 }
6503 return c.header_
6504}
6505
6506func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
6507 reqHeaders := make(http.Header)
6508 for k, v := range c.header_ {
6509 reqHeaders[k] = v
6510 }
6511 reqHeaders.Set("User-Agent", c.s.userAgent())
6512 if c.ifNoneMatch_ != "" {
6513 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
6514 }
6515 var body io.Reader = nil
6516 c.urlParams_.Set("alt", alt)
6517 c.urlParams_.Set("prettyPrint", "false")
6518 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
6519 urls += "?" + c.urlParams_.Encode()
6520 req, err := http.NewRequest("GET", urls, body)
6521 if err != nil {
6522 return nil, err
6523 }
6524 req.Header = reqHeaders
6525 googleapi.Expand(req.URL, map[string]string{
6526 "bucket": c.bucket,
6527 })
6528 return gensupport.SendRequest(c.ctx_, c.s.client, req)
6529}
6530
// Do executes the "storage.notifications.list" call.
// Exactly one of *Notifications or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Notifications.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Convert a 304 Not Modified into a *googleapi.Error before the
	// generic error check so callers can detect it with
	// googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Notifications{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed a pointer to ret so it can populate the
	// struct allocated above.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Retrieves a list of notification subscriptions for a given bucket.",
	//   "httpMethod": "GET",
	//   "id": "storage.notifications.list",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a Google Cloud Storage bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/notificationConfigs",
	//   "response": {
	//     "$ref": "Notifications"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
6602
6603// method id "storage.objectAccessControls.delete":
6604
// ObjectAccessControlsDeleteCall represents a pending
// "storage.objectAccessControls.delete" request. Build it with
// ObjectAccessControlsService.Delete and execute it with Do.
type ObjectAccessControlsDeleteCall struct {
	s          *Service             // API service this call is bound to
	bucket     string               // required path parameter {bucket}
	object     string               // required path parameter {object}
	entity     string               // required path parameter {entity}
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_       context.Context      // optional context; set via Context
	header_    http.Header          // extra request headers; set via Header
}
6614
6615// Delete: Permanently deletes the ACL entry for the specified entity on
6616// the specified object.
6617func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall {
6618 c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
6619 c.bucket = bucket
6620 c.object = object
6621 c.entity = entity
6622 return c
6623}
6624
// Generation sets the optional parameter "generation": If present,
// selects a specific revision of this object (as opposed to the latest
// version, the default).
// The int64 is rendered with fmt.Sprint before being added as a query
// parameter.
func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall {
	c.urlParams_.Set("generation", fmt.Sprint(generation))
	return c
}
6632
6633// UserProject sets the optional parameter "userProject": The project to
6634// be billed for this request. Required for Requester Pays buckets.
6635func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall {
6636 c.urlParams_.Set("userProject", userProject)
6637 return c
6638}
6639
6640// Fields allows partial responses to be retrieved. See
6641// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
6642// for more information.
6643func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall {
6644 c.urlParams_.Set("fields", googleapi.CombineFields(s))
6645 return c
6646}
6647
6648// Context sets the context to be used in this call's Do method. Any
6649// pending HTTP request will be aborted if the provided context is
6650// canceled.
6651func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall {
6652 c.ctx_ = ctx
6653 return c
6654}
6655
6656// Header returns an http.Header that can be modified by the caller to
6657// add HTTP headers to the request.
6658func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
6659 if c.header_ == nil {
6660 c.header_ = make(http.Header)
6661 }
6662 return c.header_
6663}
6664
6665func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
6666 reqHeaders := make(http.Header)
6667 for k, v := range c.header_ {
6668 reqHeaders[k] = v
6669 }
6670 reqHeaders.Set("User-Agent", c.s.userAgent())
6671 var body io.Reader = nil
6672 c.urlParams_.Set("alt", alt)
6673 c.urlParams_.Set("prettyPrint", "false")
6674 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
6675 urls += "?" + c.urlParams_.Encode()
6676 req, err := http.NewRequest("DELETE", urls, body)
6677 if err != nil {
6678 return nil, err
6679 }
6680 req.Header = reqHeaders
6681 googleapi.Expand(req.URL, map[string]string{
6682 "bucket": c.bucket,
6683 "object": c.object,
6684 "entity": c.entity,
6685 })
6686 return gensupport.SendRequest(c.ctx_, c.s.client, req)
6687}
6688
6689// Do executes the "storage.objectAccessControls.delete" call.
6690func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
6691 gensupport.SetOptions(c.urlParams_, opts...)
6692 res, err := c.doRequest("json")
6693 if err != nil {
6694 return err
6695 }
6696 defer googleapi.CloseBody(res)
6697 if err := googleapi.CheckResponse(res); err != nil {
6698 return err
6699 }
6700 return nil
6701 // {
6702 // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
6703 // "httpMethod": "DELETE",
6704 // "id": "storage.objectAccessControls.delete",
6705 // "parameterOrder": [
6706 // "bucket",
6707 // "object",
6708 // "entity"
6709 // ],
6710 // "parameters": {
6711 // "bucket": {
6712 // "description": "Name of a bucket.",
6713 // "location": "path",
6714 // "required": true,
6715 // "type": "string"
6716 // },
6717 // "entity": {
6718 // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
6719 // "location": "path",
6720 // "required": true,
6721 // "type": "string"
6722 // },
6723 // "generation": {
6724 // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
6725 // "format": "int64",
6726 // "location": "query",
6727 // "type": "string"
6728 // },
6729 // "object": {
6730 // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
6731 // "location": "path",
6732 // "required": true,
6733 // "type": "string"
6734 // },
6735 // "userProject": {
6736 // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
6737 // "location": "query",
6738 // "type": "string"
6739 // }
6740 // },
6741 // "path": "b/{bucket}/o/{object}/acl/{entity}",
6742 // "scopes": [
6743 // "https://www.googleapis.com/auth/cloud-platform",
6744 // "https://www.googleapis.com/auth/devstorage.full_control"
6745 // ]
6746 // }
6747
6748}
6749
6750// method id "storage.objectAccessControls.get":
6751
// ObjectAccessControlsGetCall describes a pending
// "storage.objectAccessControls.get" request. Construct it with
// ObjectAccessControlsService.Get, optionally configure it with the
// fluent setters below, and execute it with Do.
type ObjectAccessControlsGetCall struct {
	s            *Service
	bucket       string
	object       string
	entity       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Returns the ACL entry for the specified entity on the specified
// object.
func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
	c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	c.object = object
	c.entity = entity
	return c
}

// Generation sets the optional parameter "generation": If present,
// selects a specific revision of this object (as opposed to the latest
// version, the default).
func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall {
	c.urlParams_.Set("generation", fmt.Sprint(generation))
	return c
}

// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP GET request for this call — copying any
// caller-supplied headers, adding If-None-Match when set, encoding the
// accumulated query parameters, and expanding the URL path template —
// then sends it via gensupport.SendRequest, which honors c.ctx_ if set.
func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	// Disable pretty-printing to keep the response payload small.
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
		"entity": c.entity,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "storage.objectAccessControls.get" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error (checked
	// before err) so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns the ACL entry for the specified entity on the specified object.",
	//   "httpMethod": "GET",
	//   "id": "storage.objectAccessControls.get",
	//   "parameterOrder": [
	//     "bucket",
	//     "object",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}/acl/{entity}",
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
6938
6939// method id "storage.objectAccessControls.insert":
6940
// ObjectAccessControlsInsertCall describes a pending
// "storage.objectAccessControls.insert" request. Construct it with
// ObjectAccessControlsService.Insert, optionally configure it with the
// fluent setters below, and execute it with Do.
type ObjectAccessControlsInsertCall struct {
	s                   *Service
	bucket              string
	object              string
	objectaccesscontrol *ObjectAccessControl
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// Insert: Creates a new ACL entry on the specified object.
func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall {
	c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	c.object = object
	c.objectaccesscontrol = objectaccesscontrol
	return c
}

// Generation sets the optional parameter "generation": If present,
// selects a specific revision of this object (as opposed to the latest
// version, the default).
func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall {
	c.urlParams_.Set("generation", fmt.Sprint(generation))
	return c
}

// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsInsertCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP POST request for this call — JSON-encoding
// the ObjectAccessControl request body, copying any caller-supplied
// headers, encoding the accumulated query parameters, and expanding the
// URL path template — then sends it via gensupport.SendRequest, which
// honors c.ctx_ if one was set.
func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	// Disable pretty-printing to keep the response payload small.
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "storage.objectAccessControls.insert" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error (checked
	// before err) so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new ACL entry on the specified object.",
	//   "httpMethod": "POST",
	//   "id": "storage.objectAccessControls.insert",
	//   "parameterOrder": [
	//     "bucket",
	//     "object"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}/acl",
	//   "request": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
7112
7113// method id "storage.objectAccessControls.list":
7114
// ObjectAccessControlsListCall describes a pending
// "storage.objectAccessControls.list" request. Construct it with
// ObjectAccessControlsService.List, optionally configure it with the
// fluent setters below, and execute it with Do.
type ObjectAccessControlsListCall struct {
	s            *Service
	bucket       string
	object       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Retrieves ACL entries on the specified object.
func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall {
	c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	c.object = object
	return c
}

// Generation sets the optional parameter "generation": If present,
// selects a specific revision of this object (as opposed to the latest
// version, the default).
func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall {
	c.urlParams_.Set("generation", fmt.Sprint(generation))
	return c
}

// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP GET request for this call — copying any
// caller-supplied headers, adding If-None-Match when set, encoding the
// accumulated query parameters, and expanding the URL path template —
// then sends it via gensupport.SendRequest, which honors c.ctx_ if set.
func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	// Disable pretty-printing to keep the response payload small.
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "storage.objectAccessControls.list" call.
// Exactly one of *ObjectAccessControls or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControls.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error (checked
	// before err) so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControls{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Retrieves ACL entries on the specified object.",
	//   "httpMethod": "GET",
	//   "id": "storage.objectAccessControls.list",
	//   "parameterOrder": [
	//     "bucket",
	//     "object"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}/acl",
	//   "response": {
	//     "$ref": "ObjectAccessControls"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
7290
7291// method id "storage.objectAccessControls.patch":
7292
// ObjectAccessControlsPatchCall describes a pending
// "storage.objectAccessControls.patch" request. Construct it with
// ObjectAccessControlsService.Patch, optionally configure it with the
// fluent setters below, and execute it with Do.
type ObjectAccessControlsPatchCall struct {
	s                   *Service
	bucket              string
	object              string
	entity              string
	objectaccesscontrol *ObjectAccessControl
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// Patch: Patches an ACL entry on the specified object.
func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall {
	c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.bucket = bucket
	c.object = object
	c.entity = entity
	c.objectaccesscontrol = objectaccesscontrol
	return c
}

// Generation sets the optional parameter "generation": If present,
// selects a specific revision of this object (as opposed to the latest
// version, the default).
func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall {
	c.urlParams_.Set("generation", fmt.Sprint(generation))
	return c
}

// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP PATCH request for this call — JSON-encoding
// the ObjectAccessControl request body, copying any caller-supplied
// headers, encoding the accumulated query parameters, and expanding the
// URL path template — then sends it via gensupport.SendRequest, which
// honors c.ctx_ if one was set.
func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	// Disable pretty-printing to keep the response payload small.
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
		"entity": c.entity,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "storage.objectAccessControls.patch" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error (checked
	// before err) so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ObjectAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Patches an ACL entry on the specified object.",
	//   "httpMethod": "PATCH",
	//   "id": "storage.objectAccessControls.patch",
	//   "parameterOrder": [
	//     "bucket",
	//     "object",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}/acl/{entity}",
	//   "request": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
7474
7475// method id "storage.objectAccessControls.update":
7476
7477type ObjectAccessControlsUpdateCall struct {
7478 s *Service
7479 bucket string
7480 object string
7481 entity string
7482 objectaccesscontrol *ObjectAccessControl
7483 urlParams_ gensupport.URLParams
7484 ctx_ context.Context
7485 header_ http.Header
7486}
7487
7488// Update: Updates an ACL entry on the specified object.
7489func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall {
7490 c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
7491 c.bucket = bucket
7492 c.object = object
7493 c.entity = entity
7494 c.objectaccesscontrol = objectaccesscontrol
7495 return c
7496}
7497
7498// Generation sets the optional parameter "generation": If present,
7499// selects a specific revision of this object (as opposed to the latest
7500// version, the default).
7501func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall {
7502 c.urlParams_.Set("generation", fmt.Sprint(generation))
7503 return c
7504}
7505
7506// UserProject sets the optional parameter "userProject": The project to
7507// be billed for this request. Required for Requester Pays buckets.
7508func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall {
7509 c.urlParams_.Set("userProject", userProject)
7510 return c
7511}
7512
7513// Fields allows partial responses to be retrieved. See
7514// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
7515// for more information.
7516func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall {
7517 c.urlParams_.Set("fields", googleapi.CombineFields(s))
7518 return c
7519}
7520
7521// Context sets the context to be used in this call's Do method. Any
7522// pending HTTP request will be aborted if the provided context is
7523// canceled.
7524func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall {
7525 c.ctx_ = ctx
7526 return c
7527}
7528
7529// Header returns an http.Header that can be modified by the caller to
7530// add HTTP headers to the request.
7531func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
7532 if c.header_ == nil {
7533 c.header_ = make(http.Header)
7534 }
7535 return c.header_
7536}
7537
// doRequest builds and issues the HTTP PUT request for this call,
// returning the raw response. alt selects the response encoding
// (always "json" when invoked from Do).
func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers so the caller's map is not aliased.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// Serialize the request body as plain JSON (no data wrapper).
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	// Fix the alt/prettyPrint parameters before encoding the query string.
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PUT", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {bucket}/{object}/{entity} path template variables.
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
		"entity": c.entity,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
7566
// Do executes the "storage.objectAccessControls.update" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Record the HTTP response metadata alongside the decoded body.
	ret := &ObjectAccessControl{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // DecodeResponse is handed a **ObjectAccessControl
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// The comment below is the API discovery metadata for this method.
	// {
	//   "description": "Updates an ACL entry on the specified object.",
	//   "httpMethod": "PUT",
	//   "id": "storage.objectAccessControls.update",
	//   "parameterOrder": [
	//     "bucket",
	//     "object",
	//     "entity"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of a bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "entity": {
	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}/acl/{entity}",
	//   "request": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
7658
7659// method id "storage.objects.compose":
7660
// ObjectsComposeCall holds the parameters for a
// "storage.objects.compose" request. It is constructed by
// ObjectsService.Compose and executed by Do.
type ObjectsComposeCall struct {
	s                 *Service             // parent service: supplies HTTP client, base path and user agent
	destinationBucket string               // "destinationBucket" path parameter
	destinationObject string               // "destinationObject" path parameter
	composerequest    *ComposeRequest      // JSON request body
	urlParams_        gensupport.URLParams // accumulated query parameters
	ctx_              context.Context      // optional context controlling the HTTP request
	header_           http.Header          // extra request headers set via Header()
}
7670
7671// Compose: Concatenates a list of existing objects into a new object in
7672// the same bucket.
7673func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall {
7674 c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
7675 c.destinationBucket = destinationBucket
7676 c.destinationObject = destinationObject
7677 c.composerequest = composerequest
7678 return c
7679}
7680
7681// DestinationPredefinedAcl sets the optional parameter
7682// "destinationPredefinedAcl": Apply a predefined set of access controls
7683// to the destination object.
7684//
7685// Possible values:
7686// "authenticatedRead" - Object owner gets OWNER access, and
7687// allAuthenticatedUsers get READER access.
7688// "bucketOwnerFullControl" - Object owner gets OWNER access, and
7689// project team owners get OWNER access.
7690// "bucketOwnerRead" - Object owner gets OWNER access, and project
7691// team owners get READER access.
7692// "private" - Object owner gets OWNER access.
7693// "projectPrivate" - Object owner gets OWNER access, and project team
7694// members get access according to their roles.
7695// "publicRead" - Object owner gets OWNER access, and allUsers get
7696// READER access.
7697func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall {
7698 c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
7699 return c
7700}
7701
7702// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
7703// Makes the operation conditional on whether the object's current
7704// generation matches the given value. Setting to 0 makes the operation
7705// succeed only if there are no live versions of the object.
7706func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall {
7707 c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
7708 return c
7709}
7710
7711// IfMetagenerationMatch sets the optional parameter
7712// "ifMetagenerationMatch": Makes the operation conditional on whether
7713// the object's current metageneration matches the given value.
7714func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall {
7715 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
7716 return c
7717}
7718
7719// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of
7720// the Cloud KMS key, of the form
7721// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
7722// that will be used to encrypt the object. Overrides the object
7723// metadata's kms_key_name value, if any.
7724func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall {
7725 c.urlParams_.Set("kmsKeyName", kmsKeyName)
7726 return c
7727}
7728
7729// UserProject sets the optional parameter "userProject": The project to
7730// be billed for this request. Required for Requester Pays buckets.
7731func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall {
7732 c.urlParams_.Set("userProject", userProject)
7733 return c
7734}
7735
7736// Fields allows partial responses to be retrieved. See
7737// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
7738// for more information.
7739func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall {
7740 c.urlParams_.Set("fields", googleapi.CombineFields(s))
7741 return c
7742}
7743
7744// Context sets the context to be used in this call's Do method. Any
7745// pending HTTP request will be aborted if the provided context is
7746// canceled.
7747func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall {
7748 c.ctx_ = ctx
7749 return c
7750}
7751
7752// Header returns an http.Header that can be modified by the caller to
7753// add HTTP headers to the request.
7754func (c *ObjectsComposeCall) Header() http.Header {
7755 if c.header_ == nil {
7756 c.header_ = make(http.Header)
7757 }
7758 return c.header_
7759}
7760
// doRequest builds and issues the HTTP POST request for this call,
// returning the raw response. alt selects the response encoding
// (always "json" when invoked from Do).
func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers so the caller's map is not aliased.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// Serialize the request body as plain JSON (no data wrapper).
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	// Fix the alt/prettyPrint parameters before encoding the query string.
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the path template variables with their escaped values.
	googleapi.Expand(req.URL, map[string]string{
		"destinationBucket": c.destinationBucket,
		"destinationObject": c.destinationObject,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
7788
// Do executes the "storage.objects.compose" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Record the HTTP response metadata alongside the decoded body.
	ret := &Object{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // DecodeResponse is handed a **Object
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// The comment below is the API discovery metadata for this method.
	// {
	//   "description": "Concatenates a list of existing objects into a new object in the same bucket.",
	//   "httpMethod": "POST",
	//   "id": "storage.objects.compose",
	//   "parameterOrder": [
	//     "destinationBucket",
	//     "destinationObject"
	//   ],
	//   "parameters": {
	//     "destinationBucket": {
	//       "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "destinationObject": {
	//       "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "destinationPredefinedAcl": {
	//       "description": "Apply a predefined set of access controls to the destination object.",
	//       "enum": [
	//         "authenticatedRead",
	//         "bucketOwnerFullControl",
	//         "bucketOwnerRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead"
	//       ],
	//       "enumDescriptions": [
	//         "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Object owner gets OWNER access, and project team owners get OWNER access.",
	//         "Object owner gets OWNER access, and project team owners get READER access.",
	//         "Object owner gets OWNER access.",
	//         "Object owner gets OWNER access, and project team members get access according to their roles.",
	//         "Object owner gets OWNER access, and allUsers get READER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "kmsKeyName": {
	//       "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{destinationBucket}/o/{destinationObject}/compose",
	//   "request": {
	//     "$ref": "ComposeRequest"
	//   },
	//   "response": {
	//     "$ref": "Object"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
7906
7907// method id "storage.objects.copy":
7908
// ObjectsCopyCall holds the parameters for a "storage.objects.copy"
// request. It is constructed by ObjectsService.Copy and executed by
// Do.
type ObjectsCopyCall struct {
	s                 *Service             // parent service: supplies HTTP client, base path and user agent
	sourceBucket      string               // "sourceBucket" path parameter
	sourceObject      string               // "sourceObject" path parameter
	destinationBucket string               // "destinationBucket" path parameter
	destinationObject string               // "destinationObject" path parameter
	object            *Object              // JSON request body (destination object metadata)
	urlParams_        gensupport.URLParams // accumulated query parameters
	ctx_              context.Context      // optional context controlling the HTTP request
	header_           http.Header          // extra request headers set via Header()
}
7920
7921// Copy: Copies a source object to a destination object. Optionally
7922// overrides metadata.
7923func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall {
7924 c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
7925 c.sourceBucket = sourceBucket
7926 c.sourceObject = sourceObject
7927 c.destinationBucket = destinationBucket
7928 c.destinationObject = destinationObject
7929 c.object = object
7930 return c
7931}
7932
7933// DestinationPredefinedAcl sets the optional parameter
7934// "destinationPredefinedAcl": Apply a predefined set of access controls
7935// to the destination object.
7936//
7937// Possible values:
7938// "authenticatedRead" - Object owner gets OWNER access, and
7939// allAuthenticatedUsers get READER access.
7940// "bucketOwnerFullControl" - Object owner gets OWNER access, and
7941// project team owners get OWNER access.
7942// "bucketOwnerRead" - Object owner gets OWNER access, and project
7943// team owners get READER access.
7944// "private" - Object owner gets OWNER access.
7945// "projectPrivate" - Object owner gets OWNER access, and project team
7946// members get access according to their roles.
7947// "publicRead" - Object owner gets OWNER access, and allUsers get
7948// READER access.
7949func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall {
7950 c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
7951 return c
7952}
7953
7954// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
7955// Makes the operation conditional on whether the destination object's
7956// current generation matches the given value. Setting to 0 makes the
7957// operation succeed only if there are no live versions of the object.
7958func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall {
7959 c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
7960 return c
7961}
7962
7963// IfGenerationNotMatch sets the optional parameter
7964// "ifGenerationNotMatch": Makes the operation conditional on whether
7965// the destination object's current generation does not match the given
7966// value. If no live object exists, the precondition fails. Setting to 0
7967// makes the operation succeed only if there is a live version of the
7968// object.
7969func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall {
7970 c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
7971 return c
7972}
7973
7974// IfMetagenerationMatch sets the optional parameter
7975// "ifMetagenerationMatch": Makes the operation conditional on whether
7976// the destination object's current metageneration matches the given
7977// value.
7978func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall {
7979 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
7980 return c
7981}
7982
7983// IfMetagenerationNotMatch sets the optional parameter
7984// "ifMetagenerationNotMatch": Makes the operation conditional on
7985// whether the destination object's current metageneration does not
7986// match the given value.
7987func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall {
7988 c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
7989 return c
7990}
7991
7992// IfSourceGenerationMatch sets the optional parameter
7993// "ifSourceGenerationMatch": Makes the operation conditional on whether
7994// the source object's current generation matches the given value.
7995func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall {
7996 c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch))
7997 return c
7998}
7999
8000// IfSourceGenerationNotMatch sets the optional parameter
8001// "ifSourceGenerationNotMatch": Makes the operation conditional on
8002// whether the source object's current generation does not match the
8003// given value.
8004func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall {
8005 c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch))
8006 return c
8007}
8008
8009// IfSourceMetagenerationMatch sets the optional parameter
8010// "ifSourceMetagenerationMatch": Makes the operation conditional on
8011// whether the source object's current metageneration matches the given
8012// value.
8013func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall {
8014 c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
8015 return c
8016}
8017
8018// IfSourceMetagenerationNotMatch sets the optional parameter
8019// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
8020// whether the source object's current metageneration does not match the
8021// given value.
8022func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall {
8023 c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
8024 return c
8025}
8026
8027// Projection sets the optional parameter "projection": Set of
8028// properties to return. Defaults to noAcl, unless the object resource
8029// specifies the acl property, when it defaults to full.
8030//
8031// Possible values:
8032// "full" - Include all properties.
8033// "noAcl" - Omit the owner, acl property.
8034func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall {
8035 c.urlParams_.Set("projection", projection)
8036 return c
8037}
8038
8039// SourceGeneration sets the optional parameter "sourceGeneration": If
8040// present, selects a specific revision of the source object (as opposed
8041// to the latest version, the default).
8042func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall {
8043 c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration))
8044 return c
8045}
8046
8047// UserProject sets the optional parameter "userProject": The project to
8048// be billed for this request. Required for Requester Pays buckets.
8049func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall {
8050 c.urlParams_.Set("userProject", userProject)
8051 return c
8052}
8053
8054// Fields allows partial responses to be retrieved. See
8055// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
8056// for more information.
8057func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall {
8058 c.urlParams_.Set("fields", googleapi.CombineFields(s))
8059 return c
8060}
8061
8062// Context sets the context to be used in this call's Do method. Any
8063// pending HTTP request will be aborted if the provided context is
8064// canceled.
8065func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall {
8066 c.ctx_ = ctx
8067 return c
8068}
8069
8070// Header returns an http.Header that can be modified by the caller to
8071// add HTTP headers to the request.
8072func (c *ObjectsCopyCall) Header() http.Header {
8073 if c.header_ == nil {
8074 c.header_ = make(http.Header)
8075 }
8076 return c.header_
8077}
8078
// doRequest builds and issues the HTTP POST request for this call,
// returning the raw response. alt selects the response encoding
// (always "json" when invoked from Do).
func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers so the caller's map is not aliased.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// Serialize the request body as plain JSON (no data wrapper).
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	// Fix the alt/prettyPrint parameters before encoding the query string.
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the path template variables with their escaped values.
	googleapi.Expand(req.URL, map[string]string{
		"sourceBucket":      c.sourceBucket,
		"sourceObject":      c.sourceObject,
		"destinationBucket": c.destinationBucket,
		"destinationObject": c.destinationObject,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
8108
// Do executes the "storage.objects.copy" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Record the HTTP response metadata alongside the decoded body.
	ret := &Object{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // DecodeResponse is handed a **Object
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// The comment below is the API discovery metadata for this method.
	// {
	//   "description": "Copies a source object to a destination object. Optionally overrides metadata.",
	//   "httpMethod": "POST",
	//   "id": "storage.objects.copy",
	//   "parameterOrder": [
	//     "sourceBucket",
	//     "sourceObject",
	//     "destinationBucket",
	//     "destinationObject"
	//   ],
	//   "parameters": {
	//     "destinationBucket": {
	//       "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "destinationObject": {
	//       "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "destinationPredefinedAcl": {
	//       "description": "Apply a predefined set of access controls to the destination object.",
	//       "enum": [
	//         "authenticatedRead",
	//         "bucketOwnerFullControl",
	//         "bucketOwnerRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead"
	//       ],
	//       "enumDescriptions": [
	//         "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Object owner gets OWNER access, and project team owners get OWNER access.",
	//         "Object owner gets OWNER access, and project team owners get READER access.",
	//         "Object owner gets OWNER access.",
	//         "Object owner gets OWNER access, and project team members get access according to their roles.",
	//         "Object owner gets OWNER access, and allUsers get READER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifSourceGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifSourceGenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifSourceMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifSourceMetagenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit the owner, acl property."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "sourceBucket": {
	//       "description": "Name of the bucket in which to find the source object.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "sourceGeneration": {
	//       "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "sourceObject": {
	//       "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
	//   "request": {
	//     "$ref": "Object"
	//   },
	//   "response": {
	//     "$ref": "Object"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
8290
8291// method id "storage.objects.delete":
8292
// ObjectsDeleteCall holds the parameters for a
// "storage.objects.delete" request. It is constructed by
// ObjectsService.Delete.
type ObjectsDeleteCall struct {
	s          *Service             // parent service: supplies HTTP client, base path and user agent
	bucket     string               // "bucket" path parameter
	object     string               // "object" path parameter
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_       context.Context      // optional context controlling the HTTP request
	header_    http.Header          // extra request headers set via Header()
}
8301
8302// Delete: Deletes an object and its metadata. Deletions are permanent
8303// if versioning is not enabled for the bucket, or if the generation
8304// parameter is used.
8305func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall {
8306 c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
8307 c.bucket = bucket
8308 c.object = object
8309 return c
8310}
8311
8312// Generation sets the optional parameter "generation": If present,
8313// permanently deletes a specific revision of this object (as opposed to
8314// the latest version, the default).
8315func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall {
8316 c.urlParams_.Set("generation", fmt.Sprint(generation))
8317 return c
8318}
8319
8320// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
8321// Makes the operation conditional on whether the object's current
8322// generation matches the given value. Setting to 0 makes the operation
8323// succeed only if there are no live versions of the object.
8324func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall {
8325 c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
8326 return c
8327}
8328
8329// IfGenerationNotMatch sets the optional parameter
8330// "ifGenerationNotMatch": Makes the operation conditional on whether
8331// the object's current generation does not match the given value. If no
8332// live object exists, the precondition fails. Setting to 0 makes the
8333// operation succeed only if there is a live version of the object.
8334func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall {
8335 c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
8336 return c
8337}
8338
8339// IfMetagenerationMatch sets the optional parameter
8340// "ifMetagenerationMatch": Makes the operation conditional on whether
8341// the object's current metageneration matches the given value.
8342func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall {
8343 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
8344 return c
8345}
8346
8347// IfMetagenerationNotMatch sets the optional parameter
8348// "ifMetagenerationNotMatch": Makes the operation conditional on
8349// whether the object's current metageneration does not match the given
8350// value.
8351func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall {
8352 c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
8353 return c
8354}
8355
8356// UserProject sets the optional parameter "userProject": The project to
8357// be billed for this request. Required for Requester Pays buckets.
8358func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall {
8359 c.urlParams_.Set("userProject", userProject)
8360 return c
8361}
8362
8363// Fields allows partial responses to be retrieved. See
8364// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
8365// for more information.
8366func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall {
8367 c.urlParams_.Set("fields", googleapi.CombineFields(s))
8368 return c
8369}
8370
8371// Context sets the context to be used in this call's Do method. Any
8372// pending HTTP request will be aborted if the provided context is
8373// canceled.
8374func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall {
8375 c.ctx_ = ctx
8376 return c
8377}
8378
8379// Header returns an http.Header that can be modified by the caller to
8380// add HTTP headers to the request.
8381func (c *ObjectsDeleteCall) Header() http.Header {
8382 if c.header_ == nil {
8383 c.header_ = make(http.Header)
8384 }
8385 return c.header_
8386}
8387
// doRequest assembles and sends the HTTP DELETE request for this call
// and returns the raw response. alt selects the response format
// ("json" for this method).
func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers so the caller's map is not aliased.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Fill the {bucket} and {object} path template variables.
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
8410
// Do executes the "storage.objects.delete" call.
// A nil return means the object was deleted; any non-2xx HTTP status
// is returned as an error (typically a *googleapi.Error).
func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if err != nil {
		return err
	}
	// Drain and close the body so the connection can be reused.
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return err
	}
	return nil
	// {
	//   "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
	//   "httpMethod": "DELETE",
	//   "id": "storage.objects.delete",
	//   "parameterOrder": [
	//     "bucket",
	//     "object"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of the bucket in which the object resides.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}",
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
8489
8490// method id "storage.objects.get":
8491
// ObjectsGetCall holds the state of a "storage.objects.get" call;
// build it with ObjectsService.Get and execute it with Do (metadata)
// or Download (media).
type ObjectsGetCall struct {
	s            *Service             // service the call is issued against
	bucket       string               // bucket containing the object
	object       string               // object to retrieve
	urlParams_   gensupport.URLParams // accumulated query parameters
	ifNoneMatch_ string               // optional If-None-Match ETag precondition
	ctx_         context.Context      // optional context set via Context
	header_      http.Header          // extra headers, lazily created by Header
}
8501
8502// Get: Retrieves an object or its metadata.
8503func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall {
8504 c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
8505 c.bucket = bucket
8506 c.object = object
8507 return c
8508}
8509
8510// Generation sets the optional parameter "generation": If present,
8511// selects a specific revision of this object (as opposed to the latest
8512// version, the default).
8513func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall {
8514 c.urlParams_.Set("generation", fmt.Sprint(generation))
8515 return c
8516}
8517
8518// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
8519// Makes the operation conditional on whether the object's current
8520// generation matches the given value. Setting to 0 makes the operation
8521// succeed only if there are no live versions of the object.
8522func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall {
8523 c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
8524 return c
8525}
8526
8527// IfGenerationNotMatch sets the optional parameter
8528// "ifGenerationNotMatch": Makes the operation conditional on whether
8529// the object's current generation does not match the given value. If no
8530// live object exists, the precondition fails. Setting to 0 makes the
8531// operation succeed only if there is a live version of the object.
8532func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {
8533 c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
8534 return c
8535}
8536
8537// IfMetagenerationMatch sets the optional parameter
8538// "ifMetagenerationMatch": Makes the operation conditional on whether
8539// the object's current metageneration matches the given value.
8540func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
8541 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
8542 return c
8543}
8544
8545// IfMetagenerationNotMatch sets the optional parameter
8546// "ifMetagenerationNotMatch": Makes the operation conditional on
8547// whether the object's current metageneration does not match the given
8548// value.
8549func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
8550 c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
8551 return c
8552}
8553
8554// Projection sets the optional parameter "projection": Set of
8555// properties to return. Defaults to noAcl.
8556//
8557// Possible values:
8558// "full" - Include all properties.
8559// "noAcl" - Omit the owner, acl property.
8560func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
8561 c.urlParams_.Set("projection", projection)
8562 return c
8563}
8564
8565// UserProject sets the optional parameter "userProject": The project to
8566// be billed for this request. Required for Requester Pays buckets.
8567func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall {
8568 c.urlParams_.Set("userProject", userProject)
8569 return c
8570}
8571
8572// Fields allows partial responses to be retrieved. See
8573// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
8574// for more information.
8575func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
8576 c.urlParams_.Set("fields", googleapi.CombineFields(s))
8577 return c
8578}
8579
8580// IfNoneMatch sets the optional parameter which makes the operation
8581// fail if the object's ETag matches the given value. This is useful for
8582// getting updates only after the object has changed since the last
8583// request. Use googleapi.IsNotModified to check whether the response
8584// error from Do is the result of In-None-Match.
8585func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
8586 c.ifNoneMatch_ = entityTag
8587 return c
8588}
8589
8590// Context sets the context to be used in this call's Do and Download
8591// methods. Any pending HTTP request will be aborted if the provided
8592// context is canceled.
8593func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
8594 c.ctx_ = ctx
8595 return c
8596}
8597
8598// Header returns an http.Header that can be modified by the caller to
8599// add HTTP headers to the request.
8600func (c *ObjectsGetCall) Header() http.Header {
8601 if c.header_ == nil {
8602 c.header_ = make(http.Header)
8603 }
8604 return c.header_
8605}
8606
// doRequest assembles and sends the HTTP GET request for this call and
// returns the raw response. alt selects the payload: "json" returns
// object metadata, "media" returns the object content.
func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers so the caller's map is not aliased.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Fill the {bucket} and {object} path template variables.
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
8632
8633// Download fetches the API endpoint's "media" value, instead of the normal
8634// API response value. If the returned error is nil, the Response is guaranteed to
8635// have a 2xx status code. Callers must close the Response.Body as usual.
8636func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
8637 gensupport.SetOptions(c.urlParams_, opts...)
8638 res, err := c.doRequest("media")
8639 if err != nil {
8640 return nil, err
8641 }
8642 if err := googleapi.CheckMediaResponse(res); err != nil {
8643 res.Body.Close()
8644 return nil, err
8645 }
8646 return res, nil
8647}
8648
// Do executes the "storage.objects.get" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (triggered via IfNoneMatch) is reported as a
	// *googleapi.Error carrying the status and headers, checked before
	// err because the transport may surface it either way.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Object{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Retrieves an object or its metadata.",
	//   "httpMethod": "GET",
	//   "id": "storage.objects.get",
	//   "parameterOrder": [
	//     "bucket",
	//     "object"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of the bucket in which the object resides.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit the owner, acl property."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}",
	//   "response": {
	//     "$ref": "Object"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ],
	//   "supportsMediaDownload": true,
	//   "useMediaDownloadService": true
	// }

}
8772
8773// method id "storage.objects.getIamPolicy":
8774
// ObjectsGetIamPolicyCall holds the state of a
// "storage.objects.getIamPolicy" call; build it with
// ObjectsService.GetIamPolicy and execute it with Do.
type ObjectsGetIamPolicyCall struct {
	s            *Service             // service the call is issued against
	bucket       string               // bucket containing the object
	object       string               // object whose IAM policy is fetched
	urlParams_   gensupport.URLParams // accumulated query parameters
	ifNoneMatch_ string               // optional If-None-Match ETag precondition
	ctx_         context.Context      // optional context set via Context
	header_      http.Header          // extra headers, lazily created by Header
}
8784
8785// GetIamPolicy: Returns an IAM policy for the specified object.
8786func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall {
8787 c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
8788 c.bucket = bucket
8789 c.object = object
8790 return c
8791}
8792
8793// Generation sets the optional parameter "generation": If present,
8794// selects a specific revision of this object (as opposed to the latest
8795// version, the default).
8796func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall {
8797 c.urlParams_.Set("generation", fmt.Sprint(generation))
8798 return c
8799}
8800
8801// UserProject sets the optional parameter "userProject": The project to
8802// be billed for this request. Required for Requester Pays buckets.
8803func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall {
8804 c.urlParams_.Set("userProject", userProject)
8805 return c
8806}
8807
8808// Fields allows partial responses to be retrieved. See
8809// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
8810// for more information.
8811func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall {
8812 c.urlParams_.Set("fields", googleapi.CombineFields(s))
8813 return c
8814}
8815
8816// IfNoneMatch sets the optional parameter which makes the operation
8817// fail if the object's ETag matches the given value. This is useful for
8818// getting updates only after the object has changed since the last
8819// request. Use googleapi.IsNotModified to check whether the response
8820// error from Do is the result of In-None-Match.
8821func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall {
8822 c.ifNoneMatch_ = entityTag
8823 return c
8824}
8825
8826// Context sets the context to be used in this call's Do method. Any
8827// pending HTTP request will be aborted if the provided context is
8828// canceled.
8829func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall {
8830 c.ctx_ = ctx
8831 return c
8832}
8833
8834// Header returns an http.Header that can be modified by the caller to
8835// add HTTP headers to the request.
8836func (c *ObjectsGetIamPolicyCall) Header() http.Header {
8837 if c.header_ == nil {
8838 c.header_ = make(http.Header)
8839 }
8840 return c.header_
8841}
8842
// doRequest assembles and sends the HTTP GET request for this call
// against the object's /iam subresource and returns the raw response.
func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers so the caller's map is not aliased.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Fill the {bucket} and {object} path template variables.
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
8868
// Do executes the "storage.objects.getIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (triggered via IfNoneMatch) is reported as a
	// *googleapi.Error carrying the status and headers.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Policy{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns an IAM policy for the specified object.",
	//   "httpMethod": "GET",
	//   "id": "storage.objects.getIamPolicy",
	//   "parameterOrder": [
	//     "bucket",
	//     "object"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of the bucket in which the object resides.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}/iam",
	//   "response": {
	//     "$ref": "Policy"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
8953
8954// method id "storage.objects.insert":
8955
// ObjectsInsertCall holds the state of a "storage.objects.insert"
// call; build it with ObjectsService.Insert, optionally attach media
// via Media or ResumableMedia, and execute it with Do.
type ObjectsInsertCall struct {
	s          *Service              // service the call is issued against
	bucket     string                // destination bucket
	object     *Object               // object metadata to store
	urlParams_ gensupport.URLParams  // accumulated query parameters
	mediaInfo_ *gensupport.MediaInfo // upload content set via Media/ResumableMedia, nil for metadata-only
	ctx_       context.Context       // optional context set via Context/ResumableMedia
	header_    http.Header           // extra headers, lazily created by Header
}
8965
8966// Insert: Stores a new object and metadata.
8967func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall {
8968 c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
8969 c.bucket = bucket
8970 c.object = object
8971 return c
8972}
8973
8974// ContentEncoding sets the optional parameter "contentEncoding": If
8975// set, sets the contentEncoding property of the final object to this
8976// value. Setting this parameter is equivalent to setting the
8977// contentEncoding metadata property. This can be useful when uploading
8978// an object with uploadType=media to indicate the encoding of the
8979// content being uploaded.
8980func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall {
8981 c.urlParams_.Set("contentEncoding", contentEncoding)
8982 return c
8983}
8984
8985// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
8986// Makes the operation conditional on whether the object's current
8987// generation matches the given value. Setting to 0 makes the operation
8988// succeed only if there are no live versions of the object.
8989func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall {
8990 c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
8991 return c
8992}
8993
8994// IfGenerationNotMatch sets the optional parameter
8995// "ifGenerationNotMatch": Makes the operation conditional on whether
8996// the object's current generation does not match the given value. If no
8997// live object exists, the precondition fails. Setting to 0 makes the
8998// operation succeed only if there is a live version of the object.
8999func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall {
9000 c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
9001 return c
9002}
9003
9004// IfMetagenerationMatch sets the optional parameter
9005// "ifMetagenerationMatch": Makes the operation conditional on whether
9006// the object's current metageneration matches the given value.
9007func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall {
9008 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
9009 return c
9010}
9011
9012// IfMetagenerationNotMatch sets the optional parameter
9013// "ifMetagenerationNotMatch": Makes the operation conditional on
9014// whether the object's current metageneration does not match the given
9015// value.
9016func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall {
9017 c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
9018 return c
9019}
9020
9021// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of
9022// the Cloud KMS key, of the form
9023// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
9024// that will be used to encrypt the object. Overrides the object
9025// metadata's kms_key_name value, if any.
9026func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall {
9027 c.urlParams_.Set("kmsKeyName", kmsKeyName)
9028 return c
9029}
9030
9031// Name sets the optional parameter "name": Name of the object. Required
9032// when the object metadata is not otherwise provided. Overrides the
9033// object metadata's name value, if any. For information about how to
9034// URL encode object names to be path safe, see Encoding URI Path Parts.
9035func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall {
9036 c.urlParams_.Set("name", name)
9037 return c
9038}
9039
9040// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
9041// predefined set of access controls to this object.
9042//
9043// Possible values:
9044// "authenticatedRead" - Object owner gets OWNER access, and
9045// allAuthenticatedUsers get READER access.
9046// "bucketOwnerFullControl" - Object owner gets OWNER access, and
9047// project team owners get OWNER access.
9048// "bucketOwnerRead" - Object owner gets OWNER access, and project
9049// team owners get READER access.
9050// "private" - Object owner gets OWNER access.
9051// "projectPrivate" - Object owner gets OWNER access, and project team
9052// members get access according to their roles.
9053// "publicRead" - Object owner gets OWNER access, and allUsers get
9054// READER access.
9055func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall {
9056 c.urlParams_.Set("predefinedAcl", predefinedAcl)
9057 return c
9058}
9059
9060// Projection sets the optional parameter "projection": Set of
9061// properties to return. Defaults to noAcl, unless the object resource
9062// specifies the acl property, when it defaults to full.
9063//
9064// Possible values:
9065// "full" - Include all properties.
9066// "noAcl" - Omit the owner, acl property.
9067func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {
9068 c.urlParams_.Set("projection", projection)
9069 return c
9070}
9071
9072// UserProject sets the optional parameter "userProject": The project to
9073// be billed for this request. Required for Requester Pays buckets.
9074func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall {
9075 c.urlParams_.Set("userProject", userProject)
9076 return c
9077}
9078
9079// Media specifies the media to upload in one or more chunks. The chunk
9080// size may be controlled by supplying a MediaOption generated by
9081// googleapi.ChunkSize. The chunk size defaults to
9082// googleapi.DefaultUploadChunkSize.The Content-Type header used in the
9083// upload request will be determined by sniffing the contents of r,
9084// unless a MediaOption generated by googleapi.ContentType is
9085// supplied.
9086// At most one of Media and ResumableMedia may be set.
9087func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall {
9088 if ct := c.object.ContentType; ct != "" {
9089 options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...)
9090 }
9091 c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options)
9092 return c
9093}
9094
9095// ResumableMedia specifies the media to upload in chunks and can be
9096// canceled with ctx.
9097//
9098// Deprecated: use Media instead.
9099//
9100// At most one of Media and ResumableMedia may be set. mediaType
9101// identifies the MIME media type of the upload, such as "image/png". If
9102// mediaType is "", it will be auto-detected. The provided ctx will
9103// supersede any context previously provided to the Context method.
9104func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall {
9105 c.ctx_ = ctx
9106 c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType)
9107 return c
9108}
9109
9110// ProgressUpdater provides a callback function that will be called
9111// after every chunk. It should be a low-latency function in order to
9112// not slow down the upload operation. This should only be called when
9113// using ResumableMedia (as opposed to Media).
9114func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall {
9115 c.mediaInfo_.SetProgressUpdater(pu)
9116 return c
9117}
9118
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// Fields returns c, so setter calls can be chained.
func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
9126
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// This context will supersede any context previously provided to the
// ResumableMedia method.
// Context returns c, so setter calls can be chained.
func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall {
	c.ctx_ = ctx
	return c
}
9136
9137// Header returns an http.Header that can be modified by the caller to
9138// add HTTP headers to the request.
9139func (c *ObjectsInsertCall) Header() http.Header {
9140 if c.header_ == nil {
9141 c.header_ = make(http.Header)
9142 }
9143 return c.header_
9144}
9145
9146func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
9147 reqHeaders := make(http.Header)
9148 for k, v := range c.header_ {
9149 reqHeaders[k] = v
9150 }
9151 reqHeaders.Set("User-Agent", c.s.userAgent())
9152 var body io.Reader = nil
9153 body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
9154 if err != nil {
9155 return nil, err
9156 }
9157 reqHeaders.Set("Content-Type", "application/json")
9158 c.urlParams_.Set("alt", alt)
9159 c.urlParams_.Set("prettyPrint", "false")
9160 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
9161 if c.mediaInfo_ != nil {
9162 urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
9163 c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType())
9164 }
9165 if body == nil {
9166 body = new(bytes.Buffer)
9167 reqHeaders.Set("Content-Type", "application/json")
9168 }
9169 body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)
9170 defer cleanup()
9171 urls += "?" + c.urlParams_.Encode()
9172 req, err := http.NewRequest("POST", urls, body)
9173 if err != nil {
9174 return nil, err
9175 }
9176 req.Header = reqHeaders
9177 gensupport.SetGetBody(req, getBody)
9178 googleapi.Expand(req.URL, map[string]string{
9179 "bucket": c.bucket,
9180 })
9181 return gensupport.SendRequest(c.ctx_, c.s.client, req)
9182}
9183
// Do executes the "storage.objects.insert" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// status code and headers so callers can detect it via IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// For resumable uploads, the first response only starts the session
	// (its Location header names the upload URL); the actual upload is
	// driven here and res is replaced with the final upload response.
	rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location"))
	if rx != nil {
		rx.Client = c.s.client
		rx.UserAgent = c.s.userAgent()
		ctx := c.ctx_
		if ctx == nil {
			ctx = context.TODO()
		}
		res, err = rx.Upload(ctx)
		if err != nil {
			return nil, err
		}
		defer res.Body.Close()
		if err := googleapi.CheckResponse(res); err != nil {
			return nil, err
		}
	}
	ret := &Object{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// JSON description of this API method, preserved for reference:
	// {
	//   "description": "Stores a new object and metadata.",
	//   "httpMethod": "POST",
	//   "id": "storage.objects.insert",
	//   "mediaUpload": {
	//     "accept": [
	//       "*/*"
	//     ],
	//     "protocols": {
	//       "resumable": {
	//         "multipart": true,
	//         "path": "/resumable/upload/storage/v1/b/{bucket}/o"
	//       },
	//       "simple": {
	//         "multipart": true,
	//         "path": "/upload/storage/v1/b/{bucket}/o"
	//       }
	//     }
	//   },
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "contentEncoding": {
	//       "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "kmsKeyName": {
	//       "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "name": {
	//       "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "predefinedAcl": {
	//       "description": "Apply a predefined set of access controls to this object.",
	//       "enum": [
	//         "authenticatedRead",
	//         "bucketOwnerFullControl",
	//         "bucketOwnerRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead"
	//       ],
	//       "enumDescriptions": [
	//         "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Object owner gets OWNER access, and project team owners get OWNER access.",
	//         "Object owner gets OWNER access, and project team owners get READER access.",
	//         "Object owner gets OWNER access.",
	//         "Object owner gets OWNER access, and project team members get access according to their roles.",
	//         "Object owner gets OWNER access, and allUsers get READER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit the owner, acl property."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o",
	//   "request": {
	//     "$ref": "Object"
	//   },
	//   "response": {
	//     "$ref": "Object"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ],
	//   "supportsMediaUpload": true
	// }

}
9362
9363// method id "storage.objects.list":
9364
// ObjectsListCall holds the state for a single "storage.objects.list"
// request while it is being built up (via the setter methods) and
// executed (via Do or Pages).
type ObjectsListCall struct {
	s            *Service
	bucket       string               // name of the bucket to list, expanded into the URL path
	urlParams_   gensupport.URLParams // accumulated query parameters
	ifNoneMatch_ string               // optional ETag sent as If-None-Match
	ctx_         context.Context
	header_      http.Header
}
9373
9374// List: Retrieves a list of objects matching the criteria.
9375func (r *ObjectsService) List(bucket string) *ObjectsListCall {
9376 c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
9377 c.bucket = bucket
9378 return c
9379}
9380
// Delimiter sets the optional parameter "delimiter": Returns results in
// a directory-like mode. items will contain only objects whose names,
// aside from the prefix, do not contain delimiter. Objects whose names,
// aside from the prefix, contain delimiter will have their name,
// truncated after the delimiter, returned in prefixes. Duplicate
// prefixes are omitted.
// Delimiter returns c, so setter calls can be chained.
func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall {
	c.urlParams_.Set("delimiter", delimiter)
	return c
}
9391
9392// IncludeTrailingDelimiter sets the optional parameter
9393// "includeTrailingDelimiter": If true, objects that end in exactly one
9394// instance of delimiter will have their metadata included in items in
9395// addition to prefixes.
9396func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall {
9397 c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter))
9398 return c
9399}
9400
9401// MaxResults sets the optional parameter "maxResults": Maximum number
9402// of items plus prefixes to return in a single page of responses. As
9403// duplicate prefixes are omitted, fewer total results may be returned
9404// than requested. The service will use this parameter or 1,000 items,
9405// whichever is smaller.
9406func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall {
9407 c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
9408 return c
9409}
9410
// PageToken sets the optional parameter "pageToken": A
// previously-returned page token representing part of the larger set of
// results to view.
// PageToken returns c, so setter calls can be chained.
func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}
9418
// Prefix sets the optional parameter "prefix": Filter results to
// objects whose names begin with this prefix.
// Prefix returns c, so setter calls can be chained.
func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall {
	c.urlParams_.Set("prefix", prefix)
	return c
}
9425
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to noAcl.
//
// Possible values:
//   "full" - Include all properties.
//   "noAcl" - Omit the owner, acl property.
// Projection returns c, so setter calls can be chained.
func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
	c.urlParams_.Set("projection", projection)
	return c
}
9436
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request. Required for Requester Pays buckets.
// UserProject returns c, so setter calls can be chained.
func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
9443
9444// Versions sets the optional parameter "versions": If true, lists all
9445// versions of an object as distinct results. The default is false. For
9446// more information, see Object Versioning.
9447func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall {
9448 c.urlParams_.Set("versions", fmt.Sprint(versions))
9449 return c
9450}
9451
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// Fields returns c, so setter calls can be chained.
func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
9459
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
// IfNoneMatch returns c, so setter calls can be chained.
func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
9469
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// Context returns c, so setter calls can be chained.
func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
	c.ctx_ = ctx
	return c
}
9477
9478// Header returns an http.Header that can be modified by the caller to
9479// add HTTP headers to the request.
9480func (c *ObjectsListCall) Header() http.Header {
9481 if c.header_ == nil {
9482 c.header_ = make(http.Header)
9483 }
9484 return c.header_
9485}
9486
9487func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
9488 reqHeaders := make(http.Header)
9489 for k, v := range c.header_ {
9490 reqHeaders[k] = v
9491 }
9492 reqHeaders.Set("User-Agent", c.s.userAgent())
9493 if c.ifNoneMatch_ != "" {
9494 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
9495 }
9496 var body io.Reader = nil
9497 c.urlParams_.Set("alt", alt)
9498 c.urlParams_.Set("prettyPrint", "false")
9499 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
9500 urls += "?" + c.urlParams_.Encode()
9501 req, err := http.NewRequest("GET", urls, body)
9502 if err != nil {
9503 return nil, err
9504 }
9505 req.Header = reqHeaders
9506 googleapi.Expand(req.URL, map[string]string{
9507 "bucket": c.bucket,
9508 })
9509 return gensupport.SendRequest(c.ctx_, c.s.client, req)
9510}
9511
// Do executes the "storage.objects.list" call.
// Exactly one of *Objects or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Objects.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified (possible when IfNoneMatch was set) is surfaced as
	// a *googleapi.Error so callers can detect it via IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Objects{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// JSON description of this API method, preserved for reference:
	// {
	//   "description": "Retrieves a list of objects matching the criteria.",
	//   "httpMethod": "GET",
	//   "id": "storage.objects.list",
	//   "parameterOrder": [
	//     "bucket"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of the bucket in which to look for objects.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "delimiter": {
	//       "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "includeTrailingDelimiter": {
	//       "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
	//       "location": "query",
	//       "type": "boolean"
	//     },
	//     "maxResults": {
	//       "default": "1000",
	//       "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
	//       "format": "uint32",
	//       "location": "query",
	//       "minimum": "0",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A previously-returned page token representing part of the larger set of results to view.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "prefix": {
	//       "description": "Filter results to objects whose names begin with this prefix.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit the owner, acl property."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "versions": {
	//       "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
	//       "location": "query",
	//       "type": "boolean"
	//     }
	//   },
	//   "path": "b/{bucket}/o",
	//   "response": {
	//     "$ref": "Objects"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ],
	//   "supportsSubscription": true
	// }

}
9630
9631// Pages invokes f for each page of results.
9632// A non-nil error returned from f will halt the iteration.
9633// The provided context supersedes any context provided to the Context method.
9634func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error {
9635 c.ctx_ = ctx
9636 defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
9637 for {
9638 x, err := c.Do()
9639 if err != nil {
9640 return err
9641 }
9642 if err := f(x); err != nil {
9643 return err
9644 }
9645 if x.NextPageToken == "" {
9646 return nil
9647 }
9648 c.PageToken(x.NextPageToken)
9649 }
9650}
9651
9652// method id "storage.objects.patch":
9653
// ObjectsPatchCall holds the state for a single "storage.objects.patch"
// request while it is being built up (via the setter methods) and
// executed (via Do).
type ObjectsPatchCall struct {
	s          *Service
	bucket     string               // bucket containing the object, expanded into the URL path
	object     string               // name of the object being patched, expanded into the URL path
	object2    *Object              // metadata patch sent as the JSON request body
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_       context.Context
	header_    http.Header
}
9663
9664// Patch: Patches an object's metadata.
9665func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall {
9666 c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
9667 c.bucket = bucket
9668 c.object = object
9669 c.object2 = object2
9670 return c
9671}
9672
9673// Generation sets the optional parameter "generation": If present,
9674// selects a specific revision of this object (as opposed to the latest
9675// version, the default).
9676func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall {
9677 c.urlParams_.Set("generation", fmt.Sprint(generation))
9678 return c
9679}
9680
// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
// Makes the operation conditional on whether the object's current
// generation matches the given value. Setting to 0 makes the operation
// succeed only if there are no live versions of the object.
// IfGenerationMatch returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall {
	c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
	return c
}
9689
// IfGenerationNotMatch sets the optional parameter
// "ifGenerationNotMatch": Makes the operation conditional on whether
// the object's current generation does not match the given value. If no
// live object exists, the precondition fails. Setting to 0 makes the
// operation succeed only if there is a live version of the object.
// IfGenerationNotMatch returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall {
	c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
	return c
}
9699
// IfMetagenerationMatch sets the optional parameter
// "ifMetagenerationMatch": Makes the operation conditional on whether
// the object's current metageneration matches the given value.
// IfMetagenerationMatch returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall {
	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
	return c
}
9707
// IfMetagenerationNotMatch sets the optional parameter
// "ifMetagenerationNotMatch": Makes the operation conditional on
// whether the object's current metageneration does not match the given
// value.
// IfMetagenerationNotMatch returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall {
	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
	return c
}
9716
// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
// predefined set of access controls to this object.
//
// Possible values:
//   "authenticatedRead" - Object owner gets OWNER access, and
// allAuthenticatedUsers get READER access.
//   "bucketOwnerFullControl" - Object owner gets OWNER access, and
// project team owners get OWNER access.
//   "bucketOwnerRead" - Object owner gets OWNER access, and project
// team owners get READER access.
//   "private" - Object owner gets OWNER access.
//   "projectPrivate" - Object owner gets OWNER access, and project team
// members get access according to their roles.
//   "publicRead" - Object owner gets OWNER access, and allUsers get
// READER access.
// PredefinedAcl returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall {
	c.urlParams_.Set("predefinedAcl", predefinedAcl)
	return c
}
9736
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
//   "full" - Include all properties.
//   "noAcl" - Omit the owner, acl property.
// Projection returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall {
	c.urlParams_.Set("projection", projection)
	return c
}
9747
// UserProject sets the optional parameter "userProject": The project to
// be billed for this request, for Requester Pays buckets.
// UserProject returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall {
	c.urlParams_.Set("userProject", userProject)
	return c
}
9754
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
// Fields returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
9762
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// Context returns c, so setter calls can be chained.
func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall {
	c.ctx_ = ctx
	return c
}
9770
9771// Header returns an http.Header that can be modified by the caller to
9772// add HTTP headers to the request.
9773func (c *ObjectsPatchCall) Header() http.Header {
9774 if c.header_ == nil {
9775 c.header_ = make(http.Header)
9776 }
9777 return c.header_
9778}
9779
9780func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
9781 reqHeaders := make(http.Header)
9782 for k, v := range c.header_ {
9783 reqHeaders[k] = v
9784 }
9785 reqHeaders.Set("User-Agent", c.s.userAgent())
9786 var body io.Reader = nil
9787 body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
9788 if err != nil {
9789 return nil, err
9790 }
9791 reqHeaders.Set("Content-Type", "application/json")
9792 c.urlParams_.Set("alt", alt)
9793 c.urlParams_.Set("prettyPrint", "false")
9794 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
9795 urls += "?" + c.urlParams_.Encode()
9796 req, err := http.NewRequest("PATCH", urls, body)
9797 if err != nil {
9798 return nil, err
9799 }
9800 req.Header = reqHeaders
9801 googleapi.Expand(req.URL, map[string]string{
9802 "bucket": c.bucket,
9803 "object": c.object,
9804 })
9805 return gensupport.SendRequest(c.ctx_, c.s.client, req)
9806}
9807
// Do executes the "storage.objects.patch" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error so callers can
	// detect it via IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Object{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// JSON description of this API method, preserved for reference:
	// {
	//   "description": "Patches an object's metadata.",
	//   "httpMethod": "PATCH",
	//   "id": "storage.objects.patch",
	//   "parameterOrder": [
	//     "bucket",
	//     "object"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of the bucket in which the object resides.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "predefinedAcl": {
	//       "description": "Apply a predefined set of access controls to this object.",
	//       "enum": [
	//         "authenticatedRead",
	//         "bucketOwnerFullControl",
	//         "bucketOwnerRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead"
	//       ],
	//       "enumDescriptions": [
	//         "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Object owner gets OWNER access, and project team owners get OWNER access.",
	//         "Object owner gets OWNER access, and project team owners get READER access.",
	//         "Object owner gets OWNER access.",
	//         "Object owner gets OWNER access, and project team members get access according to their roles.",
	//         "Object owner gets OWNER access, and allUsers get READER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit the owner, acl property."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request, for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}",
	//   "request": {
	//     "$ref": "Object"
	//   },
	//   "response": {
	//     "$ref": "Object"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }

}
9950
9951// method id "storage.objects.rewrite":
9952
// ObjectsRewriteCall holds the state for a single
// "storage.objects.rewrite" request while it is being built up (via the
// setter methods) and executed.
type ObjectsRewriteCall struct {
	s                 *Service
	sourceBucket      string               // bucket containing the source object
	sourceObject      string               // name of the source object
	destinationBucket string               // bucket receiving the rewritten object
	destinationObject string               // name of the rewritten object
	object            *Object              // destination metadata sent as the JSON request body
	urlParams_        gensupport.URLParams // accumulated query parameters
	ctx_              context.Context
	header_           http.Header
}
9964
9965// Rewrite: Rewrites a source object to a destination object. Optionally
9966// overrides metadata.
9967func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall {
9968 c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
9969 c.sourceBucket = sourceBucket
9970 c.sourceObject = sourceObject
9971 c.destinationBucket = destinationBucket
9972 c.destinationObject = destinationObject
9973 c.object = object
9974 return c
9975}
9976
9977// DestinationKmsKeyName sets the optional parameter
9978// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the
9979// form
9980// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
9981// that will be used to encrypt the object. Overrides the object
9982// metadata's kms_key_name value, if any.
9983func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall {
9984 c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName)
9985 return c
9986}
9987
9988// DestinationPredefinedAcl sets the optional parameter
9989// "destinationPredefinedAcl": Apply a predefined set of access controls
9990// to the destination object.
9991//
9992// Possible values:
9993// "authenticatedRead" - Object owner gets OWNER access, and
9994// allAuthenticatedUsers get READER access.
9995// "bucketOwnerFullControl" - Object owner gets OWNER access, and
9996// project team owners get OWNER access.
9997// "bucketOwnerRead" - Object owner gets OWNER access, and project
9998// team owners get READER access.
9999// "private" - Object owner gets OWNER access.
10000// "projectPrivate" - Object owner gets OWNER access, and project team
10001// members get access according to their roles.
10002// "publicRead" - Object owner gets OWNER access, and allUsers get
10003// READER access.
10004func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall {
10005 c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
10006 return c
10007}
10008
10009// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
10010// Makes the operation conditional on whether the object's current
10011// generation matches the given value. Setting to 0 makes the operation
10012// succeed only if there are no live versions of the object.
10013func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall {
10014 c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
10015 return c
10016}
10017
10018// IfGenerationNotMatch sets the optional parameter
10019// "ifGenerationNotMatch": Makes the operation conditional on whether
10020// the object's current generation does not match the given value. If no
10021// live object exists, the precondition fails. Setting to 0 makes the
10022// operation succeed only if there is a live version of the object.
10023func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall {
10024 c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
10025 return c
10026}
10027
10028// IfMetagenerationMatch sets the optional parameter
10029// "ifMetagenerationMatch": Makes the operation conditional on whether
10030// the destination object's current metageneration matches the given
10031// value.
10032func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall {
10033 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
10034 return c
10035}
10036
10037// IfMetagenerationNotMatch sets the optional parameter
10038// "ifMetagenerationNotMatch": Makes the operation conditional on
10039// whether the destination object's current metageneration does not
10040// match the given value.
10041func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall {
10042 c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
10043 return c
10044}
10045
10046// IfSourceGenerationMatch sets the optional parameter
10047// "ifSourceGenerationMatch": Makes the operation conditional on whether
10048// the source object's current generation matches the given value.
10049func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall {
10050 c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch))
10051 return c
10052}
10053
10054// IfSourceGenerationNotMatch sets the optional parameter
10055// "ifSourceGenerationNotMatch": Makes the operation conditional on
10056// whether the source object's current generation does not match the
10057// given value.
10058func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall {
10059 c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch))
10060 return c
10061}
10062
10063// IfSourceMetagenerationMatch sets the optional parameter
10064// "ifSourceMetagenerationMatch": Makes the operation conditional on
10065// whether the source object's current metageneration matches the given
10066// value.
10067func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall {
10068 c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
10069 return c
10070}
10071
10072// IfSourceMetagenerationNotMatch sets the optional parameter
10073// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
10074// whether the source object's current metageneration does not match the
10075// given value.
10076func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall {
10077 c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
10078 return c
10079}
10080
10081// MaxBytesRewrittenPerCall sets the optional parameter
10082// "maxBytesRewrittenPerCall": The maximum number of bytes that will be
10083// rewritten per rewrite request. Most callers shouldn't need to specify
10084// this parameter - it is primarily in place to support testing. If
10085// specified the value must be an integral multiple of 1 MiB (1048576).
10086// Also, this only applies to requests where the source and destination
10087// span locations and/or storage classes. Finally, this value must not
10088// change across rewrite calls else you'll get an error that the
10089// rewriteToken is invalid.
10090func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall {
10091 c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall))
10092 return c
10093}
10094
10095// Projection sets the optional parameter "projection": Set of
10096// properties to return. Defaults to noAcl, unless the object resource
10097// specifies the acl property, when it defaults to full.
10098//
10099// Possible values:
10100// "full" - Include all properties.
10101// "noAcl" - Omit the owner, acl property.
10102func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall {
10103 c.urlParams_.Set("projection", projection)
10104 return c
10105}
10106
10107// RewriteToken sets the optional parameter "rewriteToken": Include this
10108// field (from the previous rewrite response) on each rewrite request
10109// after the first one, until the rewrite response 'done' flag is true.
10110// Calls that provide a rewriteToken can omit all other request fields,
10111// but if included those fields must match the values provided in the
10112// first rewrite request.
10113func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall {
10114 c.urlParams_.Set("rewriteToken", rewriteToken)
10115 return c
10116}
10117
10118// SourceGeneration sets the optional parameter "sourceGeneration": If
10119// present, selects a specific revision of the source object (as opposed
10120// to the latest version, the default).
10121func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall {
10122 c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration))
10123 return c
10124}
10125
10126// UserProject sets the optional parameter "userProject": The project to
10127// be billed for this request. Required for Requester Pays buckets.
10128func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall {
10129 c.urlParams_.Set("userProject", userProject)
10130 return c
10131}
10132
10133// Fields allows partial responses to be retrieved. See
10134// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
10135// for more information.
10136func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall {
10137 c.urlParams_.Set("fields", googleapi.CombineFields(s))
10138 return c
10139}
10140
10141// Context sets the context to be used in this call's Do method. Any
10142// pending HTTP request will be aborted if the provided context is
10143// canceled.
10144func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall {
10145 c.ctx_ = ctx
10146 return c
10147}
10148
10149// Header returns an http.Header that can be modified by the caller to
10150// add HTTP headers to the request.
10151func (c *ObjectsRewriteCall) Header() http.Header {
10152 if c.header_ == nil {
10153 c.header_ = make(http.Header)
10154 }
10155 return c.header_
10156}
10157
// doRequest builds and sends the HTTP request for the rewrite call and
// returns the raw response. alt selects the response encoding ("json").
func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers (set via Header()) before adding the
	// standard ones, so the standard ones take precedence.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// The destination object metadata is serialized as the JSON request body.
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {placeholder} path segments with the actual
	// bucket/object names.
	googleapi.Expand(req.URL, map[string]string{
		"sourceBucket":      c.sourceBucket,
		"sourceObject":      c.sourceObject,
		"destinationBucket": c.destinationBucket,
		"destinationObject": c.destinationObject,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
10187
// Do executes the "storage.objects.rewrite" call.
// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *RewriteResponse.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Deliberately inspect res before err: a 304 is surfaced to the
	// caller as a *googleapi.Error (detectable via googleapi.IsNotModified)
	// rather than being treated as a decodable response.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Pre-populate the wrapper so callers can inspect response headers
	// and status even on a successful decode.
	ret := &RewriteResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed &ret (a **RewriteResponse), not ret itself.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
	//   "httpMethod": "POST",
	//   "id": "storage.objects.rewrite",
	//   "parameterOrder": [
	//     "sourceBucket",
	//     "sourceObject",
	//     "destinationBucket",
	//     "destinationObject"
	//   ],
	//   "parameters": {
	//     "destinationBucket": {
	//       "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "destinationKmsKeyName": {
	//       "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "destinationObject": {
	//       "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "destinationPredefinedAcl": {
	//       "description": "Apply a predefined set of access controls to the destination object.",
	//       "enum": [
	//         "authenticatedRead",
	//         "bucketOwnerFullControl",
	//         "bucketOwnerRead",
	//         "private",
	//         "projectPrivate",
	//         "publicRead"
	//       ],
	//       "enumDescriptions": [
	//         "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
	//         "Object owner gets OWNER access, and project team owners get OWNER access.",
	//         "Object owner gets OWNER access, and project team owners get READER access.",
	//         "Object owner gets OWNER access.",
	//         "Object owner gets OWNER access, and project team members get access according to their roles.",
	//         "Object owner gets OWNER access, and allUsers get READER access."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifGenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifMetagenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifSourceGenerationMatch": {
	//       "description": "Makes the operation conditional on whether the source object's current generation matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifSourceGenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifSourceMetagenerationMatch": {
	//       "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "ifSourceMetagenerationNotMatch": {
	//       "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "maxBytesRewrittenPerCall": {
	//       "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "projection": {
	//       "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
	//       "enum": [
	//         "full",
	//         "noAcl"
	//       ],
	//       "enumDescriptions": [
	//         "Include all properties.",
	//         "Omit the owner, acl property."
	//       ],
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "rewriteToken": {
	//       "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "sourceBucket": {
	//       "description": "Name of the bucket in which to find the source object.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "sourceGeneration": {
	//       "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "sourceObject": {
	//       "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
	//   "request": {
	//     "$ref": "Object"
	//   },
	//   "response": {
	//     "$ref": "RewriteResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
10385
10386// method id "storage.objects.setIamPolicy":
10387
// ObjectsSetIamPolicyCall holds the state of a pending
// "storage.objects.setIamPolicy" request: the object's location, the
// policy to install, and per-call options.
type ObjectsSetIamPolicyCall struct {
	s          *Service             // service the request is issued through
	bucket     string               // path param: bucket holding the object
	object     string               // path param: object whose policy is replaced
	policy     *Policy              // JSON request body (the new IAM policy)
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_       context.Context      // optional context set via Context()
	header_    http.Header          // extra headers set via Header()
}
10397
10398// SetIamPolicy: Updates an IAM policy for the specified object.
10399func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall {
10400 c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
10401 c.bucket = bucket
10402 c.object = object
10403 c.policy = policy
10404 return c
10405}
10406
10407// Generation sets the optional parameter "generation": If present,
10408// selects a specific revision of this object (as opposed to the latest
10409// version, the default).
10410func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall {
10411 c.urlParams_.Set("generation", fmt.Sprint(generation))
10412 return c
10413}
10414
10415// UserProject sets the optional parameter "userProject": The project to
10416// be billed for this request. Required for Requester Pays buckets.
10417func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall {
10418 c.urlParams_.Set("userProject", userProject)
10419 return c
10420}
10421
10422// Fields allows partial responses to be retrieved. See
10423// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
10424// for more information.
10425func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall {
10426 c.urlParams_.Set("fields", googleapi.CombineFields(s))
10427 return c
10428}
10429
10430// Context sets the context to be used in this call's Do method. Any
10431// pending HTTP request will be aborted if the provided context is
10432// canceled.
10433func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall {
10434 c.ctx_ = ctx
10435 return c
10436}
10437
10438// Header returns an http.Header that can be modified by the caller to
10439// add HTTP headers to the request.
10440func (c *ObjectsSetIamPolicyCall) Header() http.Header {
10441 if c.header_ == nil {
10442 c.header_ = make(http.Header)
10443 }
10444 return c.header_
10445}
10446
// doRequest builds and sends the HTTP request for the setIamPolicy call
// and returns the raw response. alt selects the response encoding ("json").
func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers (set via Header()) before adding the
	// standard ones, so the standard ones take precedence.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// The new IAM policy is serialized as the JSON request body.
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PUT", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {placeholder} path segments with the actual
	// bucket/object names.
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
10474
// Do executes the "storage.objects.setIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Deliberately inspect res before err: a 304 is surfaced to the
	// caller as a *googleapi.Error (detectable via googleapi.IsNotModified)
	// rather than being treated as a decodable response.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Pre-populate the wrapper so callers can inspect response headers
	// and status even on a successful decode.
	ret := &Policy{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed &ret (a **Policy), not ret itself.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates an IAM policy for the specified object.",
	//   "httpMethod": "PUT",
	//   "id": "storage.objects.setIamPolicy",
	//   "parameterOrder": [
	//     "bucket",
	//     "object"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of the bucket in which the object resides.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}/iam",
	//   "request": {
	//     "$ref": "Policy"
	//   },
	//   "response": {
	//     "$ref": "Policy"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
10560
10561// method id "storage.objects.testIamPermissions":
10562
// ObjectsTestIamPermissionsCall holds the state of a pending
// "storage.objects.testIamPermissions" request: the object's location,
// the permissions to test (stored in urlParams_), and per-call options.
type ObjectsTestIamPermissionsCall struct {
	s            *Service             // service the request is issued through
	bucket       string               // path param: bucket holding the object
	object       string               // path param: object to test permissions on
	urlParams_   gensupport.URLParams // accumulated query parameters (incl. "permissions")
	ifNoneMatch_ string               // optional If-None-Match ETag set via IfNoneMatch()
	ctx_         context.Context      // optional context set via Context()
	header_      http.Header          // extra headers set via Header()
}
10572
10573// TestIamPermissions: Tests a set of permissions on the given object to
10574// see which, if any, are held by the caller.
10575func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall {
10576 c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
10577 c.bucket = bucket
10578 c.object = object
10579 c.urlParams_.SetMulti("permissions", append([]string{}, permissions...))
10580 return c
10581}
10582
10583// Generation sets the optional parameter "generation": If present,
10584// selects a specific revision of this object (as opposed to the latest
10585// version, the default).
10586func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall {
10587 c.urlParams_.Set("generation", fmt.Sprint(generation))
10588 return c
10589}
10590
10591// UserProject sets the optional parameter "userProject": The project to
10592// be billed for this request. Required for Requester Pays buckets.
10593func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall {
10594 c.urlParams_.Set("userProject", userProject)
10595 return c
10596}
10597
10598// Fields allows partial responses to be retrieved. See
10599// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
10600// for more information.
10601func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall {
10602 c.urlParams_.Set("fields", googleapi.CombineFields(s))
10603 return c
10604}
10605
10606// IfNoneMatch sets the optional parameter which makes the operation
10607// fail if the object's ETag matches the given value. This is useful for
10608// getting updates only after the object has changed since the last
10609// request. Use googleapi.IsNotModified to check whether the response
10610// error from Do is the result of In-None-Match.
10611func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall {
10612 c.ifNoneMatch_ = entityTag
10613 return c
10614}
10615
10616// Context sets the context to be used in this call's Do method. Any
10617// pending HTTP request will be aborted if the provided context is
10618// canceled.
10619func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall {
10620 c.ctx_ = ctx
10621 return c
10622}
10623
10624// Header returns an http.Header that can be modified by the caller to
10625// add HTTP headers to the request.
10626func (c *ObjectsTestIamPermissionsCall) Header() http.Header {
10627 if c.header_ == nil {
10628 c.header_ = make(http.Header)
10629 }
10630 return c.header_
10631}
10632
// doRequest builds and sends the HTTP request for the testIamPermissions
// call and returns the raw response. alt selects the response encoding
// ("json").
func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	// Copy caller-supplied headers (set via Header()) before adding the
	// standard ones, so the standard ones take precedence.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	// Attach the conditional-fetch ETag, if the caller provided one.
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil // GET request: no body
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {placeholder} path segments with the actual
	// bucket/object names.
	googleapi.Expand(req.URL, map[string]string{
		"bucket": c.bucket,
		"object": c.object,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
10658
// Do executes the "storage.objects.testIamPermissions" call.
// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// Deliberately inspect res before err: a 304 (possible when
	// IfNoneMatch was set) is surfaced as a *googleapi.Error detectable
	// via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Pre-populate the wrapper so callers can inspect response headers
	// and status even on a successful decode.
	ret := &TestIamPermissionsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// DecodeResponse is handed &ret (a **TestIamPermissionsResponse).
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.",
	//   "httpMethod": "GET",
	//   "id": "storage.objects.testIamPermissions",
	//   "parameterOrder": [
	//     "bucket",
	//     "object",
	//     "permissions"
	//   ],
	//   "parameters": {
	//     "bucket": {
	//       "description": "Name of the bucket in which the object resides.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "generation": {
	//       "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
	//       "format": "int64",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "object": {
	//       "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
	//       "location": "path",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "permissions": {
	//       "description": "Permissions to test.",
	//       "location": "query",
	//       "repeated": true,
	//       "required": true,
	//       "type": "string"
	//     },
	//     "userProject": {
	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "b/{bucket}/o/{object}/iam/testPermissions",
	//   "response": {
	//     "$ref": "TestIamPermissionsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform",
	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
	//     "https://www.googleapis.com/auth/devstorage.full_control",
	//     "https://www.googleapis.com/auth/devstorage.read_only",
	//     "https://www.googleapis.com/auth/devstorage.read_write"
	//   ]
	// }

}
10751
10752// method id "storage.objects.update":
10753
10754type ObjectsUpdateCall struct {
10755 s *Service
10756 bucket string
10757 object string
10758 object2 *Object
10759 urlParams_ gensupport.URLParams
10760 ctx_ context.Context
10761 header_ http.Header
10762}
10763
10764// Update: Updates an object's metadata.
10765func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall {
10766 c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
10767 c.bucket = bucket
10768 c.object = object
10769 c.object2 = object2
10770 return c
10771}
10772
10773// Generation sets the optional parameter "generation": If present,
10774// selects a specific revision of this object (as opposed to the latest
10775// version, the default).
10776func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall {
10777 c.urlParams_.Set("generation", fmt.Sprint(generation))
10778 return c
10779}
10780
10781// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
10782// Makes the operation conditional on whether the object's current
10783// generation matches the given value. Setting to 0 makes the operation
10784// succeed only if there are no live versions of the object.
10785func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall {
10786 c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
10787 return c
10788}
10789
10790// IfGenerationNotMatch sets the optional parameter
10791// "ifGenerationNotMatch": Makes the operation conditional on whether
10792// the object's current generation does not match the given value. If no
10793// live object exists, the precondition fails. Setting to 0 makes the
10794// operation succeed only if there is a live version of the object.
10795func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall {
10796 c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
10797 return c
10798}
10799
10800// IfMetagenerationMatch sets the optional parameter
10801// "ifMetagenerationMatch": Makes the operation conditional on whether
10802// the object's current metageneration matches the given value.
10803func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall {
10804 c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
10805 return c
10806}
10807
10808// IfMetagenerationNotMatch sets the optional parameter
10809// "ifMetagenerationNotMatch": Makes the operation conditional on
10810// whether the object's current metageneration does not match the given
10811// value.
10812func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall {
10813 c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
10814 return c
10815}
10816
10817// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
10818// predefined set of access controls to this object.
10819//
10820// Possible values:
10821// "authenticatedRead" - Object owner gets OWNER access, and
10822// allAuthenticatedUsers get READER access.
10823// "bucketOwnerFullControl" - Object owner gets OWNER access, and
10824// project team owners get OWNER access.
10825// "bucketOwnerRead" - Object owner gets OWNER access, and project
10826// team owners get READER access.
10827// "private" - Object owner gets OWNER access.
10828// "projectPrivate" - Object owner gets OWNER access, and project team
10829// members get access according to their roles.
10830// "publicRead" - Object owner gets OWNER access, and allUsers get
10831// READER access.
10832func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall {
10833 c.urlParams_.Set("predefinedAcl", predefinedAcl)
10834 return c
10835}
10836
10837// Projection sets the optional parameter "projection": Set of
10838// properties to return. Defaults to full.
10839//
10840// Possible values:
10841// "full" - Include all properties.
10842// "noAcl" - Omit the owner, acl property.
10843func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
10844 c.urlParams_.Set("projection", projection)
10845 return c
10846}
10847
10848// UserProject sets the optional parameter "userProject": The project to
10849// be billed for this request. Required for Requester Pays buckets.
10850func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall {
10851 c.urlParams_.Set("userProject", userProject)
10852 return c
10853}
10854
10855// Fields allows partial responses to be retrieved. See
10856// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
10857// for more information.
10858func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall {
10859 c.urlParams_.Set("fields", googleapi.CombineFields(s))
10860 return c
10861}
10862
10863// Context sets the context to be used in this call's Do method. Any
10864// pending HTTP request will be aborted if the provided context is
10865// canceled.
10866func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall {
10867 c.ctx_ = ctx
10868 return c
10869}
10870
10871// Header returns an http.Header that can be modified by the caller to
10872// add HTTP headers to the request.
10873func (c *ObjectsUpdateCall) Header() http.Header {
10874 if c.header_ == nil {
10875 c.header_ = make(http.Header)
10876 }
10877 return c.header_
10878}
10879
10880func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
10881 reqHeaders := make(http.Header)
10882 for k, v := range c.header_ {
10883 reqHeaders[k] = v
10884 }
10885 reqHeaders.Set("User-Agent", c.s.userAgent())
10886 var body io.Reader = nil
10887 body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
10888 if err != nil {
10889 return nil, err
10890 }
10891 reqHeaders.Set("Content-Type", "application/json")
10892 c.urlParams_.Set("alt", alt)
10893 c.urlParams_.Set("prettyPrint", "false")
10894 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
10895 urls += "?" + c.urlParams_.Encode()
10896 req, err := http.NewRequest("PUT", urls, body)
10897 if err != nil {
10898 return nil, err
10899 }
10900 req.Header = reqHeaders
10901 googleapi.Expand(req.URL, map[string]string{
10902 "bucket": c.bucket,
10903 "object": c.object,
10904 })
10905 return gensupport.SendRequest(c.ctx_, c.s.client, req)
10906}
10907
10908// Do executes the "storage.objects.update" call.
10909// Exactly one of *Object or error will be non-nil. Any non-2xx status
10910// code is an error. Response headers are in either
10911// *Object.ServerResponse.Header or (if a response was returned at all)
10912// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
10913// check whether the returned error was because http.StatusNotModified
10914// was returned.
10915func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
10916 gensupport.SetOptions(c.urlParams_, opts...)
10917 res, err := c.doRequest("json")
10918 if res != nil && res.StatusCode == http.StatusNotModified {
10919 if res.Body != nil {
10920 res.Body.Close()
10921 }
10922 return nil, &googleapi.Error{
10923 Code: res.StatusCode,
10924 Header: res.Header,
10925 }
10926 }
10927 if err != nil {
10928 return nil, err
10929 }
10930 defer googleapi.CloseBody(res)
10931 if err := googleapi.CheckResponse(res); err != nil {
10932 return nil, err
10933 }
10934 ret := &Object{
10935 ServerResponse: googleapi.ServerResponse{
10936 Header: res.Header,
10937 HTTPStatusCode: res.StatusCode,
10938 },
10939 }
10940 target := &ret
10941 if err := gensupport.DecodeResponse(target, res); err != nil {
10942 return nil, err
10943 }
10944 return ret, nil
10945 // {
10946 // "description": "Updates an object's metadata.",
10947 // "httpMethod": "PUT",
10948 // "id": "storage.objects.update",
10949 // "parameterOrder": [
10950 // "bucket",
10951 // "object"
10952 // ],
10953 // "parameters": {
10954 // "bucket": {
10955 // "description": "Name of the bucket in which the object resides.",
10956 // "location": "path",
10957 // "required": true,
10958 // "type": "string"
10959 // },
10960 // "generation": {
10961 // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
10962 // "format": "int64",
10963 // "location": "query",
10964 // "type": "string"
10965 // },
10966 // "ifGenerationMatch": {
10967 // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.",
10968 // "format": "int64",
10969 // "location": "query",
10970 // "type": "string"
10971 // },
10972 // "ifGenerationNotMatch": {
10973 // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.",
10974 // "format": "int64",
10975 // "location": "query",
10976 // "type": "string"
10977 // },
10978 // "ifMetagenerationMatch": {
10979 // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
10980 // "format": "int64",
10981 // "location": "query",
10982 // "type": "string"
10983 // },
10984 // "ifMetagenerationNotMatch": {
10985 // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
10986 // "format": "int64",
10987 // "location": "query",
10988 // "type": "string"
10989 // },
10990 // "object": {
10991 // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
10992 // "location": "path",
10993 // "required": true,
10994 // "type": "string"
10995 // },
10996 // "predefinedAcl": {
10997 // "description": "Apply a predefined set of access controls to this object.",
10998 // "enum": [
10999 // "authenticatedRead",
11000 // "bucketOwnerFullControl",
11001 // "bucketOwnerRead",
11002 // "private",
11003 // "projectPrivate",
11004 // "publicRead"
11005 // ],
11006 // "enumDescriptions": [
11007 // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
11008 // "Object owner gets OWNER access, and project team owners get OWNER access.",
11009 // "Object owner gets OWNER access, and project team owners get READER access.",
11010 // "Object owner gets OWNER access.",
11011 // "Object owner gets OWNER access, and project team members get access according to their roles.",
11012 // "Object owner gets OWNER access, and allUsers get READER access."
11013 // ],
11014 // "location": "query",
11015 // "type": "string"
11016 // },
11017 // "projection": {
11018 // "description": "Set of properties to return. Defaults to full.",
11019 // "enum": [
11020 // "full",
11021 // "noAcl"
11022 // ],
11023 // "enumDescriptions": [
11024 // "Include all properties.",
11025 // "Omit the owner, acl property."
11026 // ],
11027 // "location": "query",
11028 // "type": "string"
11029 // },
11030 // "userProject": {
11031 // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
11032 // "location": "query",
11033 // "type": "string"
11034 // }
11035 // },
11036 // "path": "b/{bucket}/o/{object}",
11037 // "request": {
11038 // "$ref": "Object"
11039 // },
11040 // "response": {
11041 // "$ref": "Object"
11042 // },
11043 // "scopes": [
11044 // "https://www.googleapis.com/auth/cloud-platform",
11045 // "https://www.googleapis.com/auth/devstorage.full_control"
11046 // ]
11047 // }
11048
11049}
11050
11051// method id "storage.objects.watchAll":
11052
11053type ObjectsWatchAllCall struct {
11054 s *Service
11055 bucket string
11056 channel *Channel
11057 urlParams_ gensupport.URLParams
11058 ctx_ context.Context
11059 header_ http.Header
11060}
11061
11062// WatchAll: Watch for changes on all objects in a bucket.
11063func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall {
11064 c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)}
11065 c.bucket = bucket
11066 c.channel = channel
11067 return c
11068}
11069
11070// Delimiter sets the optional parameter "delimiter": Returns results in
11071// a directory-like mode. items will contain only objects whose names,
11072// aside from the prefix, do not contain delimiter. Objects whose names,
11073// aside from the prefix, contain delimiter will have their name,
11074// truncated after the delimiter, returned in prefixes. Duplicate
11075// prefixes are omitted.
11076func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall {
11077 c.urlParams_.Set("delimiter", delimiter)
11078 return c
11079}
11080
11081// IncludeTrailingDelimiter sets the optional parameter
11082// "includeTrailingDelimiter": If true, objects that end in exactly one
11083// instance of delimiter will have their metadata included in items in
11084// addition to prefixes.
11085func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall {
11086 c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter))
11087 return c
11088}
11089
11090// MaxResults sets the optional parameter "maxResults": Maximum number
11091// of items plus prefixes to return in a single page of responses. As
11092// duplicate prefixes are omitted, fewer total results may be returned
11093// than requested. The service will use this parameter or 1,000 items,
11094// whichever is smaller.
11095func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall {
11096 c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
11097 return c
11098}
11099
11100// PageToken sets the optional parameter "pageToken": A
11101// previously-returned page token representing part of the larger set of
11102// results to view.
11103func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall {
11104 c.urlParams_.Set("pageToken", pageToken)
11105 return c
11106}
11107
11108// Prefix sets the optional parameter "prefix": Filter results to
11109// objects whose names begin with this prefix.
11110func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall {
11111 c.urlParams_.Set("prefix", prefix)
11112 return c
11113}
11114
11115// Projection sets the optional parameter "projection": Set of
11116// properties to return. Defaults to noAcl.
11117//
11118// Possible values:
11119// "full" - Include all properties.
11120// "noAcl" - Omit the owner, acl property.
11121func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall {
11122 c.urlParams_.Set("projection", projection)
11123 return c
11124}
11125
11126// UserProject sets the optional parameter "userProject": The project to
11127// be billed for this request. Required for Requester Pays buckets.
11128func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall {
11129 c.urlParams_.Set("userProject", userProject)
11130 return c
11131}
11132
11133// Versions sets the optional parameter "versions": If true, lists all
11134// versions of an object as distinct results. The default is false. For
11135// more information, see Object Versioning.
11136func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall {
11137 c.urlParams_.Set("versions", fmt.Sprint(versions))
11138 return c
11139}
11140
11141// Fields allows partial responses to be retrieved. See
11142// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
11143// for more information.
11144func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall {
11145 c.urlParams_.Set("fields", googleapi.CombineFields(s))
11146 return c
11147}
11148
11149// Context sets the context to be used in this call's Do method. Any
11150// pending HTTP request will be aborted if the provided context is
11151// canceled.
11152func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall {
11153 c.ctx_ = ctx
11154 return c
11155}
11156
11157// Header returns an http.Header that can be modified by the caller to
11158// add HTTP headers to the request.
11159func (c *ObjectsWatchAllCall) Header() http.Header {
11160 if c.header_ == nil {
11161 c.header_ = make(http.Header)
11162 }
11163 return c.header_
11164}
11165
11166func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
11167 reqHeaders := make(http.Header)
11168 for k, v := range c.header_ {
11169 reqHeaders[k] = v
11170 }
11171 reqHeaders.Set("User-Agent", c.s.userAgent())
11172 var body io.Reader = nil
11173 body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
11174 if err != nil {
11175 return nil, err
11176 }
11177 reqHeaders.Set("Content-Type", "application/json")
11178 c.urlParams_.Set("alt", alt)
11179 c.urlParams_.Set("prettyPrint", "false")
11180 urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch")
11181 urls += "?" + c.urlParams_.Encode()
11182 req, err := http.NewRequest("POST", urls, body)
11183 if err != nil {
11184 return nil, err
11185 }
11186 req.Header = reqHeaders
11187 googleapi.Expand(req.URL, map[string]string{
11188 "bucket": c.bucket,
11189 })
11190 return gensupport.SendRequest(c.ctx_, c.s.client, req)
11191}
11192
11193// Do executes the "storage.objects.watchAll" call.
11194// Exactly one of *Channel or error will be non-nil. Any non-2xx status
11195// code is an error. Response headers are in either
11196// *Channel.ServerResponse.Header or (if a response was returned at all)
11197// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
11198// check whether the returned error was because http.StatusNotModified
11199// was returned.
11200func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) {
11201 gensupport.SetOptions(c.urlParams_, opts...)
11202 res, err := c.doRequest("json")
11203 if res != nil && res.StatusCode == http.StatusNotModified {
11204 if res.Body != nil {
11205 res.Body.Close()
11206 }
11207 return nil, &googleapi.Error{
11208 Code: res.StatusCode,
11209 Header: res.Header,
11210 }
11211 }
11212 if err != nil {
11213 return nil, err
11214 }
11215 defer googleapi.CloseBody(res)
11216 if err := googleapi.CheckResponse(res); err != nil {
11217 return nil, err
11218 }
11219 ret := &Channel{
11220 ServerResponse: googleapi.ServerResponse{
11221 Header: res.Header,
11222 HTTPStatusCode: res.StatusCode,
11223 },
11224 }
11225 target := &ret
11226 if err := gensupport.DecodeResponse(target, res); err != nil {
11227 return nil, err
11228 }
11229 return ret, nil
11230 // {
11231 // "description": "Watch for changes on all objects in a bucket.",
11232 // "httpMethod": "POST",
11233 // "id": "storage.objects.watchAll",
11234 // "parameterOrder": [
11235 // "bucket"
11236 // ],
11237 // "parameters": {
11238 // "bucket": {
11239 // "description": "Name of the bucket in which to look for objects.",
11240 // "location": "path",
11241 // "required": true,
11242 // "type": "string"
11243 // },
11244 // "delimiter": {
11245 // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
11246 // "location": "query",
11247 // "type": "string"
11248 // },
11249 // "includeTrailingDelimiter": {
11250 // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.",
11251 // "location": "query",
11252 // "type": "boolean"
11253 // },
11254 // "maxResults": {
11255 // "default": "1000",
11256 // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.",
11257 // "format": "uint32",
11258 // "location": "query",
11259 // "minimum": "0",
11260 // "type": "integer"
11261 // },
11262 // "pageToken": {
11263 // "description": "A previously-returned page token representing part of the larger set of results to view.",
11264 // "location": "query",
11265 // "type": "string"
11266 // },
11267 // "prefix": {
11268 // "description": "Filter results to objects whose names begin with this prefix.",
11269 // "location": "query",
11270 // "type": "string"
11271 // },
11272 // "projection": {
11273 // "description": "Set of properties to return. Defaults to noAcl.",
11274 // "enum": [
11275 // "full",
11276 // "noAcl"
11277 // ],
11278 // "enumDescriptions": [
11279 // "Include all properties.",
11280 // "Omit the owner, acl property."
11281 // ],
11282 // "location": "query",
11283 // "type": "string"
11284 // },
11285 // "userProject": {
11286 // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
11287 // "location": "query",
11288 // "type": "string"
11289 // },
11290 // "versions": {
11291 // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
11292 // "location": "query",
11293 // "type": "boolean"
11294 // }
11295 // },
11296 // "path": "b/{bucket}/o/watch",
11297 // "request": {
11298 // "$ref": "Channel",
11299 // "parameterName": "resource"
11300 // },
11301 // "response": {
11302 // "$ref": "Channel"
11303 // },
11304 // "scopes": [
11305 // "https://www.googleapis.com/auth/cloud-platform",
11306 // "https://www.googleapis.com/auth/cloud-platform.read-only",
11307 // "https://www.googleapis.com/auth/devstorage.full_control",
11308 // "https://www.googleapis.com/auth/devstorage.read_only",
11309 // "https://www.googleapis.com/auth/devstorage.read_write"
11310 // ],
11311 // "supportsSubscription": true
11312 // }
11313
11314}
11315
11316// method id "storage.projects.serviceAccount.get":
11317
11318type ProjectsServiceAccountGetCall struct {
11319 s *Service
11320 projectId string
11321 urlParams_ gensupport.URLParams
11322 ifNoneMatch_ string
11323 ctx_ context.Context
11324 header_ http.Header
11325}
11326
11327// Get: Get the email address of this project's Google Cloud Storage
11328// service account.
11329func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall {
11330 c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
11331 c.projectId = projectId
11332 return c
11333}
11334
11335// UserProject sets the optional parameter "userProject": The project to
11336// be billed for this request.
11337func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall {
11338 c.urlParams_.Set("userProject", userProject)
11339 return c
11340}
11341
11342// Fields allows partial responses to be retrieved. See
11343// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
11344// for more information.
11345func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall {
11346 c.urlParams_.Set("fields", googleapi.CombineFields(s))
11347 return c
11348}
11349
11350// IfNoneMatch sets the optional parameter which makes the operation
11351// fail if the object's ETag matches the given value. This is useful for
11352// getting updates only after the object has changed since the last
11353// request. Use googleapi.IsNotModified to check whether the response
11354// error from Do is the result of In-None-Match.
11355func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall {
11356 c.ifNoneMatch_ = entityTag
11357 return c
11358}
11359
11360// Context sets the context to be used in this call's Do method. Any
11361// pending HTTP request will be aborted if the provided context is
11362// canceled.
11363func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall {
11364 c.ctx_ = ctx
11365 return c
11366}
11367
11368// Header returns an http.Header that can be modified by the caller to
11369// add HTTP headers to the request.
11370func (c *ProjectsServiceAccountGetCall) Header() http.Header {
11371 if c.header_ == nil {
11372 c.header_ = make(http.Header)
11373 }
11374 return c.header_
11375}
11376
11377func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) {
11378 reqHeaders := make(http.Header)
11379 for k, v := range c.header_ {
11380 reqHeaders[k] = v
11381 }
11382 reqHeaders.Set("User-Agent", c.s.userAgent())
11383 if c.ifNoneMatch_ != "" {
11384 reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
11385 }
11386 var body io.Reader = nil
11387 c.urlParams_.Set("alt", alt)
11388 c.urlParams_.Set("prettyPrint", "false")
11389 urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount")
11390 urls += "?" + c.urlParams_.Encode()
11391 req, err := http.NewRequest("GET", urls, body)
11392 if err != nil {
11393 return nil, err
11394 }
11395 req.Header = reqHeaders
11396 googleapi.Expand(req.URL, map[string]string{
11397 "projectId": c.projectId,
11398 })
11399 return gensupport.SendRequest(c.ctx_, c.s.client, req)
11400}
11401
11402// Do executes the "storage.projects.serviceAccount.get" call.
11403// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx
11404// status code is an error. Response headers are in either
11405// *ServiceAccount.ServerResponse.Header or (if a response was returned
11406// at all) in error.(*googleapi.Error).Header. Use
11407// googleapi.IsNotModified to check whether the returned error was
11408// because http.StatusNotModified was returned.
11409func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) {
11410 gensupport.SetOptions(c.urlParams_, opts...)
11411 res, err := c.doRequest("json")
11412 if res != nil && res.StatusCode == http.StatusNotModified {
11413 if res.Body != nil {
11414 res.Body.Close()
11415 }
11416 return nil, &googleapi.Error{
11417 Code: res.StatusCode,
11418 Header: res.Header,
11419 }
11420 }
11421 if err != nil {
11422 return nil, err
11423 }
11424 defer googleapi.CloseBody(res)
11425 if err := googleapi.CheckResponse(res); err != nil {
11426 return nil, err
11427 }
11428 ret := &ServiceAccount{
11429 ServerResponse: googleapi.ServerResponse{
11430 Header: res.Header,
11431 HTTPStatusCode: res.StatusCode,
11432 },
11433 }
11434 target := &ret
11435 if err := gensupport.DecodeResponse(target, res); err != nil {
11436 return nil, err
11437 }
11438 return ret, nil
11439 // {
11440 // "description": "Get the email address of this project's Google Cloud Storage service account.",
11441 // "httpMethod": "GET",
11442 // "id": "storage.projects.serviceAccount.get",
11443 // "parameterOrder": [
11444 // "projectId"
11445 // ],
11446 // "parameters": {
11447 // "projectId": {
11448 // "description": "Project ID",
11449 // "location": "path",
11450 // "required": true,
11451 // "type": "string"
11452 // },
11453 // "userProject": {
11454 // "description": "The project to be billed for this request.",
11455 // "location": "query",
11456 // "type": "string"
11457 // }
11458 // },
11459 // "path": "projects/{projectId}/serviceAccount",
11460 // "response": {
11461 // "$ref": "ServiceAccount"
11462 // },
11463 // "scopes": [
11464 // "https://www.googleapis.com/auth/cloud-platform",
11465 // "https://www.googleapis.com/auth/cloud-platform.read-only",
11466 // "https://www.googleapis.com/auth/devstorage.full_control",
11467 // "https://www.googleapis.com/auth/devstorage.read_only",
11468 // "https://www.googleapis.com/auth/devstorage.read_write"
11469 // ]
11470 // }
11471
11472}
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
new file mode 100644
index 0000000..a25da67
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/http/dial.go
@@ -0,0 +1,147 @@
1// Copyright 2015 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15// Package http supports network connections to HTTP servers.
16// This package is not intended for use by end developers. Use the
17// google.golang.org/api/option package to configure API clients.
18package http
19
20import (
21 "context"
22 "errors"
23 "net/http"
24
25 "go.opencensus.io/plugin/ochttp"
26 "golang.org/x/oauth2"
27 "google.golang.org/api/googleapi/transport"
28 "google.golang.org/api/internal"
29 "google.golang.org/api/option"
30 "google.golang.org/api/transport/http/internal/propagation"
31)
32
33// NewClient returns an HTTP client for use communicating with a Google cloud
34// service, configured with the given ClientOptions. It also returns the endpoint
35// for the service as specified in the options.
36func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
37 settings, err := newSettings(opts)
38 if err != nil {
39 return nil, "", err
40 }
41 // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
42 if settings.HTTPClient != nil {
43 return settings.HTTPClient, settings.Endpoint, nil
44 }
45 trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings)
46 if err != nil {
47 return nil, "", err
48 }
49 return &http.Client{Transport: trans}, settings.Endpoint, nil
50}
51
52// NewTransport creates an http.RoundTripper for use communicating with a Google
53// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
54func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) {
55 settings, err := newSettings(opts)
56 if err != nil {
57 return nil, err
58 }
59 if settings.HTTPClient != nil {
60 return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
61 }
62 return newTransport(ctx, base, settings)
63}
64
65func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) {
66 trans := base
67 trans = userAgentTransport{
68 base: trans,
69 userAgent: settings.UserAgent,
70 }
71 trans = addOCTransport(trans)
72 switch {
73 case settings.NoAuth:
74 // Do nothing.
75 case settings.APIKey != "":
76 trans = &transport.APIKey{
77 Transport: trans,
78 Key: settings.APIKey,
79 }
80 default:
81 creds, err := internal.Creds(ctx, settings)
82 if err != nil {
83 return nil, err
84 }
85 trans = &oauth2.Transport{
86 Base: trans,
87 Source: creds.TokenSource,
88 }
89 }
90 return trans, nil
91}
92
93func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) {
94 var o internal.DialSettings
95 for _, opt := range opts {
96 opt.Apply(&o)
97 }
98 if err := o.Validate(); err != nil {
99 return nil, err
100 }
101 if o.GRPCConn != nil {
102 return nil, errors.New("unsupported gRPC connection specified")
103 }
104 return &o, nil
105}
106
107type userAgentTransport struct {
108 userAgent string
109 base http.RoundTripper
110}
111
112func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
113 rt := t.base
114 if rt == nil {
115 return nil, errors.New("transport: no Transport specified")
116 }
117 if t.userAgent == "" {
118 return rt.RoundTrip(req)
119 }
120 newReq := *req
121 newReq.Header = make(http.Header)
122 for k, vv := range req.Header {
123 newReq.Header[k] = vv
124 }
125 // TODO(cbro): append to existing User-Agent header?
126 newReq.Header["User-Agent"] = []string{t.userAgent}
127 return rt.RoundTrip(&newReq)
128}
129
130// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
131var appengineUrlfetchHook func(context.Context) http.RoundTripper
132
133// defaultBaseTransport returns the base HTTP transport.
134// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport.
135func defaultBaseTransport(ctx context.Context) http.RoundTripper {
136 if appengineUrlfetchHook != nil {
137 return appengineUrlfetchHook(ctx)
138 }
139 return http.DefaultTransport
140}
141
142func addOCTransport(trans http.RoundTripper) http.RoundTripper {
143 return &ochttp.Transport{
144 Base: trans,
145 Propagation: &propagation.HTTPFormat{},
146 }
147}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/api/transport/http/dial_appengine.go
index 6072fdc..04c8141 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto
+++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go
@@ -1,10 +1,10 @@
1// Copyright 2017 gRPC authors. 1// Copyright 2016 Google LLC
2// 2//
3// Licensed under the Apache License, Version 2.0 (the "License"); 3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License. 4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at 5// You may obtain a copy of the License at
6// 6//
7// http://www.apache.org/licenses/LICENSE-2.0 7// http://www.apache.org/licenses/LICENSE-2.0
8// 8//
9// Unless required by applicable law or agreed to in writing, software 9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, 10// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,23 +12,19 @@
12// See the License for the specific language governing permissions and 12// See the License for the specific language governing permissions and
13// limitations under the License. 13// limitations under the License.
14 14
15syntax = "proto3"; 15// +build appengine
16 16
17package grpc.health.v1; 17package http
18 18
19message HealthCheckRequest { 19import (
20 string service = 1; 20 "context"
21} 21 "net/http"
22 22
23message HealthCheckResponse { 23 "google.golang.org/appengine/urlfetch"
24 enum ServingStatus { 24)
25 UNKNOWN = 0;
26 SERVING = 1;
27 NOT_SERVING = 2;
28 }
29 ServingStatus status = 1;
30}
31 25
32service Health{ 26func init() {
33 rpc Check(HealthCheckRequest) returns (HealthCheckResponse); 27 appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper {
34} 28 return &urlfetch.Transport{Context: ctx}
29 }
30}
diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
new file mode 100644
index 0000000..24b4f0d
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
@@ -0,0 +1,96 @@
1// Copyright 2018 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15// +build go1.8
16
17// Package propagation implements X-Cloud-Trace-Context header propagation used
18// by Google Cloud products.
19package propagation
20
21import (
22 "encoding/binary"
23 "encoding/hex"
24 "fmt"
25 "net/http"
26 "strconv"
27 "strings"
28
29 "go.opencensus.io/trace"
30 "go.opencensus.io/trace/propagation"
31)
32
33const (
34 httpHeaderMaxSize = 200
35 httpHeader = `X-Cloud-Trace-Context`
36)
37
38var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
39
40// HTTPFormat implements propagation.HTTPFormat to propagate
41// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
42type HTTPFormat struct{}
43
44// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
45func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
46 h := req.Header.Get(httpHeader)
47 // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
48 // Return if the header is empty or missing, or if the header is unreasonably
49 // large, to avoid making unnecessary copies of a large string.
50 if h == "" || len(h) > httpHeaderMaxSize {
51 return trace.SpanContext{}, false
52 }
53
54 // Parse the trace id field.
55 slash := strings.Index(h, `/`)
56 if slash == -1 {
57 return trace.SpanContext{}, false
58 }
59 tid, h := h[:slash], h[slash+1:]
60
61 buf, err := hex.DecodeString(tid)
62 if err != nil {
63 return trace.SpanContext{}, false
64 }
65 copy(sc.TraceID[:], buf)
66
67 // Parse the span id field.
68 spanstr := h
69 semicolon := strings.Index(h, `;`)
70 if semicolon != -1 {
71 spanstr, h = h[:semicolon], h[semicolon+1:]
72 }
73 sid, err := strconv.ParseUint(spanstr, 10, 64)
74 if err != nil {
75 return trace.SpanContext{}, false
76 }
77 binary.BigEndian.PutUint64(sc.SpanID[:], sid)
78
79 // Parse the options field, options field is optional.
80 if !strings.HasPrefix(h, "o=") {
81 return sc, true
82 }
83 o, err := strconv.ParseUint(h[2:], 10, 64)
84 if err != nil {
85 return trace.SpanContext{}, false
86 }
87 sc.TraceOptions = trace.TraceOptions(o)
88 return sc, true
89}
90
91// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
92func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
93 sid := binary.BigEndian.Uint64(sc.SpanID[:])
94 header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
95 req.Header.Set(httpHeader, header)
96}
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
new file mode 100644
index 0000000..70ffe89
--- /dev/null
+++ b/vendor/google.golang.org/appengine/.travis.yml
@@ -0,0 +1,20 @@
1language: go
2
3go_import_path: google.golang.org/appengine
4
5install:
6 - ./travis_install.sh
7
8script:
9 - ./travis_test.sh
10
11matrix:
12 include:
13 - go: 1.8.x
14 env: GOAPP=true
15 - go: 1.9.x
16 env: GOAPP=true
17 - go: 1.10.x
18 env: GOAPP=false
19 - go: 1.11.x
20 env: GO111MODULE=on
diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
new file mode 100644
index 0000000..ffc2985
--- /dev/null
+++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md
@@ -0,0 +1,90 @@
1# Contributing
2
31. Sign one of the contributor license agreements below.
41. Get the package:
5
6 `go get -d google.golang.org/appengine`
71. Change into the checked out source:
8
9 `cd $GOPATH/src/google.golang.org/appengine`
101. Fork the repo.
111. Set your fork as a remote:
12
13 `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git`
141. Make changes, commit to your fork.
151. Send a pull request with your changes.
16 The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request.
17
18# Testing
19
20## Running system tests
21
22Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`.
23
24Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
25
26Run tests with `goapp test`:
27
28```
29goapp test -v google.golang.org/appengine/...
30```
31
32## Contributor License Agreements
33
34Before we can accept your pull requests you'll need to sign a Contributor
35License Agreement (CLA):
36
37- **If you are an individual writing original source code** and **you own the
38intellectual property**, then you'll need to sign an [individual CLA][indvcla].
39- **If you work for a company that wants to allow you to contribute your work**,
40then you'll need to sign a [corporate CLA][corpcla].
41
42You can sign these electronically (just scroll to the bottom). After that,
43we'll be able to accept your pull requests.
44
45## Contributor Code of Conduct
46
47As contributors and maintainers of this project,
48and in the interest of fostering an open and welcoming community,
49we pledge to respect all people who contribute through reporting issues,
50posting feature requests, updating documentation,
51submitting pull requests or patches, and other activities.
52
53We are committed to making participation in this project
54a harassment-free experience for everyone,
55regardless of level of experience, gender, gender identity and expression,
56sexual orientation, disability, personal appearance,
57body size, race, ethnicity, age, religion, or nationality.
58
59Examples of unacceptable behavior by participants include:
60
61* The use of sexualized language or imagery
62* Personal attacks
63* Trolling or insulting/derogatory comments
64* Public or private harassment
65* Publishing other's private information,
66such as physical or electronic
67addresses, without explicit permission
68* Other unethical or unprofessional conduct.
69
70Project maintainers have the right and responsibility to remove, edit, or reject
71comments, commits, code, wiki edits, issues, and other contributions
72that are not aligned to this Code of Conduct.
73By adopting this Code of Conduct,
74project maintainers commit themselves to fairly and consistently
75applying these principles to every aspect of managing this project.
76Project maintainers who do not follow or enforce the Code of Conduct
77may be permanently removed from the project team.
78
79This code of conduct applies both within project spaces and in public spaces
80when an individual is representing the project or its community.
81
82Instances of abusive, harassing, or otherwise unacceptable behavior
83may be reported by opening an issue
84or contacting one or more of the project maintainers.
85
86This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
87available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
88
89[indvcla]: https://developers.google.com/open-source/cla/individual
90[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
1
2 Apache License
3 Version 2.0, January 2004
4 http://www.apache.org/licenses/
5
6 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
8 1. Definitions.
9
10 "License" shall mean the terms and conditions for use, reproduction,
11 and distribution as defined by Sections 1 through 9 of this document.
12
13 "Licensor" shall mean the copyright owner or entity authorized by
14 the copyright owner that is granting the License.
15
16 "Legal Entity" shall mean the union of the acting entity and all
17 other entities that control, are controlled by, or are under common
18 control with that entity. For the purposes of this definition,
19 "control" means (i) the power, direct or indirect, to cause the
20 direction or management of such entity, whether by contract or
21 otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 outstanding shares, or (iii) beneficial ownership of such entity.
23
24 "You" (or "Your") shall mean an individual or Legal Entity
25 exercising permissions granted by this License.
26
27 "Source" form shall mean the preferred form for making modifications,
28 including but not limited to software source code, documentation
29 source, and configuration files.
30
31 "Object" form shall mean any form resulting from mechanical
32 transformation or translation of a Source form, including but
33 not limited to compiled object code, generated documentation,
34 and conversions to other media types.
35
36 "Work" shall mean the work of authorship, whether in Source or
37 Object form, made available under the License, as indicated by a
38 copyright notice that is included in or attached to the work
39 (an example is provided in the Appendix below).
40
41 "Derivative Works" shall mean any work, whether in Source or Object
42 form, that is based on (or derived from) the Work and for which the
43 editorial revisions, annotations, elaborations, or other modifications
44 represent, as a whole, an original work of authorship. For the purposes
45 of this License, Derivative Works shall not include works that remain
46 separable from, or merely link (or bind by name) to the interfaces of,
47 the Work and Derivative Works thereof.
48
49 "Contribution" shall mean any work of authorship, including
50 the original version of the Work and any modifications or additions
51 to that Work or Derivative Works thereof, that is intentionally
52 submitted to Licensor for inclusion in the Work by the copyright owner
53 or by an individual or Legal Entity authorized to submit on behalf of
54 the copyright owner. For the purposes of this definition, "submitted"
55 means any form of electronic, verbal, or written communication sent
56 to the Licensor or its representatives, including but not limited to
57 communication on electronic mailing lists, source code control systems,
58 and issue tracking systems that are managed by, or on behalf of, the
59 Licensor for the purpose of discussing and improving the Work, but
60 excluding communication that is conspicuously marked or otherwise
61 designated in writing by the copyright owner as "Not a Contribution."
62
63 "Contributor" shall mean Licensor and any individual or Legal Entity
64 on behalf of whom a Contribution has been received by Licensor and
65 subsequently incorporated within the Work.
66
67 2. Grant of Copyright License. Subject to the terms and conditions of
68 this License, each Contributor hereby grants to You a perpetual,
69 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 copyright license to reproduce, prepare Derivative Works of,
71 publicly display, publicly perform, sublicense, and distribute the
72 Work and such Derivative Works in Source or Object form.
73
74 3. Grant of Patent License. Subject to the terms and conditions of
75 this License, each Contributor hereby grants to You a perpetual,
76 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 (except as stated in this section) patent license to make, have made,
78 use, offer to sell, sell, import, and otherwise transfer the Work,
79 where such license applies only to those patent claims licensable
80 by such Contributor that are necessarily infringed by their
81 Contribution(s) alone or by combination of their Contribution(s)
82 with the Work to which such Contribution(s) was submitted. If You
83 institute patent litigation against any entity (including a
84 cross-claim or counterclaim in a lawsuit) alleging that the Work
85 or a Contribution incorporated within the Work constitutes direct
86 or contributory patent infringement, then any patent licenses
87 granted to You under this License for that Work shall terminate
88 as of the date such litigation is filed.
89
90 4. Redistribution. You may reproduce and distribute copies of the
91 Work or Derivative Works thereof in any medium, with or without
92 modifications, and in Source or Object form, provided that You
93 meet the following conditions:
94
95 (a) You must give any other recipients of the Work or
96 Derivative Works a copy of this License; and
97
98 (b) You must cause any modified files to carry prominent notices
99 stating that You changed the files; and
100
101 (c) You must retain, in the Source form of any Derivative Works
102 that You distribute, all copyright, patent, trademark, and
103 attribution notices from the Source form of the Work,
104 excluding those notices that do not pertain to any part of
105 the Derivative Works; and
106
107 (d) If the Work includes a "NOTICE" text file as part of its
108 distribution, then any Derivative Works that You distribute must
109 include a readable copy of the attribution notices contained
110 within such NOTICE file, excluding those notices that do not
111 pertain to any part of the Derivative Works, in at least one
112 of the following places: within a NOTICE text file distributed
113 as part of the Derivative Works; within the Source form or
114 documentation, if provided along with the Derivative Works; or,
115 within a display generated by the Derivative Works, if and
116 wherever such third-party notices normally appear. The contents
117 of the NOTICE file are for informational purposes only and
118 do not modify the License. You may add Your own attribution
119 notices within Derivative Works that You distribute, alongside
120 or as an addendum to the NOTICE text from the Work, provided
121 that such additional attribution notices cannot be construed
122 as modifying the License.
123
124 You may add Your own copyright statement to Your modifications and
125 may provide additional or different license terms and conditions
126 for use, reproduction, or distribution of Your modifications, or
127 for any such Derivative Works as a whole, provided Your use,
128 reproduction, and distribution of the Work otherwise complies with
129 the conditions stated in this License.
130
131 5. Submission of Contributions. Unless You explicitly state otherwise,
132 any Contribution intentionally submitted for inclusion in the Work
133 by You to the Licensor shall be under the terms and conditions of
134 this License, without any additional terms or conditions.
135 Notwithstanding the above, nothing herein shall supersede or modify
136 the terms of any separate license agreement you may have executed
137 with Licensor regarding such Contributions.
138
139 6. Trademarks. This License does not grant permission to use the trade
140 names, trademarks, service marks, or product names of the Licensor,
141 except as required for reasonable and customary use in describing the
142 origin of the Work and reproducing the content of the NOTICE file.
143
144 7. Disclaimer of Warranty. Unless required by applicable law or
145 agreed to in writing, Licensor provides the Work (and each
146 Contributor provides its Contributions) on an "AS IS" BASIS,
147 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 implied, including, without limitation, any warranties or conditions
149 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 PARTICULAR PURPOSE. You are solely responsible for determining the
151 appropriateness of using or redistributing the Work and assume any
152 risks associated with Your exercise of permissions under this License.
153
154 8. Limitation of Liability. In no event and under no legal theory,
155 whether in tort (including negligence), contract, or otherwise,
156 unless required by applicable law (such as deliberate and grossly
157 negligent acts) or agreed to in writing, shall any Contributor be
158 liable to You for damages, including any direct, indirect, special,
159 incidental, or consequential damages of any character arising as a
160 result of this License or out of the use or inability to use the
161 Work (including but not limited to damages for loss of goodwill,
162 work stoppage, computer failure or malfunction, or any and all
163 other commercial damages or losses), even if such Contributor
164 has been advised of the possibility of such damages.
165
166 9. Accepting Warranty or Additional Liability. While redistributing
167 the Work or Derivative Works thereof, You may choose to offer,
168 and charge a fee for, acceptance of support, warranty, indemnity,
169 or other liability obligations and/or rights consistent with this
170 License. However, in accepting such obligations, You may act only
171 on Your own behalf and on Your sole responsibility, not on behalf
172 of any other Contributor, and only if You agree to indemnify,
173 defend, and hold each Contributor harmless for any liability
174 incurred by, or claims asserted against, such Contributor by reason
175 of your accepting any such warranty or additional liability.
176
177 END OF TERMS AND CONDITIONS
178
179 APPENDIX: How to apply the Apache License to your work.
180
181 To apply the Apache License to your work, attach the following
182 boilerplate notice, with the fields enclosed by brackets "[]"
183 replaced with your own identifying information. (Don't include
184 the brackets!) The text should be enclosed in the appropriate
185 comment syntax for the file format. We also recommend that a
186 file or class name and description of purpose be included on the
187 same "printed page" as the copyright notice for easier
188 identification within third-party archives.
189
190 Copyright [yyyy] [name of copyright owner]
191
192 Licensed under the Apache License, Version 2.0 (the "License");
193 you may not use this file except in compliance with the License.
194 You may obtain a copy of the License at
195
196 http://www.apache.org/licenses/LICENSE-2.0
197
198 Unless required by applicable law or agreed to in writing, software
199 distributed under the License is distributed on an "AS IS" BASIS,
200 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 See the License for the specific language governing permissions and
202 limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 0000000..d86768a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,73 @@
1# Go App Engine packages
2
3[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
4
5This repository supports the Go runtime on *App Engine standard*.
6It provides APIs for interacting with App Engine services.
7Its canonical import path is `google.golang.org/appengine`.
8
9See https://cloud.google.com/appengine/docs/go/
10for more information.
11
12File issue reports and feature requests on the [GitHub's issue
13tracker](https://github.com/golang/appengine/issues).
14
15## Upgrading an App Engine app to the flexible environment
16
17This package does not work on *App Engine flexible*.
18
19There are many differences between the App Engine standard environment and
20the flexible environment.
21
22See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading).
23
24## Directory structure
25
26The top level directory of this repository is the `appengine` package. It
27contains the
28basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
29packages are in subdirectories (e.g. `datastore`).
30
31There is an `internal` subdirectory that contains service protocol buffers,
32plus packages required for connectivity to make API calls. App Engine apps
33should not directly import any package under `internal`.
34
35## Updating from legacy (`import "appengine"`) packages
36
37If you're currently using the bare `appengine` packages
38(that is, not these ones, imported via `google.golang.org/appengine`),
39then you can use the `aefix` tool to help automate an upgrade to these packages.
40
41Run `go get google.golang.org/appengine/cmd/aefix` to install it.
42
43### 1. Update import paths
44
45The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
46You will need to update your code to use import paths starting with that; for instance,
47code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
48
49### 2. Update code using deprecated, removed or modified APIs
50
51Most App Engine services are available with exactly the same API.
52A few APIs were cleaned up, and there are some differences:
53
54* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
55* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
56* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
57* `appengine.Datacenter` now takes a `context.Context` argument.
58* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
59* `delay.Call` now returns an error.
60* `search.FieldLoadSaver` now handles document metadata.
61* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
62 `context.Context` instead.
63* `aetest` no longer declares its own Context type, and uses the standard one instead.
64* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
65 deprecated and unused for a long time.
66* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
67 Use `appengine.ModuleHostname`and `appengine.ModuleName` instead.
68* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
69 Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
70 feature you require is not present in the new
71 [blobstore package](https://google.golang.org/appengine/blobstore).
72* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
73 Use the standard `net` package instead.
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 0000000..0cca033
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,137 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// Package appengine provides basic functionality for Google App Engine.
6//
7// For more information on how to write Go apps for Google App Engine, see:
8// https://cloud.google.com/appengine/docs/go/
9package appengine // import "google.golang.org/appengine"
10
11import (
12 "net/http"
13
14 "github.com/golang/protobuf/proto"
15 "golang.org/x/net/context"
16
17 "google.golang.org/appengine/internal"
18)
19
20// The gophers party all night; the rabbits provide the beats.
21
22// Main is the principal entry point for an app running in App Engine.
23//
24// On App Engine Flexible it installs a trivial health checker if one isn't
25// already registered, and starts listening on port 8080 (overridden by the
26// $PORT environment variable).
27//
28// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
29// for details on how to do your own health checking.
30//
31// On App Engine Standard it ensures the server has started and is prepared to
32// receive requests.
33//
34// Main never returns.
35//
36// Main is designed so that the app's main package looks like this:
37//
38// package main
39//
40// import (
41// "google.golang.org/appengine"
42//
43// _ "myapp/package0"
44// _ "myapp/package1"
45// )
46//
47// func main() {
48// appengine.Main()
49// }
50//
51// The "myapp/packageX" packages are expected to register HTTP handlers
52// in their init functions.
func Main() {
	// All setup and serving happens inside the internal package; as
	// documented above, this call never returns.
	internal.Main()
}
56
// The IsXxx predicates below report which serving environment the process is
// running in; the actual detection logic lives in the internal package.

// IsDevAppServer reports whether the App Engine app is running in the
// development App Server.
func IsDevAppServer() bool {
	return internal.IsDevAppServer()
}

// IsStandard reports whether the App Engine app is running in the standard
// environment. This includes both the first generation runtimes (<= Go 1.9)
// and the second generation runtimes (>= Go 1.11).
func IsStandard() bool {
	return internal.IsStandard()
}

// IsFlex reports whether the App Engine app is running in the flexible environment.
func IsFlex() bool {
	return internal.IsFlex()
}

// IsAppEngine reports whether the App Engine app is running on App Engine, in either
// the standard or flexible environment.
func IsAppEngine() bool {
	return internal.IsAppEngine()
}

// IsSecondGen reports whether the App Engine app is running on the second generation
// runtimes (>= Go 1.11).
func IsSecondGen() bool {
	return internal.IsSecondGen()
}
86
// NewContext returns a context for an in-flight HTTP request.
// This function is cheap.
func NewContext(req *http.Request) context.Context {
	return internal.ReqContext(req)
}

// WithContext returns a copy of the parent context
// and associates it with an in-flight HTTP request.
// This function is cheap.
//
// Unlike NewContext, WithContext lets the caller supply the parent context
// (for example one carrying a deadline or values).
func WithContext(parent context.Context, req *http.Request) context.Context {
	return internal.WithContext(parent, req)
}
99
100// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
101
// BlobKey is a key for a blobstore blob.
//
// Conceptually, this type belongs in the blobstore package, but it lives in
// the appengine package to avoid a circular dependency: blobstore depends on
// datastore, and datastore needs to refer to the BlobKey type.
//
// Its underlying type is string, so values convert freely to and from string.
type BlobKey string
108
// GeoPoint represents a location as latitude/longitude in degrees.
type GeoPoint struct {
	Lat, Lng float64
}

// Valid reports whether g lies within [-90, 90] latitude and
// [-180, 180] longitude.
func (g GeoPoint) Valid() bool {
	latOK := g.Lat >= -90 && g.Lat <= 90
	lngOK := g.Lng >= -180 && g.Lng <= 180
	return latOK && lngOK
}
118
// APICallFunc defines a function type for handling an API call.
// See WithCallOverride.
type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error

// WithAPICallFunc returns a copy of the parent context
// that will cause API calls to invoke f instead of their normal operation.
//
// This is intended for advanced users only.
func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
	// APICallFunc matches internal.CallOverrideFunc's signature, so a plain
	// conversion is all that is needed to register the override.
	return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
}
130
// APICall performs an API call.
//
// This is not intended for general use; it is exported for use in conjunction
// with WithAPICallFunc.
func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
	// NOTE(review): presumably internal.Call consults any override installed
	// via WithAPICallFunc — confirm in the internal package.
	return internal.Call(ctx, service, method, in, out)
}
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 0000000..f4b645a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,20 @@
1// Copyright 2015 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// +build !appengine
6
7package appengine
8
9import (
10 "golang.org/x/net/context"
11
12 "google.golang.org/appengine/internal"
13)
14
// BackgroundContext returns a context not associated with a request.
// This should only be used when not servicing a request.
// This only works in App Engine "flexible environment".
//
// This file is compiled only when the "appengine" build tag is absent
// (see the +build constraint above).
func BackgroundContext() context.Context {
	return internal.BackgroundContext()
}
diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go
new file mode 100644
index 0000000..576bc50
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/datastore.go
@@ -0,0 +1,407 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package datastore
6
7import (
8 "errors"
9 "fmt"
10 "reflect"
11
12 "github.com/golang/protobuf/proto"
13 "golang.org/x/net/context"
14
15 "google.golang.org/appengine"
16 "google.golang.org/appengine/internal"
17 pb "google.golang.org/appengine/internal/datastore"
18)
19
20var (
21 // ErrInvalidEntityType is returned when functions like Get or Next are
22 // passed a dst or src argument of invalid type.
23 ErrInvalidEntityType = errors.New("datastore: invalid entity type")
24 // ErrInvalidKey is returned when an invalid key is presented.
25 ErrInvalidKey = errors.New("datastore: invalid key")
26 // ErrNoSuchEntity is returned when no entity was found for a given key.
27 ErrNoSuchEntity = errors.New("datastore: no such entity")
28)
29
30// ErrFieldMismatch is returned when a field is to be loaded into a different
31// type than the one it was stored from, or when a field is missing or
32// unexported in the destination struct.
33// StructType is the type of the struct pointed to by the destination argument
34// passed to Get or to Iterator.Next.
35type ErrFieldMismatch struct {
36 StructType reflect.Type
37 FieldName string
38 Reason string
39}
40
41func (e *ErrFieldMismatch) Error() string {
42 return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
43 e.FieldName, e.StructType, e.Reason)
44}
45
// protoToKey converts a Reference proto to a *Key. If the key is invalid,
// protoToKey will return the invalid key along with ErrInvalidKey.
func protoToKey(r *pb.Reference) (k *Key, err error) {
	appID := r.GetApp()
	namespace := r.GetNameSpace()
	// Walk the path elements root-to-leaf; each element becomes a Key whose
	// parent is the Key built from the preceding elements.
	for _, e := range r.Path.Element {
		k = &Key{
			kind:      e.GetType(),
			stringID:  e.GetName(),
			intID:     e.GetId(),
			parent:    k,
			appID:     appID,
			namespace: namespace,
		}
		if !k.valid() {
			// Unlike referenceValueToKey, the partially built key is
			// returned alongside the error, per the doc comment above.
			return k, ErrInvalidKey
		}
	}
	return
}
66
// keyToProto converts a *Key to a Reference proto.
func keyToProto(defaultAppID string, k *Key) *pb.Reference {
	appID := k.appID
	if appID == "" {
		appID = defaultAppID
	}
	// Count the ancestors so the path slice can be allocated exactly.
	n := 0
	for i := k; i != nil; i = i.parent {
		n++
	}
	// Fill the slice back-to-front while walking leaf-to-root, so the
	// resulting path reads root-to-leaf.
	e := make([]*pb.Path_Element, n)
	for i := k; i != nil; i = i.parent {
		n--
		e[n] = &pb.Path_Element{
			Type: &i.kind,
		}
		// At most one of {Name,Id} should be set.
		// Neither will be set for incomplete keys.
		if i.stringID != "" {
			e[n].Name = &i.stringID
		} else if i.intID != 0 {
			e[n].Id = &i.intID
		}
	}
	// Omit the namespace field entirely when it is empty.
	var namespace *string
	if k.namespace != "" {
		namespace = proto.String(k.namespace)
	}
	return &pb.Reference{
		App:       proto.String(appID),
		NameSpace: namespace,
		Path: &pb.Path{
			Element: e,
		},
	}
}
103
104// multiKeyToProto is a batch version of keyToProto.
105func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
106 ret := make([]*pb.Reference, len(key))
107 for i, k := range key {
108 ret[i] = keyToProto(appID, k)
109 }
110 return ret
111}
112
113// multiValid is a batch version of Key.valid. It returns an error, not a
114// []bool.
115func multiValid(key []*Key) error {
116 invalid := false
117 for _, k := range key {
118 if !k.valid() {
119 invalid = true
120 break
121 }
122 }
123 if !invalid {
124 return nil
125 }
126 err := make(appengine.MultiError, len(key))
127 for i, k := range key {
128 if !k.valid() {
129 err[i] = ErrInvalidKey
130 }
131 }
132 return err
133}
134
135// It's unfortunate that the two semantically equivalent concepts pb.Reference
136// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
137// two have different protobuf field numbers.
138
// referenceValueToKey is the same as protoToKey except the input is a
// PropertyValue_ReferenceValue instead of a Reference.
func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
	appID := r.GetApp()
	namespace := r.GetNameSpace()
	// Build the key root-to-leaf, mirroring protoToKey.
	for _, e := range r.Pathelement {
		k = &Key{
			kind:      e.GetType(),
			stringID:  e.GetName(),
			intID:     e.GetId(),
			parent:    k,
			appID:     appID,
			namespace: namespace,
		}
		if !k.valid() {
			// Note: unlike protoToKey, nil (not the partial key) is
			// returned on an invalid key.
			return nil, ErrInvalidKey
		}
	}
	return
}
159
// keyToReferenceValue is the same as keyToProto except the output is a
// PropertyValue_ReferenceValue instead of a Reference.
func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
	// Build a Reference first, then copy its fields into the ReferenceValue
	// shape; the two protos are semantically equivalent but have different
	// field numbers (see the comment above these functions).
	ref := keyToProto(defaultAppID, k)
	pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
	for i, e := range ref.Path.Element {
		pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
			Type: e.Type,
			Id:   e.Id,
			Name: e.Name,
		}
	}
	return &pb.PropertyValue_ReferenceValue{
		App:         ref.App,
		NameSpace:   ref.NameSpace,
		Pathelement: pe,
	}
}
178
179type multiArgType int
180
181const (
182 multiArgTypeInvalid multiArgType = iota
183 multiArgTypePropertyLoadSaver
184 multiArgTypeStruct
185 multiArgTypeStructPtr
186 multiArgTypeInterface
187)
188
// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
	if v.Kind() != reflect.Slice {
		return multiArgTypeInvalid, nil
	}
	// PropertyList is rejected to avoid it being mistakenly passed where
	// []PropertyList was intended (see the GetMulti doc comment).
	if v.Type() == typeOfPropertyList {
		return multiArgTypeInvalid, nil
	}
	elemType = v.Type().Elem()
	// Checking *P covers both P and *P implementing PropertyLoadSaver,
	// since a pointer type's method set includes the value methods.
	if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
		return multiArgTypePropertyLoadSaver, elemType
	}
	switch elemType.Kind() {
	case reflect.Struct:
		return multiArgTypeStruct, elemType
	case reflect.Interface:
		return multiArgTypeInterface, elemType
	case reflect.Ptr:
		// Only pointers to structs are acceptable; other pointer types
		// fall through to the invalid return below.
		elemType = elemType.Elem()
		if elemType.Kind() == reflect.Struct {
			return multiArgTypeStructPtr, elemType
		}
	}
	return multiArgTypeInvalid, nil
}
221
// Get loads the entity stored for k into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func Get(c context.Context, key *Key, dst interface{}) error {
	if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
		return ErrInvalidEntityType
	}
	// Delegate to the batch API with a one-element batch, then unwrap the
	// single-element MultiError so the caller sees a plain error.
	err := GetMulti(c, []*Key{key}, []interface{}{dst})
	if me, ok := err.(appengine.MultiError); ok {
		return me[0]
	}
	return err
}
244
// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func GetMulti(c context.Context, key []*Key, dst interface{}) error {
	v := reflect.ValueOf(dst)
	multiArgType, _ := checkMultiArg(v)
	if multiArgType == multiArgTypeInvalid {
		return errors.New("datastore: dst has invalid type")
	}
	if len(key) != v.Len() {
		return errors.New("datastore: key and dst slices have different length")
	}
	if len(key) == 0 {
		return nil
	}
	if err := multiValid(key); err != nil {
		return err
	}
	req := &pb.GetRequest{
		Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
	}
	res := &pb.GetResponse{}
	if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil {
		return err
	}
	if len(key) != len(res.Entity) {
		return errors.New("datastore: internal error: server returned the wrong number of entities")
	}
	// Load each returned entity into the corresponding dst element,
	// collecting per-index errors in a MultiError.
	multiErr, any := make(appengine.MultiError, len(key)), false
	for i, e := range res.Entity {
		if e.Entity == nil {
			// A nil entity in the response means no entity was found
			// for that key.
			multiErr[i] = ErrNoSuchEntity
		} else {
			elem := v.Index(i)
			// Structs and non-pointer PropertyLoadSavers must be
			// addressed so loading can mutate them in place.
			if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
				elem = elem.Addr()
			}
			// For a []*S, allocate a fresh struct for nil entries.
			if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
				elem.Set(reflect.New(elem.Type().Elem()))
			}
			multiErr[i] = loadEntity(elem.Interface(), e.Entity)
		}
		if multiErr[i] != nil {
			any = true
		}
	}
	// Only surface the MultiError if at least one element failed.
	if any {
		return multiErr
	}
	return nil
}
303
304// Put saves the entity src into the datastore with key k. src must be a struct
305// pointer or implement PropertyLoadSaver; if a struct pointer then any
306// unexported fields of that struct will be skipped. If k is an incomplete key,
307// the returned key will be a unique key generated by the datastore.
308func Put(c context.Context, key *Key, src interface{}) (*Key, error) {
309 k, err := PutMulti(c, []*Key{key}, []interface{}{src})
310 if err != nil {
311 if me, ok := err.(appengine.MultiError); ok {
312 return nil, me[0]
313 }
314 return nil, err
315 }
316 return k[0], nil
317}
318
// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {
	v := reflect.ValueOf(src)
	multiArgType, _ := checkMultiArg(v)
	if multiArgType == multiArgTypeInvalid {
		return nil, errors.New("datastore: src has invalid type")
	}
	if len(key) != v.Len() {
		return nil, errors.New("datastore: key and src slices have different length")
	}
	if len(key) == 0 {
		return nil, nil
	}
	appID := internal.FullyQualifiedAppID(c)
	if err := multiValid(key); err != nil {
		return nil, err
	}
	// Serialize each src element against its key into the request.
	req := &pb.PutRequest{}
	for i := range key {
		elem := v.Index(i)
		// Structs and non-pointer PropertyLoadSavers must be addressed
		// so saveEntity receives a pointer value.
		if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
			elem = elem.Addr()
		}
		sProto, err := saveEntity(appID, key[i], elem.Interface())
		if err != nil {
			return nil, err
		}
		req.Entity = append(req.Entity, sProto)
	}
	res := &pb.PutResponse{}
	if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil {
		return nil, err
	}
	if len(key) != len(res.Key) {
		return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
	}
	// Convert the returned keys and sanity-check them; the server must
	// return complete (ID-assigned), valid keys.
	ret := make([]*Key, len(key))
	for i := range ret {
		var err error
		ret[i], err = protoToKey(res.Key[i])
		if err != nil || ret[i].Incomplete() {
			return nil, errors.New("datastore: internal error: server returned an invalid key")
		}
	}
	return ret, nil
}
367
368// Delete deletes the entity for the given key.
369func Delete(c context.Context, key *Key) error {
370 err := DeleteMulti(c, []*Key{key})
371 if me, ok := err.(appengine.MultiError); ok {
372 return me[0]
373 }
374 return err
375}
376
// DeleteMulti is a batch version of Delete.
func DeleteMulti(c context.Context, key []*Key) error {
	// Deleting nothing trivially succeeds without an RPC.
	if len(key) == 0 {
		return nil
	}
	if err := multiValid(key); err != nil {
		return err
	}
	req := &pb.DeleteRequest{
		Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
	}
	res := &pb.DeleteResponse{}
	return internal.Call(c, "datastore_v3", "Delete", req, res)
}
391
// namespaceMod is registered with the internal package (see init below) to
// stamp the request namespace onto outgoing datastore_v3 messages that
// carry one, when the message does not already set it.
func namespaceMod(m proto.Message, namespace string) {
	// pb.Query is the only type that has a name_space field.
	// All other namespace support in datastore is in the keys.
	switch m := m.(type) {
	case *pb.Query:
		if m.NameSpace == nil {
			m.NameSpace = &namespace
		}
	}
}
402
// init wires this package into the internal RPC machinery: namespace
// stamping, error-code mapping, and timeout detection for datastore_v3.
func init() {
	internal.NamespaceMods["datastore_v3"] = namespaceMod
	internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
	internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
}
diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go
new file mode 100644
index 0000000..85616cf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/doc.go
@@ -0,0 +1,361 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5/*
6Package datastore provides a client for App Engine's datastore service.
7
8
9Basic Operations
10
11Entities are the unit of storage and are associated with a key. A key
12consists of an optional parent key, a string application ID, a string kind
13(also known as an entity type), and either a StringID or an IntID. A
14StringID is also known as an entity name or key name.
15
16It is valid to create a key with a zero StringID and a zero IntID; this is
17called an incomplete key, and does not refer to any saved entity. Putting an
18entity into the datastore under an incomplete key will cause a unique key
19to be generated for that entity, with a non-zero IntID.
20
21An entity's contents are a mapping from case-sensitive field names to values.
22Valid value types are:
23 - signed integers (int, int8, int16, int32 and int64),
24 - bool,
25 - string,
26 - float32 and float64,
27 - []byte (up to 1 megabyte in length),
28 - any type whose underlying type is one of the above predeclared types,
29 - ByteString,
30 - *Key,
31 - time.Time (stored with microsecond precision),
32 - appengine.BlobKey,
33 - appengine.GeoPoint,
34 - structs whose fields are all valid value types,
35 - slices of any of the above.
36
37Slices of structs are valid, as are structs that contain slices. However, if
38one struct contains another, then at most one of those can be repeated. This
39disqualifies recursively defined struct types: any struct T that (directly or
40indirectly) contains a []T.
41
42The Get and Put functions load and save an entity's contents. An entity's
43contents are typically represented by a struct pointer.
44
45Example code:
46
47 type Entity struct {
48 Value string
49 }
50
51 func handle(w http.ResponseWriter, r *http.Request) {
52 ctx := appengine.NewContext(r)
53
54 k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil)
55 e := new(Entity)
56 if err := datastore.Get(ctx, k, e); err != nil {
57 http.Error(w, err.Error(), 500)
58 return
59 }
60
61 old := e.Value
62 e.Value = r.URL.Path
63
64 if _, err := datastore.Put(ctx, k, e); err != nil {
65 http.Error(w, err.Error(), 500)
66 return
67 }
68
69 w.Header().Set("Content-Type", "text/plain; charset=utf-8")
70 fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
71 }
72
73GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
74Delete functions. They take a []*Key instead of a *Key, and may return an
75appengine.MultiError when encountering partial failure.
76
77
78Properties
79
80An entity's contents can be represented by a variety of types. These are
81typically struct pointers, but can also be any type that implements the
82PropertyLoadSaver interface. If using a struct pointer, you do not have to
83explicitly implement the PropertyLoadSaver interface; the datastore will
84automatically convert via reflection. If a struct pointer does implement that
85interface then those methods will be used in preference to the default
86behavior for struct pointers. Struct pointers are more strongly typed and are
87easier to use; PropertyLoadSavers are more flexible.
88
89The actual types passed do not have to match between Get and Put calls or even
90across different calls to datastore. It is valid to put a *PropertyList and
91get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
92Conceptually, any entity is saved as a sequence of properties, and is loaded
93into the destination value on a property-by-property basis. When loading into
94a struct pointer, an entity that cannot be completely represented (such as a
95missing field) will result in an ErrFieldMismatch error but it is up to the
96caller whether this error is fatal, recoverable or ignorable.
97
98By default, for struct pointers, all properties are potentially indexed, and
99the property name is the same as the field name (and hence must start with an
100upper case letter).
101
102Fields may have a `datastore:"name,options"` tag. The tag name is the
103property name, which must be one or more valid Go identifiers joined by ".",
104but may start with a lower case letter. An empty tag name means to just use the
105field name. A "-" tag name means that the datastore will ignore that field.
106
107The only valid options are "omitempty" and "noindex".
108
109If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save.
110The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero.
111Struct field values will never be empty.
112
113If options include "noindex" then the field will not be indexed. All fields are indexed
114by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
115fields used to store long strings and byte slices must be tagged with "noindex"
116or they will cause Put operations to fail.
117
118To use multiple options together, separate them by a comma.
119The order does not matter.
120
121If the options is "" then the comma may be omitted.
122
123Example code:
124
125 // A and B are renamed to a and b.
126 // A, C and J are not indexed.
127 // D's tag is equivalent to having no tag at all (E).
128 // I is ignored entirely by the datastore.
129 // J has tag information for both the datastore and json packages.
130 type TaggedStruct struct {
131 A int `datastore:"a,noindex"`
132 B int `datastore:"b"`
133 C int `datastore:",noindex"`
134 D int `datastore:""`
135 E int
136 I int `datastore:"-"`
137 J int `datastore:",noindex" json:"j"`
138 }
139
140
141Structured Properties
142
143If the struct pointed to contains other structs, then the nested or embedded
144structs are flattened. For example, given these definitions:
145
146 type Inner1 struct {
147 W int32
148 X string
149 }
150
151 type Inner2 struct {
152 Y float64
153 }
154
155 type Inner3 struct {
156 Z bool
157 }
158
159 type Outer struct {
160 A int16
161 I []Inner1
162 J Inner2
163 Inner3
164 }
165
166then an Outer's properties would be equivalent to those of:
167
168 type OuterEquivalent struct {
169 A int16
170 IDotW []int32 `datastore:"I.W"`
171 IDotX []string `datastore:"I.X"`
172 JDotY float64 `datastore:"J.Y"`
173 Z bool
174 }
175
176If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
177equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
178
179If an outer struct is tagged "noindex" then all of its implicit flattened
180fields are effectively "noindex".
181
182
183The PropertyLoadSaver Interface
184
185An entity's contents can also be represented by any type that implements the
186PropertyLoadSaver interface. This type may be a struct pointer, but it does
187not have to be. The datastore package will call Load when getting the entity's
188contents, and Save when putting the entity's contents.
189Possible uses include deriving non-stored fields, verifying fields, or indexing
190a field only if its value is positive.
191
192Example code:
193
194 type CustomPropsExample struct {
195 I, J int
196 // Sum is not stored, but should always be equal to I + J.
197 Sum int `datastore:"-"`
198 }
199
200 func (x *CustomPropsExample) Load(ps []datastore.Property) error {
201 // Load I and J as usual.
202 if err := datastore.LoadStruct(x, ps); err != nil {
203 return err
204 }
205 // Derive the Sum field.
206 x.Sum = x.I + x.J
207 return nil
208 }
209
210 func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
211 // Validate the Sum field.
212 if x.Sum != x.I + x.J {
213 return nil, errors.New("CustomPropsExample has inconsistent sum")
214 }
215 // Save I and J as usual. The code below is equivalent to calling
216 // "return datastore.SaveStruct(x)", but is done manually for
217 // demonstration purposes.
218 return []datastore.Property{
219 {
220 Name: "I",
221 Value: int64(x.I),
222 },
223 {
224 Name: "J",
225 Value: int64(x.J),
226 },
227 }, nil
228 }
229
230The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
231arbitrary entity's contents.
232
233
234Queries
235
236Queries retrieve entities based on their properties or key's ancestry. Running
237a query yields an iterator of results: either keys or (key, entity) pairs.
238Queries are re-usable and it is safe to call Query.Run from concurrent
239goroutines. Iterators are not safe for concurrent use.
240
241Queries are immutable, and are either created by calling NewQuery, or derived
242from an existing query by calling a method like Filter or Order that returns a
243new query value. A query is typically constructed by calling NewQuery followed
244by a chain of zero or more such methods. These methods are:
245 - Ancestor and Filter constrain the entities returned by running a query.
246 - Order affects the order in which they are returned.
247 - Project constrains the fields returned.
248 - Distinct de-duplicates projected entities.
249 - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
250 - Start, End, Offset and Limit define which sub-sequence of matching entities
251 to return. Start and End take cursors, Offset and Limit take integers. Start
252 and Offset affect the first result, End and Limit affect the last result.
253 If both Start and Offset are set, then the offset is relative to Start.
254 If both End and Limit are set, then the earliest constraint wins. Limit is
255 relative to Start+Offset, not relative to End. As a special case, a
256 negative limit means unlimited.
257
258Example code:
259
260 type Widget struct {
261 Description string
262 Price int
263 }
264
265 func handle(w http.ResponseWriter, r *http.Request) {
266 ctx := appengine.NewContext(r)
267 q := datastore.NewQuery("Widget").
268 Filter("Price <", 1000).
269 Order("-Price")
270 b := new(bytes.Buffer)
271 for t := q.Run(ctx); ; {
272 var x Widget
273 key, err := t.Next(&x)
274 if err == datastore.Done {
275 break
276 }
277 if err != nil {
278 serveError(ctx, w, err)
279 return
280 }
281 fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
282 }
283 w.Header().Set("Content-Type", "text/plain; charset=utf-8")
284 io.Copy(w, b)
285 }
286
287
288Transactions
289
290RunInTransaction runs a function in a transaction.
291
292Example code:
293
294 type Counter struct {
295 Count int
296 }
297
298 func inc(ctx context.Context, key *datastore.Key) (int, error) {
299 var x Counter
300 if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
301 return 0, err
302 }
303 x.Count++
304 if _, err := datastore.Put(ctx, key, &x); err != nil {
305 return 0, err
306 }
307 return x.Count, nil
308 }
309
310 func handle(w http.ResponseWriter, r *http.Request) {
311 ctx := appengine.NewContext(r)
312 var count int
313 err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
314 var err1 error
315 count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
316 return err1
317 }, nil)
318 if err != nil {
319 serveError(ctx, w, err)
320 return
321 }
322 w.Header().Set("Content-Type", "text/plain; charset=utf-8")
323 fmt.Fprintf(w, "Count=%d", count)
324 }
325
326
327Metadata
328
329The datastore package provides access to some of App Engine's datastore
330metadata. This metadata includes information about the entity groups,
331namespaces, entity kinds, and properties in the datastore, as well as the
332property representations for each property.
333
334Example code:
335
336 func handle(w http.ResponseWriter, r *http.Request) {
337 // Print all the kinds in the datastore, with all the indexed
338 // properties (and their representations) for each.
339 ctx := appengine.NewContext(r)
340
341 kinds, err := datastore.Kinds(ctx)
342 if err != nil {
343 serveError(ctx, w, err)
344 return
345 }
346
347 w.Header().Set("Content-Type", "text/plain; charset=utf-8")
348 for _, kind := range kinds {
349 fmt.Fprintf(w, "%s:\n", kind)
350 props, err := datastore.KindProperties(ctx, kind)
351 if err != nil {
352 fmt.Fprintln(w, "\t(unable to retrieve properties)")
353 continue
354 }
355 for p, rep := range props {
356 fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
357 }
358 }
359 }
360*/
361package datastore // import "google.golang.org/appengine/datastore"
diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 0000000..6ab83ea
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,396 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package datastore
6
7import (
8 "bytes"
9 "encoding/base64"
10 "encoding/gob"
11 "errors"
12 "fmt"
13 "strconv"
14 "strings"
15
16 "github.com/golang/protobuf/proto"
17 "golang.org/x/net/context"
18
19 "google.golang.org/appengine/internal"
20 pb "google.golang.org/appengine/internal/datastore"
21)
22
// KeyRangeCollisionError reports that AllocateIDRange found one or more
// existing entities whose integer IDs fall inside the requested range.
type KeyRangeCollisionError struct {
	start int64
	end   int64
}

// Error implements the error interface.
func (e *KeyRangeCollisionError) Error() string {
	return fmt.Sprintf("datastore: Collision when attempting to allocate range [%d, %d]", e.start, e.end)
}

// KeyRangeContentionError reports that the datastore may already have cached
// ID batches overlapping the range requested by AllocateIDRange.
type KeyRangeContentionError struct {
	start int64
	end   int64
}

// Error implements the error interface.
func (e *KeyRangeContentionError) Error() string {
	return fmt.Sprintf("datastore: Contention when attempting to allocate range [%d, %d]", e.start, e.end)
}
42
// Key represents the datastore key for a stored entity, and is immutable.
type Key struct {
	kind      string // entity kind; empty makes the key invalid (see valid)
	stringID  string // string ID (entity name); mutually exclusive with a non-zero intID
	intID     int64  // integer ID; zero together with an empty stringID means incomplete
	parent    *Key   // parent key, or nil for a root entity
	appID     string // fully-qualified application ID
	namespace string // datastore namespace; must match the parent's (see valid)
}
52
// Kind returns the key's kind (also known as entity type).
func (k *Key) Kind() string {
	return k.kind
}

// StringID returns the key's string ID (also known as an entity name or key
// name), which may be "".
func (k *Key) StringID() string {
	return k.stringID
}

// IntID returns the key's integer ID, which may be 0.
func (k *Key) IntID() int64 {
	return k.intID
}

// Parent returns the key's parent key, which may be nil.
func (k *Key) Parent() *Key {
	return k.parent
}

// AppID returns the key's application ID.
func (k *Key) AppID() string {
	return k.appID
}

// Namespace returns the key's namespace.
func (k *Key) Namespace() string {
	return k.namespace
}

// Incomplete returns whether the key does not refer to a stored entity.
// In particular, whether the key has a zero StringID and a zero IntID.
// Incomplete keys are typically completed by the datastore on Put.
func (k *Key) Incomplete() bool {
	return k.stringID == "" && k.intID == 0
}
89
90// valid returns whether the key is valid.
91func (k *Key) valid() bool {
92 if k == nil {
93 return false
94 }
95 for ; k != nil; k = k.parent {
96 if k.kind == "" || k.appID == "" {
97 return false
98 }
99 if k.stringID != "" && k.intID != 0 {
100 return false
101 }
102 if k.parent != nil {
103 if k.parent.Incomplete() {
104 return false
105 }
106 if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
107 return false
108 }
109 }
110 }
111 return true
112}
113
114// Equal returns whether two keys are equal.
115func (k *Key) Equal(o *Key) bool {
116 for k != nil && o != nil {
117 if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
118 return false
119 }
120 k, o = k.parent, o.parent
121 }
122 return k == o
123}
124
// root returns the furthest ancestor of a key, which may be itself.
func (k *Key) root() *Key {
	for k.parent != nil {
		k = k.parent
	}
	return k
}

// marshal marshals the key's string representation to the buffer,
// ancestors first, as "/kind,id" path segments.
func (k *Key) marshal(b *bytes.Buffer) {
	if k.parent != nil {
		k.parent.marshal(b)
	}
	b.WriteByte('/')
	b.WriteString(k.kind)
	b.WriteByte(',')
	if k.stringID != "" {
		b.WriteString(k.stringID)
	} else {
		// An incomplete key renders its zero intID as "0".
		b.WriteString(strconv.FormatInt(k.intID, 10))
	}
}

// String returns a string representation of the key
// (e.g. "/Parent,1/Child,name"). A nil key renders as "".
func (k *Key) String() string {
	if k == nil {
		return ""
	}
	b := bytes.NewBuffer(make([]byte, 0, 512))
	k.marshal(b)
	return b.String()
}
157
// gobKey mirrors Key with exported fields so encoding/gob can serialize it;
// Key's own fields are unexported and invisible to gob.
type gobKey struct {
	Kind      string
	StringID  string
	IntID     int64
	Parent    *gobKey
	AppID     string
	Namespace string
}

// keyToGobKey recursively converts k and its ancestor chain to *gobKey.
// A nil key converts to nil.
func keyToGobKey(k *Key) *gobKey {
	if k == nil {
		return nil
	}
	return &gobKey{
		Kind:      k.kind,
		StringID:  k.stringID,
		IntID:     k.intID,
		Parent:    keyToGobKey(k.parent),
		AppID:     k.appID,
		Namespace: k.namespace,
	}
}

// gobKeyToKey is the inverse of keyToGobKey; a nil gobKey converts to nil.
func gobKeyToKey(gk *gobKey) *Key {
	if gk == nil {
		return nil
	}
	return &Key{
		kind:      gk.Kind,
		stringID:  gk.StringID,
		intID:     gk.IntID,
		parent:    gobKeyToKey(gk.Parent),
		appID:     gk.AppID,
		namespace: gk.Namespace,
	}
}
194
// GobEncode implements gob.GobEncoder by encoding the key (including its
// ancestor chain) via the exported gobKey mirror type.
func (k *Key) GobEncode() ([]byte, error) {
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// GobDecode implements gob.GobDecoder, the inverse of GobEncode.
func (k *Key) GobDecode(buf []byte) error {
	gk := new(gobKey)
	if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
		return err
	}
	*k = *gobKeyToKey(gk)
	return nil
}

// MarshalJSON implements json.Marshaler by emitting the key's opaque
// Encode() form as a JSON string.
func (k *Key) MarshalJSON() ([]byte, error) {
	return []byte(`"` + k.Encode() + `"`), nil
}

// UnmarshalJSON implements json.Unmarshaler, the inverse of MarshalJSON.
// The input must be a JSON string containing an Encode()-produced key.
func (k *Key) UnmarshalJSON(buf []byte) error {
	if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
		return errors.New("datastore: bad JSON key")
	}
	k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
	if err != nil {
		return err
	}
	*k = *k2
	return nil
}
227
// Encode returns an opaque representation of the key
// suitable for use in HTML and URLs.
// This is compatible with the Python and Java runtimes.
// The representation is the key's protobuf form, base64url-encoded with
// trailing '=' padding stripped.
func (k *Key) Encode() string {
	ref := keyToProto("", k)

	// A marshal failure here indicates a programmer error (an
	// unserializable key), so panic rather than return an error.
	b, err := proto.Marshal(ref)
	if err != nil {
		panic(err)
	}

	// Trailing padding is stripped.
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
	// Re-add padding so base64.URLEncoding (which requires it) can decode.
	if m := len(encoded) % 4; m != 0 {
		encoded += strings.Repeat("=", 4-m)
	}

	b, err := base64.URLEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}

	ref := new(pb.Reference)
	if err := proto.Unmarshal(b, ref); err != nil {
		return nil, err
	}

	return protoToKey(ref)
}
262
// NewIncompleteKey creates a new incomplete key (zero stringID and intID).
// kind cannot be empty.
func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
	return NewKey(c, kind, "", 0, parent)
}

// NewKey creates a new key.
// kind cannot be empty.
// Either one or both of stringID and intID must be zero. If both are zero,
// the key returned is incomplete.
// parent must either be a complete key or nil.
func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {
	// If there's a parent key, use its namespace.
	// Otherwise, use any namespace attached to the context.
	var namespace string
	if parent != nil {
		namespace = parent.namespace
	} else {
		namespace = internal.NamespaceFromContext(c)
	}

	return &Key{
		kind:      kind,
		stringID:  stringID,
		intID:     intID,
		parent:    parent,
		appID:     internal.FullyQualifiedAppID(c),
		namespace: namespace,
	}
}
293
// AllocateIDs returns a range of n integer IDs with the given kind and parent
// combination. kind cannot be empty; parent may be nil. The IDs in the range
// returned will not be used by the datastore's automatic ID sequence generator
// and may be used with NewKey without conflict.
//
// The range is inclusive at the low end and exclusive at the high end. In
// other words, valid intIDs x satisfy low <= x && x < high.
//
// If no error is returned, low + n == high.
func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {
	if kind == "" {
		return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
	}
	if n < 0 {
		return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
	}
	// n == 0 is a no-op; skip the RPC entirely.
	if n == 0 {
		return 0, 0, nil
	}
	// An incomplete key carries the kind/parent/namespace that scope the
	// allocation request.
	req := &pb.AllocateIdsRequest{
		ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
		Size:     proto.Int64(int64(n)),
	}
	res := &pb.AllocateIdsResponse{}
	if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
		return 0, 0, err
	}
	// The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
	// is inclusive at the low end and exclusive at the high end, so we add 1.
	low = res.GetStart()
	high = res.GetEnd() + 1
	if low+int64(n) != high {
		return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
	}
	return low, high, nil
}
330
// AllocateIDRange allocates a range of IDs with specific endpoints.
// The range is inclusive at both the low and high end. Once these IDs have been
// allocated, you can manually assign them to newly created entities.
//
// The Datastore's automatic ID allocator never assigns a key that has already
// been allocated (either through automatic ID allocation or through an explicit
// AllocateIDs call). As a result, entities written to the given key range will
// never be overwritten. However, writing entities with manually assigned keys in
// this range may overwrite existing entities (or new entities written by a separate
// request), depending on the error returned.
//
// Use this only if you have an existing numeric ID range that you want to reserve
// (for example, bulk loading entities that already have IDs). If you don't care
// about which IDs you receive, use AllocateIDs instead.
//
// AllocateIDRange returns nil if the range is successfully allocated. If one or more
// entities with an ID in the given range already exist, it returns a KeyRangeCollisionError.
// If the Datastore has already cached IDs in this range (e.g. from a previous call to
// AllocateIDRange), it returns a KeyRangeContentionError. Errors of other types indicate
// problems with arguments or an error returned directly from the Datastore.
func AllocateIDRange(c context.Context, kind string, parent *Key, start, end int64) (err error) {
	if kind == "" {
		return errors.New("datastore: AllocateIDRange given an empty kind")
	}

	if start < 1 || end < 1 {
		return errors.New("datastore: AllocateIDRange start and end must both be greater than 0")
	}

	if start > end {
		return errors.New("datastore: AllocateIDRange start must be before end")
	}

	// Setting Max (rather than Size) asks the datastore to reserve all IDs
	// up to and including end for this kind/parent.
	req := &pb.AllocateIdsRequest{
		ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
		Max:      proto.Int64(end),
	}
	res := &pb.AllocateIdsResponse{}
	if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
		return err
	}

	// Check for collisions, i.e. existing entities with IDs in this range.
	// We could do this before the allocation, but we'd still have to do it
	// afterward as well to catch the race condition where an entity is inserted
	// after that initial check but before the allocation. Skip the up-front check
	// and just do it once.
	q := NewQuery(kind).Filter("__key__ >=", NewKey(c, kind, "", start, parent)).
		Filter("__key__ <=", NewKey(c, kind, "", end, parent)).KeysOnly().Limit(1)

	keys, err := q.GetAll(c, nil)
	if err != nil {
		return err
	}
	if len(keys) != 0 {
		return &KeyRangeCollisionError{start: start, end: end}
	}

	// Check for a race condition, i.e. cases where the datastore may have
	// cached ID batches that contain IDs in this range.
	if start < res.GetStart() {
		return &KeyRangeContentionError{start: start, end: end}
	}

	return nil
}
diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go
new file mode 100644
index 0000000..38a6365
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/load.go
@@ -0,0 +1,429 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package datastore
6
7import (
8 "fmt"
9 "reflect"
10 "strings"
11 "time"
12
13 "github.com/golang/protobuf/proto"
14 "google.golang.org/appengine"
15 pb "google.golang.org/appengine/internal/datastore"
16)
17
// Reflect types of the special property value types, computed once and used
// to classify destination fields when loading entity properties.
var (
	typeOfBlobKey   = reflect.TypeOf(appengine.BlobKey(""))
	typeOfByteSlice = reflect.TypeOf([]byte(nil))
	typeOfByteString = reflect.TypeOf(ByteString(nil))
	typeOfGeoPoint  = reflect.TypeOf(appengine.GeoPoint{})
	typeOfTime      = reflect.TypeOf(time.Time{})
	typeOfKeyPtr    = reflect.TypeOf(&Key{})
	typeOfEntityPtr = reflect.TypeOf(&Entity{})
)
27
28// typeMismatchReason returns a string explaining why the property p could not
29// be stored in an entity field of type v.Type().
30func typeMismatchReason(pValue interface{}, v reflect.Value) string {
31 entityType := "empty"
32 switch pValue.(type) {
33 case int64:
34 entityType = "int"
35 case bool:
36 entityType = "bool"
37 case string:
38 entityType = "string"
39 case float64:
40 entityType = "float"
41 case *Key:
42 entityType = "*datastore.Key"
43 case time.Time:
44 entityType = "time.Time"
45 case appengine.BlobKey:
46 entityType = "appengine.BlobKey"
47 case appengine.GeoPoint:
48 entityType = "appengine.GeoPoint"
49 case ByteString:
50 entityType = "datastore.ByteString"
51 case []byte:
52 entityType = "[]byte"
53 }
54 return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
55}
56
// propertyLoader accumulates per-property state while loading a sequence of
// Properties into one struct (see structPLS.Load).
type propertyLoader struct {
	// m holds the number of times a substruct field like "Foo.Bar.Baz" has
	// been seen so far. The map is constructed lazily.
	m map[string]int
}
62
// load stores a single property p into structValue, using codec to resolve
// the (possibly dotted) property name to a struct field. requireSlice states
// that p is one of multiple properties with the same name, so the destination
// must be a slice. It returns "" on success, or a human-readable reason
// string on failure.
func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
	var v reflect.Value
	var sliceIndex int

	name := p.Name

	// If name ends with a '.', the last field is anonymous.
	// In this case, strings.Split will give us "" as the
	// last element of our fields slice, which will match the ""
	// field name in the substruct codec.
	fields := strings.Split(name, ".")

	for len(fields) > 0 {
		var decoder fieldCodec
		var ok bool

		// Cut off the last field (delimited by ".") and find its parent
		// in the codec.
		// eg. for name "A.B.C.D", split off "A.B.C" and try to
		// find a field in the codec with this name.
		// Loop again with "A.B", etc.
		for i := len(fields); i > 0; i-- {
			parent := strings.Join(fields[:i], ".")
			decoder, ok = codec.fields[parent]
			if ok {
				fields = fields[i:]
				break
			}
		}

		// If we never found a matching field in the codec, return
		// error message.
		if !ok {
			return "no such struct field"
		}

		// Resolve the field, allocating intermediate nil pointers.
		v = initField(structValue, decoder.path)
		if !v.IsValid() {
			return "no such struct field"
		}
		if !v.CanSet() {
			return "cannot set struct field"
		}

		// Descend into the substruct codec for the next pass of the loop.
		if decoder.structCodec != nil {
			codec = decoder.structCodec
			structValue = v
		}

		// A non-[]byte slice on the path means this property is one element
		// of a repeated value: grow the slice to this property's occurrence
		// index (tracked in l.m) and continue loading into that element.
		if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
			if l.m == nil {
				l.m = make(map[string]int)
			}
			sliceIndex = l.m[p.Name]
			l.m[p.Name] = sliceIndex + 1
			for v.Len() <= sliceIndex {
				v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
			}
			structValue = v.Index(sliceIndex)
			requireSlice = false
		}
	}

	// If the final destination is a (non-byte) slice, set into a fresh
	// element value and copy it into the slice only after setVal succeeds.
	var slice reflect.Value
	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
		slice = v
		v = reflect.New(v.Type().Elem()).Elem()
	} else if requireSlice {
		return "multiple-valued property requires a slice field type"
	}

	// Convert indexValues to a Go value with a meaning derived from the
	// destination type.
	pValue := p.Value
	if iv, ok := pValue.(indexValue); ok {
		meaning := pb.Property_NO_MEANING
		switch v.Type() {
		case typeOfBlobKey:
			meaning = pb.Property_BLOBKEY
		case typeOfByteSlice:
			meaning = pb.Property_BLOB
		case typeOfByteString:
			meaning = pb.Property_BYTESTRING
		case typeOfGeoPoint:
			meaning = pb.Property_GEORSS_POINT
		case typeOfTime:
			meaning = pb.Property_GD_WHEN
		case typeOfEntityPtr:
			meaning = pb.Property_ENTITY_PROTO
		}
		var err error
		pValue, err = propValue(iv.value, meaning)
		if err != nil {
			return err.Error()
		}
	}

	if errReason := setVal(v, pValue); errReason != "" {
		// Set the slice back to its zero value.
		if slice.IsValid() {
			slice.Set(reflect.Zero(slice.Type()))
		}
		return errReason
	}

	if slice.IsValid() {
		slice.Index(sliceIndex).Set(v)
	}

	return ""
}
174
// setVal sets v to the value pValue, converting between the datastore's
// property value types and the destination field's kind. It returns "" on
// success, or a human-readable reason string on failure. A nil pValue
// leaves numeric/bool/string fields at their zero value.
func setVal(v reflect.Value, pValue interface{}) string {
	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		x, ok := pValue.(int64)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		// Narrower int fields must be able to hold the stored int64.
		if v.OverflowInt(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetInt(x)
	case reflect.Bool:
		x, ok := pValue.(bool)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		v.SetBool(x)
	case reflect.String:
		// Several string-like property types load into a string field.
		switch x := pValue.(type) {
		case appengine.BlobKey:
			v.SetString(string(x))
		case ByteString:
			v.SetString(string(x))
		case string:
			v.SetString(x)
		default:
			if pValue != nil {
				return typeMismatchReason(pValue, v)
			}
		}
	case reflect.Float32, reflect.Float64:
		x, ok := pValue.(float64)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if v.OverflowFloat(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetFloat(x)
	case reflect.Ptr:
		// *Key is the only pointer field type supported.
		x, ok := pValue.(*Key)
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if _, ok := v.Interface().(*Key); !ok {
			return typeMismatchReason(pValue, v)
		}
		v.Set(reflect.ValueOf(x))
	case reflect.Struct:
		switch v.Type() {
		case typeOfTime:
			x, ok := pValue.(time.Time)
			if !ok && pValue != nil {
				return typeMismatchReason(pValue, v)
			}
			v.Set(reflect.ValueOf(x))
		case typeOfGeoPoint:
			x, ok := pValue.(appengine.GeoPoint)
			if !ok && pValue != nil {
				return typeMismatchReason(pValue, v)
			}
			v.Set(reflect.ValueOf(x))
		default:
			// Any other struct type is treated as a nested entity.
			ent, ok := pValue.(*Entity)
			if !ok {
				return typeMismatchReason(pValue, v)
			}

			// Recursively load nested struct
			pls, err := newStructPLS(v.Addr().Interface())
			if err != nil {
				return err.Error()
			}

			// if ent has a Key value and our struct has a Key field,
			// load the Entity's Key value into the Key field on the struct.
			if ent.Key != nil && pls.codec.keyField != -1 {

				pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key))
			}

			err = pls.Load(ent.Properties)
			if err != nil {
				return err.Error()
			}
		}
	case reflect.Slice:
		// Only []byte (or ByteString coerced to []byte) loads into a slice.
		x, ok := pValue.([]byte)
		if !ok {
			if y, yok := pValue.(ByteString); yok {
				x, ok = []byte(y), true
			}
		}
		if !ok && pValue != nil {
			return typeMismatchReason(pValue, v)
		}
		if v.Type().Elem().Kind() != reflect.Uint8 {
			return typeMismatchReason(pValue, v)
		}
		v.SetBytes(x)
	default:
		return typeMismatchReason(pValue, v)
	}
	return ""
}
281
282// initField is similar to reflect's Value.FieldByIndex, in that it
283// returns the nested struct field corresponding to index, but it
284// initialises any nil pointers encountered when traversing the structure.
285func initField(val reflect.Value, index []int) reflect.Value {
286 for _, i := range index[:len(index)-1] {
287 val = val.Field(i)
288 if val.Kind() == reflect.Ptr {
289 if val.IsNil() {
290 val.Set(reflect.New(val.Type().Elem()))
291 }
292 val = val.Elem()
293 }
294 }
295 return val.Field(index[len(index)-1])
296}
297
// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.
// Custom PropertyLoadSaver implementations take precedence over struct
// reflection.
func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
	ent, err := protoToEntity(src)
	if err != nil {
		return err
	}
	if e, ok := dst.(PropertyLoadSaver); ok {
		return e.Load(ent.Properties)
	}
	return LoadStruct(dst, ent.Properties)
}

// Load implements PropertyLoadSaver for structs: it loads each property into
// the matching struct field, recording only the last failure.
func (s structPLS) Load(props []Property) error {
	var fieldName, reason string
	var l propertyLoader
	for _, p := range props {
		if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
			// We don't return early, as we try to load as many properties as possible.
			// It is valid to load an entity into a struct that cannot fully represent it.
			// That case returns an error, but the caller is free to ignore it.
			fieldName, reason = p.Name, errStr
		}
	}
	if reason != "" {
		return &ErrFieldMismatch{
			StructType: s.v.Type(),
			FieldName:  fieldName,
			Reason:     reason,
		}
	}
	return nil
}
330
// protoToEntity converts an EntityProto into an Entity, merging the indexed
// (Property) and unindexed (RawProperty) lists into one Property slice in
// order, with NoIndex set for the raw ones.
func protoToEntity(src *pb.EntityProto) (*Entity, error) {
	props, rawProps := src.Property, src.RawProperty
	outProps := make([]Property, 0, len(props)+len(rawProps))
	// Drain props first, then rawProps.
	for {
		var (
			x       *pb.Property
			noIndex bool
		)
		if len(props) > 0 {
			x, props = props[0], props[1:]
		} else if len(rawProps) > 0 {
			x, rawProps = rawProps[0], rawProps[1:]
			noIndex = true
		} else {
			break
		}

		var value interface{}
		if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
			// Projection-query values lack full metadata; keep them opaque
			// until the destination type is known (see propertyLoader.load).
			value = indexValue{x.Value}
		} else {
			var err error
			value, err = propValue(x.Value, x.GetMeaning())
			if err != nil {
				return nil, err
			}
		}
		outProps = append(outProps, Property{
			Name:     x.GetName(),
			Value:    value,
			NoIndex:  noIndex,
			Multiple: x.GetMultiple(),
		})
	}

	var key *Key
	if src.Key != nil {
		// Ignore any error, since nested entity values
		// are allowed to have an invalid key.
		key, _ = protoToKey(src.Key)
	}
	return &Entity{key, outProps}, nil
}
374
375// propValue returns a Go value that combines the raw PropertyValue with a
376// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
377func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
378 switch {
379 case v.Int64Value != nil:
380 if m == pb.Property_GD_WHEN {
381 return fromUnixMicro(*v.Int64Value), nil
382 } else {
383 return *v.Int64Value, nil
384 }
385 case v.BooleanValue != nil:
386 return *v.BooleanValue, nil
387 case v.StringValue != nil:
388 if m == pb.Property_BLOB {
389 return []byte(*v.StringValue), nil
390 } else if m == pb.Property_BLOBKEY {
391 return appengine.BlobKey(*v.StringValue), nil
392 } else if m == pb.Property_BYTESTRING {
393 return ByteString(*v.StringValue), nil
394 } else if m == pb.Property_ENTITY_PROTO {
395 var ent pb.EntityProto
396 err := proto.Unmarshal([]byte(*v.StringValue), &ent)
397 if err != nil {
398 return nil, err
399 }
400 return protoToEntity(&ent)
401 } else {
402 return *v.StringValue, nil
403 }
404 case v.DoubleValue != nil:
405 return *v.DoubleValue, nil
406 case v.Referencevalue != nil:
407 key, err := referenceValueToKey(v.Referencevalue)
408 if err != nil {
409 return nil, err
410 }
411 return key, nil
412 case v.Pointvalue != nil:
413 // NOTE: Strangely, latitude maps to X, longitude to Y.
414 return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
415 }
416 return nil, nil
417}
418
// indexValue is a Property value that is created when entities are loaded from
// an index, such as from a projection query (see protoToEntity, which wraps
// values whose meaning is INDEX_VALUE).
//
// Such Property values do not contain all of the metadata required to be
// faithfully represented as a Go value, and are instead represented as an
// opaque indexValue. Load the properties into a concrete struct type (e.g. by
// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
// of type int, string, time.Time, etc.
type indexValue struct {
	value *pb.PropertyValue // raw protobuf value, decoded lazily once the destination type is known
}
diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go
new file mode 100644
index 0000000..6acacc3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/metadata.go
@@ -0,0 +1,78 @@
1// Copyright 2016 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package datastore
6
7import "golang.org/x/net/context"
8
// Datastore kinds for the metadata entities. These reserved double-underscore
// kinds are queried like ordinary kinds to enumerate namespaces, kinds, and
// properties.
const (
	namespaceKind = "__namespace__"
	kindKind      = "__kind__"
	propertyKind  = "__property__"
)
15
// Namespaces returns all the datastore namespaces, discovered via a keys-only
// query over the reserved __namespace__ kind.
func Namespaces(ctx context.Context) ([]string, error) {
	// TODO(djd): Support range queries.
	q := NewQuery(namespaceKind).KeysOnly()
	keys, err := q.GetAll(ctx, nil)
	if err != nil {
		return nil, err
	}
	// The empty namespace key uses a numeric ID (==1), but luckily
	// the string ID defaults to "" for numeric IDs anyway.
	return keyNames(keys), nil
}

// Kinds returns the names of all the kinds in the current namespace,
// discovered via a keys-only query over the reserved __kind__ kind.
func Kinds(ctx context.Context) ([]string, error) {
	// TODO(djd): Support range queries.
	q := NewQuery(kindKind).KeysOnly()
	keys, err := q.GetAll(ctx, nil)
	if err != nil {
		return nil, err
	}
	return keyNames(keys), nil
}
39
40// keyNames returns a slice of the provided keys' names (string IDs).
41func keyNames(keys []*Key) []string {
42 n := make([]string, 0, len(keys))
43 for _, k := range keys {
44 n = append(n, k.StringID())
45 }
46 return n
47}
48
// KindProperties returns all the indexed properties for the given kind.
// The properties are returned as a map of property names to a slice of the
// representation types. The representation types for the supported Go property
// types are:
//   "INT64":     signed integers and time.Time
//   "DOUBLE":    float32 and float64
//   "BOOLEAN":   bool
//   "STRING":    string, []byte and ByteString
//   "POINT":     appengine.GeoPoint
//   "REFERENCE": *Key
//   "USER":      (not used in the Go runtime)
func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
	// TODO(djd): Support range queries.
	// __property__ entities are descendants of their kind's __kind__ key.
	kindKey := NewKey(ctx, kindKind, kind, 0, nil)
	q := NewQuery(propertyKind).Ancestor(kindKey)

	propMap := map[string][]string{}
	// Each __property__ entity stores its representations under the
	// "property_representation" property; the property name itself is the
	// entity's string ID.
	props := []struct {
		Repr []string `datastore:"property_representation"`
	}{}

	keys, err := q.GetAll(ctx, &props)
	if err != nil {
		return nil, err
	}
	for i, p := range props {
		propMap[keys[i].StringID()] = p.Repr
	}
	return propMap, nil
}
diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 0000000..5cb2079
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,330 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package datastore
6
7import (
8 "fmt"
9 "reflect"
10 "strings"
11 "sync"
12 "unicode"
13)
14
15// Entities with more than this many indexed properties will not be saved.
16const maxIndexedProperties = 20000
17
18// []byte fields more than 1 megabyte long will not be loaded or saved.
19const maxBlobLen = 1 << 20
20
21// Property is a name/value pair plus some metadata. A datastore entity's
22// contents are loaded and saved as a sequence of Properties. An entity can
23// have multiple Properties with the same name, provided that p.Multiple is
24// true on all of that entity's Properties with that name.
25type Property struct {
26 // Name is the property name.
27 Name string
28 // Value is the property value. The valid types are:
29 // - int64
30 // - bool
31 // - string
32 // - float64
33 // - ByteString
34 // - *Key
35 // - time.Time
36 // - appengine.BlobKey
37 // - appengine.GeoPoint
38 // - []byte (up to 1 megabyte in length)
39 // - *Entity (representing a nested struct)
40 // This set is smaller than the set of valid struct field types that the
41 // datastore can load and save. A Property Value cannot be a slice (apart
42 // from []byte); use multiple Properties instead. Also, a Value's type
43 // must be explicitly on the list above; it is not sufficient for the
44 // underlying type to be on that list. For example, a Value of "type
45 // myInt64 int64" is invalid. Smaller-width integers and floats are also
46 // invalid. Again, this is more restrictive than the set of valid struct
47 // field types.
48 //
49 // A Value will have an opaque type when loading entities from an index,
50 // such as via a projection query. Load entities into a struct instead
51 // of a PropertyLoadSaver when using a projection query.
52 //
53 // A Value may also be the nil interface value; this is equivalent to
54 // Python's None but not directly representable by a Go struct. Loading
55 // a nil-valued property into a struct will set that field to the zero
56 // value.
57 Value interface{}
58 // NoIndex is whether the datastore cannot index this property.
59 NoIndex bool
60 // Multiple is whether the entity can have multiple properties with
61 // the same name. Even if a particular instance only has one property with
62 // a certain name, Multiple should be true if a struct would best represent
63 // it as a field of type []T instead of type T.
64 Multiple bool
65}
66
67// An Entity is the value type for a nested struct.
68// This type is only used for a Property's Value.
69type Entity struct {
70 Key *Key
71 Properties []Property
72}
73
74// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
75type ByteString []byte
76
77// PropertyLoadSaver can be converted from and to a slice of Properties.
78type PropertyLoadSaver interface {
79 Load([]Property) error
80 Save() ([]Property, error)
81}
82
83// PropertyList converts a []Property to implement PropertyLoadSaver.
84type PropertyList []Property
85
86var (
87 typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
88 typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
89)
90
91// Load loads all of the provided properties into l.
92// It does not first reset *l to an empty slice.
93func (l *PropertyList) Load(p []Property) error {
94 *l = append(*l, p...)
95 return nil
96}
97
98// Save saves all of l's properties as a slice or Properties.
99func (l *PropertyList) Save() ([]Property, error) {
100 return *l, nil
101}
102
// validPropertyName returns whether name consists of one or more valid Go
// identifiers joined by ".".
func validPropertyName(name string) bool {
	if name == "" {
		return false
	}
	for _, part := range strings.Split(name, ".") {
		if !isIdentifier(part) {
			return false
		}
	}
	return true
}

// isIdentifier reports whether s is a non-empty Go identifier: a letter or
// underscore followed by any mix of letters, digits and underscores.
func isIdentifier(s string) bool {
	for i, r := range s {
		switch {
		case r == '_' || unicode.IsLetter(r):
			// Allowed in any position.
		case i > 0 && unicode.IsDigit(r):
			// Digits are allowed after the first rune only.
		default:
			return false
		}
	}
	return s != ""
}
129
// structCodec describes how to convert a struct to and from a sequence of
// properties.
type structCodec struct {
	// fields gives the field codec for the structTag with the given name.
	fields map[string]fieldCodec
	// hasSlice is whether a struct or any of its nested or embedded structs
	// has a slice-typed field (other than []byte).
	hasSlice bool
	// keyField is the index of a *Key field with structTag __key__.
	// This field is not relevant for the top level struct, only for
	// nested structs. It is -1 when there is no such field.
	keyField int
	// complete is whether the structCodec is complete. An incomplete
	// structCodec may be encountered when walking a recursive struct.
	complete bool
}
146
// fieldCodec is a struct field's index and, if that struct field's type is
// itself a struct, that substruct's structCodec.
type fieldCodec struct {
	// path is the index path to the field, possibly passing through the
	// fields of promoted anonymous structs.
	path []int
	// noIndex indicates that the field should not be indexed.
	noIndex bool
	// omitEmpty indicates that the field should be omitted on save
	// if empty.
	omitEmpty bool
	// structCodec is the codec for the struct field at index 'path',
	// or nil if the field is not a struct.
	structCodec *structCodec
}
160
161// structCodecs collects the structCodecs that have already been calculated.
162var (
163 structCodecsMutex sync.Mutex
164 structCodecs = make(map[reflect.Type]*structCodec)
165)
166
// getStructCodec returns the structCodec for the given struct type,
// computing and caching it (in structCodecs) on first use.
func getStructCodec(t reflect.Type) (*structCodec, error) {
	structCodecsMutex.Lock()
	defer structCodecsMutex.Unlock()
	return getStructCodecLocked(t)
}
173
// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
// be held when calling this function.
func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
	c, ok := structCodecs[t]
	if ok {
		return c, nil
	}
	c = &structCodec{
		fields: make(map[string]fieldCodec),
		// We initialize keyField to -1 so that the zero-value is not
		// misinterpreted as index 0.
		keyField: -1,
	}

	// Add c to the structCodecs map before we are sure it is good. If t is
	// a recursive type, it needs to find the incomplete entry for itself in
	// the map.
	structCodecs[t] = c
	defer func() {
		if retErr != nil {
			delete(structCodecs, t)
		}
	}()

	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// Skip unexported fields.
		// Note that if f is an anonymous, unexported struct field,
		// we will promote its fields.
		if f.PkgPath != "" && !f.Anonymous {
			continue
		}

		// The tag has the form "name,opt1,opt2,..."; gather the options
		// into a set for the lookups below.
		tags := strings.Split(f.Tag.Get("datastore"), ",")
		name := tags[0]
		opts := make(map[string]bool)
		for _, t := range tags[1:] {
			opts[t] = true
		}
		switch {
		case name == "":
			if !f.Anonymous {
				name = f.Name
			}
		case name == "-":
			// A "-" tag excludes the field entirely.
			continue
		case name == "__key__":
			if f.Type != typeOfKeyPtr {
				return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t)
			}
			c.keyField = i
		case !validPropertyName(name):
			return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
		}

		substructType, fIsSlice := reflect.Type(nil), false
		switch f.Type.Kind() {
		case reflect.Struct:
			substructType = f.Type
		case reflect.Slice:
			if f.Type.Elem().Kind() == reflect.Struct {
				substructType = f.Type.Elem()
			}
			fIsSlice = f.Type != typeOfByteSlice
			c.hasSlice = c.hasSlice || fIsSlice
		}

		var sub *structCodec
		// time.Time and appengine.GeoPoint are struct types but are handled
		// as single values, so they do not get a sub-codec.
		if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
			var err error
			sub, err = getStructCodecLocked(substructType)
			if err != nil {
				return nil, err
			}
			if !sub.complete {
				// An incomplete codec here means we re-entered this function
				// for a type that is still being built: a recursive struct.
				return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
			}
			if fIsSlice && sub.hasSlice {
				return nil, fmt.Errorf(
					"datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
			}
			c.hasSlice = c.hasSlice || sub.hasSlice
			// If f is an anonymous struct field, we promote the substruct's fields up to this level
			// in the linked list of struct codecs.
			if f.Anonymous {
				for subname, subfield := range sub.fields {
					if name != "" {
						subname = name + "." + subname
					}
					if _, ok := c.fields[subname]; ok {
						return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname)
					}
					c.fields[subname] = fieldCodec{
						path:        append([]int{i}, subfield.path...),
						noIndex:     subfield.noIndex || opts["noindex"],
						omitEmpty:   subfield.omitEmpty,
						structCodec: subfield.structCodec,
					}
				}
				continue
			}
		}

		if _, ok := c.fields[name]; ok {
			return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
		}
		c.fields[name] = fieldCodec{
			path:        []int{i},
			noIndex:     opts["noindex"],
			omitEmpty:   opts["omitempty"],
			structCodec: sub,
		}
	}
	c.complete = true
	return c, nil
}
290
// structPLS adapts a struct to be a PropertyLoadSaver.
type structPLS struct {
	v     reflect.Value // the struct value itself (already dereferenced)
	codec *structCodec  // cached field layout for v's type
}
296
297// newStructPLS returns a structPLS, which implements the
298// PropertyLoadSaver interface, for the struct pointer p.
299func newStructPLS(p interface{}) (*structPLS, error) {
300 v := reflect.ValueOf(p)
301 if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
302 return nil, ErrInvalidEntityType
303 }
304 v = v.Elem()
305 codec, err := getStructCodec(v.Type())
306 if err != nil {
307 return nil, err
308 }
309 return &structPLS{v, codec}, nil
310}
311
312// LoadStruct loads the properties from p to dst.
313// dst must be a struct pointer.
314func LoadStruct(dst interface{}, p []Property) error {
315 x, err := newStructPLS(dst)
316 if err != nil {
317 return err
318 }
319 return x.Load(p)
320}
321
322// SaveStruct returns the properties from src as a slice of Properties.
323// src must be a struct pointer.
324func SaveStruct(src interface{}) ([]Property, error) {
325 x, err := newStructPLS(src)
326 if err != nil {
327 return nil, err
328 }
329 return x.Save()
330}
diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go
new file mode 100644
index 0000000..c1ea4ad
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/query.go
@@ -0,0 +1,757 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package datastore
6
7import (
8 "encoding/base64"
9 "errors"
10 "fmt"
11 "math"
12 "reflect"
13 "strings"
14
15 "github.com/golang/protobuf/proto"
16 "golang.org/x/net/context"
17
18 "google.golang.org/appengine/internal"
19 pb "google.golang.org/appengine/internal/datastore"
20)
21
22type operator int
23
24const (
25 lessThan operator = iota
26 lessEq
27 equal
28 greaterEq
29 greaterThan
30)
31
32var operatorToProto = map[operator]*pb.Query_Filter_Operator{
33 lessThan: pb.Query_Filter_LESS_THAN.Enum(),
34 lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
35 equal: pb.Query_Filter_EQUAL.Enum(),
36 greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
37 greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
38}
39
40// filter is a conditional filter on query results.
41type filter struct {
42 FieldName string
43 Op operator
44 Value interface{}
45}
46
47type sortDirection int
48
49const (
50 ascending sortDirection = iota
51 descending
52)
53
54var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
55 ascending: pb.Query_Order_ASCENDING.Enum(),
56 descending: pb.Query_Order_DESCENDING.Enum(),
57}
58
59// order is a sort order on query results.
60type order struct {
61 FieldName string
62 Direction sortDirection
63}
64
65// NewQuery creates a new Query for a specific entity kind.
66//
67// An empty kind means to return all entities, including entities created and
68// managed by other App Engine features, and is called a kindless query.
69// Kindless queries cannot include filters or sort orders on property values.
70func NewQuery(kind string) *Query {
71 return &Query{
72 kind: kind,
73 limit: -1,
74 }
75}
76
// Query represents a datastore query.
type Query struct {
	kind       string   // entity kind; empty means a kindless query
	ancestor   *Key     // ancestor filter, or nil
	filter     []filter // property filters, AND'ed together
	order      []order  // sort orders, applied in sequence
	projection []string // fields to project, or nil

	distinct bool  // de-duplicate projected results
	keysOnly bool  // return only keys, not entity data
	eventual bool  // eventually consistent reads (ancestor queries only)
	limit    int32 // maximum number of results; negative means unlimited
	offset   int32 // number of initial results to skip
	count    int32 // batch size hint; zero means unset
	start    *pb.CompiledCursor
	end      *pb.CompiledCursor

	// err holds the first error from a builder method; it is reported
	// when the query is run.
	err error
}
96
97func (q *Query) clone() *Query {
98 x := *q
99 // Copy the contents of the slice-typed fields to a new backing store.
100 if len(q.filter) > 0 {
101 x.filter = make([]filter, len(q.filter))
102 copy(x.filter, q.filter)
103 }
104 if len(q.order) > 0 {
105 x.order = make([]order, len(q.order))
106 copy(x.order, q.order)
107 }
108 return &x
109}
110
111// Ancestor returns a derivative query with an ancestor filter.
112// The ancestor should not be nil.
113func (q *Query) Ancestor(ancestor *Key) *Query {
114 q = q.clone()
115 if ancestor == nil {
116 q.err = errors.New("datastore: nil query ancestor")
117 return q
118 }
119 q.ancestor = ancestor
120 return q
121}
122
123// EventualConsistency returns a derivative query that returns eventually
124// consistent results.
125// It only has an effect on ancestor queries.
126func (q *Query) EventualConsistency() *Query {
127 q = q.clone()
128 q.eventual = true
129 return q
130}
131
// Filter returns a derivative query with a field-based filter.
// The filterStr argument must be a field name followed by optional space,
// followed by an operator, one of ">", "<", ">=", "<=", or "=".
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
func (q *Query) Filter(filterStr string, value interface{}) *Query {
	q = q.clone()
	filterStr = strings.TrimSpace(filterStr)
	if len(filterStr) < 1 {
		q.err = errors.New("datastore: invalid filter: " + filterStr)
		return q
	}
	f := filter{
		// Everything before the trailing operator characters is the field
		// name. '!' is in the trim cutset so that a "!=" suffix reaches the
		// switch below and is rejected as an invalid operator.
		FieldName: strings.TrimRight(filterStr, " ><=!"),
		Value:     value,
	}
	switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
	case "<=":
		f.Op = lessEq
	case ">=":
		f.Op = greaterEq
	case "<":
		f.Op = lessThan
	case ">":
		f.Op = greaterThan
	case "=":
		f.Op = equal
	default:
		q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
		return q
	}
	q.filter = append(q.filter, f)
	return q
}
166
167// Order returns a derivative query with a field-based sort order. Orders are
168// applied in the order they are added. The default order is ascending; to sort
169// in descending order prefix the fieldName with a minus sign (-).
170func (q *Query) Order(fieldName string) *Query {
171 q = q.clone()
172 fieldName = strings.TrimSpace(fieldName)
173 o := order{
174 Direction: ascending,
175 FieldName: fieldName,
176 }
177 if strings.HasPrefix(fieldName, "-") {
178 o.Direction = descending
179 o.FieldName = strings.TrimSpace(fieldName[1:])
180 } else if strings.HasPrefix(fieldName, "+") {
181 q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
182 return q
183 }
184 if len(o.FieldName) == 0 {
185 q.err = errors.New("datastore: empty order")
186 return q
187 }
188 q.order = append(q.order, o)
189 return q
190}
191
192// Project returns a derivative query that yields only the given fields. It
193// cannot be used with KeysOnly.
194func (q *Query) Project(fieldNames ...string) *Query {
195 q = q.clone()
196 q.projection = append([]string(nil), fieldNames...)
197 return q
198}
199
200// Distinct returns a derivative query that yields de-duplicated entities with
201// respect to the set of projected fields. It is only used for projection
202// queries.
203func (q *Query) Distinct() *Query {
204 q = q.clone()
205 q.distinct = true
206 return q
207}
208
209// KeysOnly returns a derivative query that yields only keys, not keys and
210// entities. It cannot be used with projection queries.
211func (q *Query) KeysOnly() *Query {
212 q = q.clone()
213 q.keysOnly = true
214 return q
215}
216
217// Limit returns a derivative query that has a limit on the number of results
218// returned. A negative value means unlimited.
219func (q *Query) Limit(limit int) *Query {
220 q = q.clone()
221 if limit < math.MinInt32 || limit > math.MaxInt32 {
222 q.err = errors.New("datastore: query limit overflow")
223 return q
224 }
225 q.limit = int32(limit)
226 return q
227}
228
229// Offset returns a derivative query that has an offset of how many keys to
230// skip over before returning results. A negative value is invalid.
231func (q *Query) Offset(offset int) *Query {
232 q = q.clone()
233 if offset < 0 {
234 q.err = errors.New("datastore: negative query offset")
235 return q
236 }
237 if offset > math.MaxInt32 {
238 q.err = errors.New("datastore: query offset overflow")
239 return q
240 }
241 q.offset = int32(offset)
242 return q
243}
244
245// BatchSize returns a derivative query to fetch the supplied number of results
246// at once. This value should be greater than zero, and equal to or less than
247// the Limit.
248func (q *Query) BatchSize(size int) *Query {
249 q = q.clone()
250 if size <= 0 || size > math.MaxInt32 {
251 q.err = errors.New("datastore: query batch size overflow")
252 return q
253 }
254 q.count = int32(size)
255 return q
256}
257
258// Start returns a derivative query with the given start point.
259func (q *Query) Start(c Cursor) *Query {
260 q = q.clone()
261 if c.cc == nil {
262 q.err = errors.New("datastore: invalid cursor")
263 return q
264 }
265 q.start = c.cc
266 return q
267}
268
269// End returns a derivative query with the given end point.
270func (q *Query) End(c Cursor) *Query {
271 q = q.clone()
272 if c.cc == nil {
273 q.err = errors.New("datastore: invalid cursor")
274 return q
275 }
276 q.end = c.cc
277 return q
278}
279
// toProto converts the query to a protocol buffer.
func (q *Query) toProto(dst *pb.Query, appID string) error {
	if len(q.projection) != 0 && q.keysOnly {
		return errors.New("datastore: query cannot both project and be keys-only")
	}
	dst.Reset()
	dst.App = proto.String(appID)
	if q.kind != "" {
		dst.Kind = proto.String(q.kind)
	}
	if q.ancestor != nil {
		dst.Ancestor = keyToProto(appID, q.ancestor)
		// Eventual consistency is only expressed for ancestor queries.
		if q.eventual {
			dst.Strong = proto.Bool(false)
		}
	}
	if q.projection != nil {
		dst.PropertyName = q.projection
		if q.distinct {
			dst.GroupByPropertyName = q.projection
		}
	}
	if q.keysOnly {
		dst.KeysOnly = proto.Bool(true)
		dst.RequirePerfectPlan = proto.Bool(true)
	}
	for _, qf := range q.filter {
		if qf.FieldName == "" {
			return errors.New("datastore: empty query filter field name")
		}
		p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
		if errStr != "" {
			return errors.New("datastore: bad query filter value type: " + errStr)
		}
		xf := &pb.Query_Filter{
			Op:       operatorToProto[qf.Op],
			Property: []*pb.Property{p},
		}
		if xf.Op == nil {
			return errors.New("datastore: unknown query filter operator")
		}
		dst.Filter = append(dst.Filter, xf)
	}
	for _, qo := range q.order {
		if qo.FieldName == "" {
			return errors.New("datastore: empty query order field name")
		}
		xo := &pb.Query_Order{
			Property:  proto.String(qo.FieldName),
			Direction: sortDirectionToProto[qo.Direction],
		}
		if xo.Direction == nil {
			return errors.New("datastore: unknown query order direction")
		}
		dst.Order = append(dst.Order, xo)
	}
	// A negative limit means unlimited, so leave dst.Limit unset in that case.
	if q.limit >= 0 {
		dst.Limit = proto.Int32(q.limit)
	}
	if q.offset != 0 {
		dst.Offset = proto.Int32(q.offset)
	}
	if q.count != 0 {
		dst.Count = proto.Int32(q.count)
	}
	dst.CompiledCursor = q.start
	dst.EndCompiledCursor = q.end
	dst.Compile = proto.Bool(true)
	return nil
}
350
// Count returns the number of results for the query.
//
// The running time and number of API calls made by Count scale linearly with
// the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise Count will
// continue until it finishes counting or the provided context expires.
func (q *Query) Count(c context.Context) (int, error) {
	// Check that the query is well-formed.
	if q.err != nil {
		return 0, q.err
	}

	// Run a copy of the query, with keysOnly true (if we're not a projection,
	// since the two are incompatible), and an adjusted offset. We also set the
	// limit to zero, as we don't want any actual entity data, just the number
	// of skipped results.
	newQ := q.clone()
	newQ.keysOnly = len(newQ.projection) == 0
	newQ.limit = 0
	if q.limit < 0 {
		// If the original query was unlimited, set the new query's offset to maximum.
		newQ.offset = math.MaxInt32
	} else {
		newQ.offset = q.offset + q.limit
		if newQ.offset < 0 {
			// Do the best we can, in the presence of overflow.
			newQ.offset = math.MaxInt32
		}
	}
	req := &pb.Query{}
	if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {
		return 0, err
	}
	res := &pb.QueryResult{}
	if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil {
		return 0, err
	}

	// n is the count we will return. For example, suppose that our original
	// query had an offset of 4 and a limit of 2008: the count will be 2008,
	// provided that there are at least 2012 matching entities. However, the
	// RPCs will only skip 1000 results at a time. The RPC sequence is:
	//   call RunQuery with (offset, limit) = (2012, 0)  // 2012 == newQ.offset
	//   response has (skippedResults, moreResults) = (1000, true)
	//   n += 1000  // n == 1000
	//   call Next with (offset, limit) = (1012, 0)  // 1012 == newQ.offset - n
	//   response has (skippedResults, moreResults) = (1000, true)
	//   n += 1000  // n == 2000
	//   call Next with (offset, limit) = (12, 0)  // 12 == newQ.offset - n
	//   response has (skippedResults, moreResults) = (12, false)
	//   n += 12  // n == 2012
	//   // exit the loop
	//   n -= 4  // n == 2008
	var n int32
	for {
		// The QueryResult should have no actual entity data, just skipped results.
		if len(res.Result) != 0 {
			return 0, errors.New("datastore: internal error: Count request returned too much data")
		}
		n += res.GetSkippedResults()
		if !res.GetMoreResults() {
			break
		}
		if err := callNext(c, res, newQ.offset-n, q.count); err != nil {
			return 0, err
		}
	}
	// Results skipped to satisfy the original offset do not count.
	n -= q.offset
	if n < 0 {
		// If the offset was greater than the number of matching entities,
		// return 0 instead of negative.
		n = 0
	}
	return int(n), nil
}
426
// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
// returned by a query with more results. The response overwrites res in
// place, replacing the previous batch.
func callNext(c context.Context, res *pb.QueryResult, offset, count int32) error {
	if res.Cursor == nil {
		return errors.New("datastore: internal error: server did not return a cursor")
	}
	req := &pb.NextRequest{
		Cursor: res.Cursor,
	}
	// A negative count means "unset": let the server choose a batch size.
	if count >= 0 {
		req.Count = proto.Int32(count)
	}
	if offset != 0 {
		req.Offset = proto.Int32(offset)
	}
	if res.CompiledCursor != nil {
		// Keep requesting compiled cursors if the previous response had one.
		req.Compile = proto.Bool(true)
	}
	res.Reset()
	return internal.Call(c, "datastore_v3", "Next", req, res)
}
448
// GetAll runs the query in the given context and returns all keys that match
// that query, as well as appending the values to dst.
//
// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
//
// As a special case, *PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when *[]PropertyList was intended.
//
// The keys returned by GetAll will be in a 1-1 correspondence with the entities
// added to dst.
//
// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
//
// The running time and number of API calls made by GetAll scale linearly with
// the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise GetAll will
// continue until it finishes collecting results or the provided context
// expires.
func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {
	var (
		dv               reflect.Value
		mat              multiArgType
		elemType         reflect.Type
		errFieldMismatch error
	)
	if !q.keysOnly {
		// Validate dst up front, before issuing any RPCs.
		dv = reflect.ValueOf(dst)
		if dv.Kind() != reflect.Ptr || dv.IsNil() {
			return nil, ErrInvalidEntityType
		}
		dv = dv.Elem()
		mat, elemType = checkMultiArg(dv)
		if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
			return nil, ErrInvalidEntityType
		}
	}

	var keys []*Key
	for t := q.Run(c); ; {
		k, e, err := t.next()
		if err == Done {
			break
		}
		if err != nil {
			return keys, err
		}
		if !q.keysOnly {
			ev := reflect.New(elemType)
			if elemType.Kind() == reflect.Map {
				// This is a special case. The zero values of a map type are
				// not immediately useful; they have to be make'd.
				//
				// Funcs and channels are similar, in that a zero value is not useful,
				// but even a freshly make'd channel isn't useful: there's no fixed
				// channel buffer size that is always going to be large enough, and
				// there's no goroutine to drain the other end. Theoretically, these
				// types could be supported, for example by sniffing for a constructor
				// method or requiring prior registration, but for now it's not a
				// frequent enough concern to be worth it. Programmers can work around
				// it by explicitly using Iterator.Next instead of the Query.GetAll
				// convenience method.
				x := reflect.MakeMap(elemType)
				ev.Elem().Set(x)
			}
			if err = loadEntity(ev.Interface(), e); err != nil {
				if _, ok := err.(*ErrFieldMismatch); ok {
					// We continue loading entities even in the face of field mismatch errors.
					// If we encounter any other error, that other error is returned. Otherwise,
					// an ErrFieldMismatch is returned.
					errFieldMismatch = err
				} else {
					return keys, err
				}
			}
			if mat != multiArgTypeStructPtr {
				// dst's element type is a value type, so append the
				// dereferenced value rather than the pointer.
				ev = ev.Elem()
			}
			dv.Set(reflect.Append(dv, ev))
		}
		keys = append(keys, k)
	}
	return keys, errFieldMismatch
}
534
// Run runs the query in the given context.
func (q *Query) Run(c context.Context) *Iterator {
	// A malformed query reports its deferred construction error here.
	if q.err != nil {
		return &Iterator{err: q.err}
	}
	t := &Iterator{
		c:      c,
		limit:  q.limit,
		count:  q.count,
		q:      q,
		prevCC: q.start,
	}
	var req pb.Query
	if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {
		t.err = err
		return t
	}
	if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil {
		t.err = err
		return t
	}
	// The server may not skip the full offset in one response; keep issuing
	// Next RPCs until the remaining offset has been consumed.
	offset := q.offset - t.res.GetSkippedResults()
	var count int32
	if t.count > 0 && (t.limit < 0 || t.count < t.limit) {
		count = t.count
	} else {
		count = t.limit
	}
	for offset > 0 && t.res.GetMoreResults() {
		t.prevCC = t.res.CompiledCursor
		if err := callNext(t.c, &t.res, offset, count); err != nil {
			t.err = err
			break
		}
		skip := t.res.GetSkippedResults()
		if skip < 0 {
			t.err = errors.New("datastore: internal error: negative number of skipped_results")
			break
		}
		offset -= skip
	}
	if offset < 0 {
		t.err = errors.New("datastore: internal error: query offset was overshot")
	}
	return t
}
581
// Iterator is the result of running a query.
type Iterator struct {
	c context.Context
	// err records the first error encountered; it is sticky, so once set
	// every subsequent call returns it.
	err error
	// res is the result of the most recent RunQuery or Next API call.
	res pb.QueryResult
	// i is how many elements of res.Result we have iterated over.
	i int
	// limit is the limit on the number of results this iterator should return.
	// A negative value means unlimited.
	limit int32
	// count is the number of results this iterator should fetch at once. This
	// should be equal to or greater than zero.
	count int32
	// q is the original query which yielded this iterator.
	q *Query
	// prevCC is the compiled cursor that marks the end of the previous batch
	// of results.
	prevCC *pb.CompiledCursor
}
602
603// Done is returned when a query iteration has completed.
604var Done = errors.New("datastore: query has no more results")
605
606// Next returns the key of the next result. When there are no more results,
607// Done is returned as the error.
608//
609// If the query is not keys only and dst is non-nil, it also loads the entity
610// stored for that key into the struct pointer or PropertyLoadSaver dst, with
611// the same semantics and possible errors as for the Get function.
612func (t *Iterator) Next(dst interface{}) (*Key, error) {
613 k, e, err := t.next()
614 if err != nil {
615 return nil, err
616 }
617 if dst != nil && !t.q.keysOnly {
618 err = loadEntity(dst, e)
619 }
620 return k, err
621}
622
// next returns the next key and raw entity proto, fetching further batches
// from the server as needed. It returns Done when the results are exhausted;
// any error (including Done) is recorded in t.err and is sticky.
func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
	if t.err != nil {
		return nil, nil, t.err
	}

	// Issue datastore_v3/Next RPCs as necessary.
	for t.i == len(t.res.Result) {
		if !t.res.GetMoreResults() {
			t.err = Done
			return nil, nil, t.err
		}
		t.prevCC = t.res.CompiledCursor
		// Fetch t.count results at a time unless the remaining limit is
		// smaller (or count is unset), in which case use the limit.
		var count int32
		if t.count > 0 && (t.limit < 0 || t.count < t.limit) {
			count = t.count
		} else {
			count = t.limit
		}
		if err := callNext(t.c, &t.res, 0, count); err != nil {
			t.err = err
			return nil, nil, t.err
		}
		if t.res.GetSkippedResults() != 0 {
			// The offset was fully consumed in Run; Next responses must not
			// skip anything further.
			t.err = errors.New("datastore: internal error: iterator has skipped results")
			return nil, nil, t.err
		}
		t.i = 0
		if t.limit >= 0 {
			// Deduct this batch from the remaining limit.
			t.limit -= int32(len(t.res.Result))
			if t.limit < 0 {
				t.err = errors.New("datastore: internal error: query returned more results than the limit")
				return nil, nil, t.err
			}
		}
	}

	// Extract the key from the t.i'th element of t.res.Result.
	e := t.res.Result[t.i]
	t.i++
	if e.Key == nil {
		return nil, nil, errors.New("datastore: internal error: server did not return a key")
	}
	k, err := protoToKey(e.Key)
	if err != nil || k.Incomplete() {
		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
	}
	return k, e, nil
}
671
// Cursor returns a cursor for the iterator's current location.
func (t *Iterator) Cursor() (Cursor, error) {
	// Done is not a failure here: a cursor at the end of the results is valid.
	if t.err != nil && t.err != Done {
		return Cursor{}, t.err
	}
	// If we are at either end of the current batch of results,
	// return the compiled cursor at that end.
	skipped := t.res.GetSkippedResults()
	if t.i == 0 && skipped == 0 {
		if t.prevCC == nil {
			// A nil pointer (of type *pb.CompiledCursor) means no constraint:
			// passing it as the end cursor of a new query means unlimited results
			// (glossing over the integer limit parameter for now).
			// A non-nil pointer to an empty pb.CompiledCursor means the start:
			// passing it as the end cursor of a new query means 0 results.
			// If prevCC was nil, then the original query had no start cursor, but
			// Iterator.Cursor should return "the start" instead of unlimited.
			return Cursor{&zeroCC}, nil
		}
		return Cursor{t.prevCC}, nil
	}
	if t.i == len(t.res.Result) {
		return Cursor{t.res.CompiledCursor}, nil
	}
	// Otherwise, re-run the query offset to this iterator's position, starting from
	// the most recent compiled cursor. This is done on a best-effort basis, as it
	// is racy; if a concurrent process has added or removed entities, then the
	// cursor returned may be inconsistent.
	q := t.q.clone()
	q.start = t.prevCC
	q.offset = skipped + int32(t.i)
	// A zero-limit query skips q.offset results and returns none, leaving its
	// compiled cursor exactly at our current position.
	q.limit = 0
	q.keysOnly = len(q.projection) == 0
	t1 := q.Run(t.c)
	_, _, err := t1.next()
	if err != Done {
		if err == nil {
			err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
		}
		return Cursor{}, err
	}
	return Cursor{t1.res.CompiledCursor}, nil
}
715
716var zeroCC pb.CompiledCursor
717
// Cursor is an iterator's position. It can be converted to and from an opaque
// string. A cursor can be used from different HTTP requests, but only with a
// query with the same kind, ancestor, filter and order constraints.
type Cursor struct {
	// cc is the wire-format position. A nil cc is rejected as invalid by
	// Query.Start and Query.End, and String renders it as "".
	cc *pb.CompiledCursor
}
724
// String returns a base-64 string representation of a cursor.
func (c Cursor) String() string {
	if c.cc == nil {
		return ""
	}
	b, err := proto.Marshal(c.cc)
	if err != nil {
		// The only way to construct a Cursor with a non-nil cc field is to
		// unmarshal from the byte representation. We panic if the unmarshal
		// succeeds but the marshaling of the unchanged protobuf value fails.
		panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
	}
	// Trailing "=" padding is dropped; DecodeCursor restores it.
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
739
// DecodeCursor decodes a cursor from its base-64 string representation.
func DecodeCursor(s string) (Cursor, error) {
	// The empty string decodes to the start cursor.
	if s == "" {
		return Cursor{&zeroCC}, nil
	}
	// Cursor.String strips trailing "=" padding; restore it before decoding.
	if n := len(s) % 4; n != 0 {
		s += strings.Repeat("=", 4-n)
	}
	b, err := base64.URLEncoding.DecodeString(s)
	if err != nil {
		return Cursor{}, err
	}
	cc := &pb.CompiledCursor{}
	if err := proto.Unmarshal(b, cc); err != nil {
		return Cursor{}, err
	}
	return Cursor{cc}, nil
}
diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go
new file mode 100644
index 0000000..7b045a5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/save.go
@@ -0,0 +1,333 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package datastore
6
7import (
8 "errors"
9 "fmt"
10 "math"
11 "reflect"
12 "time"
13
14 "github.com/golang/protobuf/proto"
15
16 "google.golang.org/appengine"
17 pb "google.golang.org/appengine/internal/datastore"
18)
19
20func toUnixMicro(t time.Time) int64 {
21 // We cannot use t.UnixNano() / 1e3 because we want to handle times more than
22 // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
23 // be represented in the numerator of a single int64 divide.
24 return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
25}
26
27func fromUnixMicro(t int64) time.Time {
28 return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
29}
30
31var (
32 minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
33 maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
34)
35
// valueToProto converts a named value to a newly allocated Property.
// The returned error string is empty on success.
func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
	var (
		pv          pb.PropertyValue
		unsupported bool
	)
	switch v.Kind() {
	case reflect.Invalid:
		// No-op.
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		pv.Int64Value = proto.Int64(v.Int())
	case reflect.Bool:
		pv.BooleanValue = proto.Bool(v.Bool())
	case reflect.String:
		pv.StringValue = proto.String(v.String())
	case reflect.Float32, reflect.Float64:
		pv.DoubleValue = proto.Float64(v.Float())
	case reflect.Ptr:
		// *Key is the only supported pointer type; a nil *Key is stored
		// as a property with no value set.
		if k, ok := v.Interface().(*Key); ok {
			if k != nil {
				pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
			}
		} else {
			unsupported = true
		}
	case reflect.Struct:
		switch t := v.Interface().(type) {
		case time.Time:
			if t.Before(minTime) || t.After(maxTime) {
				return nil, "time value out of range"
			}
			// Times are stored as microseconds since the Unix epoch.
			pv.Int64Value = proto.Int64(toUnixMicro(t))
		case appengine.GeoPoint:
			if !t.Valid() {
				return nil, "invalid GeoPoint value"
			}
			// NOTE: Strangely, latitude maps to X, longitude to Y.
			pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
		default:
			unsupported = true
		}
	case reflect.Slice:
		if b, ok := v.Interface().([]byte); ok {
			pv.StringValue = proto.String(string(b))
		} else {
			// nvToProto should already catch slice values.
			// If we get here, we have a slice of slice values.
			unsupported = true
		}
	default:
		unsupported = true
	}
	if unsupported {
		return nil, "unsupported datastore value type: " + v.Type().String()
	}
	p = &pb.Property{
		Name:     proto.String(name),
		Value:    &pv,
		Multiple: proto.Bool(multiple),
	}
	if v.IsValid() {
		// Attach a Meaning so the value round-trips to the same Go type
		// (e.g. distinguishing []byte from ByteString, both stored as
		// string values in the proto).
		switch v.Interface().(type) {
		case []byte:
			p.Meaning = pb.Property_BLOB.Enum()
		case ByteString:
			p.Meaning = pb.Property_BYTESTRING.Enum()
		case appengine.BlobKey:
			p.Meaning = pb.Property_BLOBKEY.Enum()
		case time.Time:
			p.Meaning = pb.Property_GD_WHEN.Enum()
		case appengine.GeoPoint:
			p.Meaning = pb.Property_GEORSS_POINT.Enum()
		}
	}
	return p, ""
}
113
// saveOpts carries per-field serialization flags, derived from struct tags
// and from the context of the enclosing field during a save.
type saveOpts struct {
	noIndex   bool // store the property but do not index it
	multiple  bool // property is one element of a slice-valued field
	omitEmpty bool // skip the property entirely when its value is empty
}
119
// saveEntity encodes a PropertyLoadSaver or struct pointer src into an
// EntityProto for the entity identified by key. (The conversion runs from
// src to the proto, not the other way around.)
func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
	var err error
	var props []Property
	// PropertyLoadSaver implementations control their own serialization;
	// everything else goes through the reflection-based SaveStruct.
	if e, ok := src.(PropertyLoadSaver); ok {
		props, err = e.Save()
	} else {
		props, err = SaveStruct(src)
	}
	if err != nil {
		return nil, err
	}
	return propertiesToProto(defaultAppID, key, props)
}
134
// saveStructProperty appends to *props a single Property named name for the
// value v, applying the flags in opts. Nested struct fields (other than the
// specially handled time.Time and GeoPoint values) recurse through a
// sub-structPLS with a dotted "name." prefix. An error is returned for
// unsupported field types.
func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
	if opts.omitEmpty && isEmptyValue(v) {
		return nil
	}
	p := Property{
		Name:     name,
		NoIndex:  opts.noIndex,
		Multiple: opts.multiple,
	}
	// Well-known datastore value types are stored as-is.
	switch x := v.Interface().(type) {
	case *Key:
		p.Value = x
	case time.Time:
		p.Value = x
	case appengine.BlobKey:
		p.Value = x
	case appengine.GeoPoint:
		p.Value = x
	case ByteString:
		p.Value = x
	default:
		switch v.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			p.Value = v.Int()
		case reflect.Bool:
			p.Value = v.Bool()
		case reflect.String:
			p.Value = v.String()
		case reflect.Float32, reflect.Float64:
			p.Value = v.Float()
		case reflect.Slice:
			// []byte fields are stored as unindexed blobs. Other slice
			// kinds leave p.Value nil and fail the check below.
			if v.Type().Elem().Kind() == reflect.Uint8 {
				p.NoIndex = true
				p.Value = v.Bytes()
			}
		case reflect.Struct:
			if !v.CanAddr() {
				return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
			}
			sub, err := newStructPLS(v.Addr().Interface())
			if err != nil {
				return fmt.Errorf("datastore: unsupported struct field: %v", err)
			}
			// Flatten the nested struct using dotted property names.
			return sub.save(props, name+".", opts)
		}
	}
	if p.Value == nil {
		return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
	}
	*props = append(*props, p)
	return nil
}
187
188func (s structPLS) Save() ([]Property, error) {
189 var props []Property
190 if err := s.save(&props, "", saveOpts{}); err != nil {
191 return nil, err
192 }
193 return props, nil
194}
195
// save appends one Property per saveable field of s.v to *props. prefix is
// prepended to each field's property name (used when flattening nested
// structs), and opts carries flags inherited from the enclosing field.
func (s structPLS) save(props *[]Property, prefix string, opts saveOpts) error {
	for name, f := range s.codec.fields {
		name = prefix + name
		v := s.v.FieldByIndex(f.path)
		if !v.IsValid() || !v.CanSet() {
			// Skip unexported or otherwise unusable fields.
			continue
		}
		var opts1 saveOpts
		opts1.noIndex = opts.noIndex || f.noIndex
		opts1.multiple = opts.multiple
		opts1.omitEmpty = f.omitEmpty // don't propagate
		// For slice fields that aren't []byte, save each element.
		if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
			opts1.multiple = true
			for j := 0; j < v.Len(); j++ {
				if err := saveStructProperty(props, name, opts1, v.Index(j)); err != nil {
					return err
				}
			}
			continue
		}
		// Otherwise, save the field itself.
		if err := saveStructProperty(props, name, opts1, v); err != nil {
			return err
		}
	}
	return nil
}
224
// propertiesToProto converts a slice of Properties into an EntityProto for
// the entity identified by key, enforcing datastore invariants along the
// way: repeated names must all be Multiple, []byte values must be
// unindexed, and the indexed-property count must stay within the limit.
func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
	e := &pb.EntityProto{
		Key: keyToProto(defaultAppID, key),
	}
	if key.parent == nil {
		e.EntityGroup = &pb.Path{}
	} else {
		e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
	}
	// Records the Multiple flag of the first Property seen for each name;
	// a repeated name is only valid when every occurrence sets Multiple.
	prevMultiple := make(map[string]bool)

	for _, p := range props {
		if pm, ok := prevMultiple[p.Name]; ok {
			if !pm || !p.Multiple {
				return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
			}
		} else {
			prevMultiple[p.Name] = p.Multiple
		}

		x := &pb.Property{
			Name:     proto.String(p.Name),
			Value:    new(pb.PropertyValue),
			Multiple: proto.Bool(p.Multiple),
		}
		switch v := p.Value.(type) {
		case int64:
			x.Value.Int64Value = proto.Int64(v)
		case bool:
			x.Value.BooleanValue = proto.Bool(v)
		case string:
			x.Value.StringValue = proto.String(v)
			if p.NoIndex {
				// Unindexed strings are marked TEXT so they round-trip.
				x.Meaning = pb.Property_TEXT.Enum()
			}
		case float64:
			x.Value.DoubleValue = proto.Float64(v)
		case *Key:
			// A nil *Key is stored as a property with no value set.
			if v != nil {
				x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
			}
		case time.Time:
			if v.Before(minTime) || v.After(maxTime) {
				return nil, fmt.Errorf("datastore: time value out of range")
			}
			// Times are stored as microseconds since the Unix epoch.
			x.Value.Int64Value = proto.Int64(toUnixMicro(v))
			x.Meaning = pb.Property_GD_WHEN.Enum()
		case appengine.BlobKey:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BLOBKEY.Enum()
		case appengine.GeoPoint:
			if !v.Valid() {
				return nil, fmt.Errorf("datastore: invalid GeoPoint value")
			}
			// NOTE: Strangely, latitude maps to X, longitude to Y.
			x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
			x.Meaning = pb.Property_GEORSS_POINT.Enum()
		case []byte:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BLOB.Enum()
			if !p.NoIndex {
				return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
			}
		case ByteString:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BYTESTRING.Enum()
		default:
			// A nil Value is allowed (a property with no value set); any
			// other type is an error.
			if p.Value != nil {
				return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
			}
		}

		if p.NoIndex {
			e.RawProperty = append(e.RawProperty, x)
		} else {
			e.Property = append(e.Property, x)
			if len(e.Property) > maxIndexedProperties {
				return nil, errors.New("datastore: too many indexed properties")
			}
		}
	}
	return e, nil
}
308
309// isEmptyValue is taken from the encoding/json package in the standard library.
310func isEmptyValue(v reflect.Value) bool {
311 switch v.Kind() {
312 case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
313 // TODO(perfomance): Only reflect.String needed, other property types are not supported (copy/paste from json package)
314 return v.Len() == 0
315 case reflect.Bool:
316 return !v.Bool()
317 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
318 return v.Int() == 0
319 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
320 // TODO(perfomance): Uint* are unsupported property types - should be removed (copy/paste from json package)
321 return v.Uint() == 0
322 case reflect.Float32, reflect.Float64:
323 return v.Float() == 0
324 case reflect.Interface, reflect.Ptr:
325 return v.IsNil()
326 case reflect.Struct:
327 switch x := v.Interface().(type) {
328 case time.Time:
329 return x.IsZero()
330 }
331 }
332 return false
333}
diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go
new file mode 100644
index 0000000..2ae8428
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/transaction.go
@@ -0,0 +1,96 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package datastore
6
7import (
8 "errors"
9
10 "golang.org/x/net/context"
11
12 "google.golang.org/appengine/internal"
13 pb "google.golang.org/appengine/internal/datastore"
14)
15
// init registers, with the internal package's generic transaction
// machinery, how to attach a transaction handle to each kind of datastore
// RPC request.
func init() {
	internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
		x.Transaction = t
	})
	internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
		x.Transaction = t
	})
	internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
		x.Transaction = t
	})
	internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
		x.Transaction = t
	})
}
30
31// ErrConcurrentTransaction is returned when a transaction is rolled back due
32// to a conflict with a concurrent transaction.
33var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
34
35// RunInTransaction runs f in a transaction. It calls f with a transaction
36// context tc that f should use for all App Engine operations.
37//
38// If f returns nil, RunInTransaction attempts to commit the transaction,
39// returning nil if it succeeds. If the commit fails due to a conflicting
40// transaction, RunInTransaction retries f, each time with a new transaction
41// context. It gives up and returns ErrConcurrentTransaction after three
42// failed attempts. The number of attempts can be configured by specifying
43// TransactionOptions.Attempts.
44//
45// If f returns non-nil, then any datastore changes will not be applied and
46// RunInTransaction returns that same error. The function f is not retried.
47//
48// Note that when f returns, the transaction is not yet committed. Calling code
49// must be careful not to assume that any of f's changes have been committed
50// until RunInTransaction returns nil.
51//
52// Since f may be called multiple times, f should usually be idempotent.
53// datastore.Get is not idempotent when unmarshaling slice fields.
54//
55// Nested transactions are not supported; c may not be a transaction context.
56func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {
57 xg := false
58 if opts != nil {
59 xg = opts.XG
60 }
61 readOnly := false
62 if opts != nil {
63 readOnly = opts.ReadOnly
64 }
65 attempts := 3
66 if opts != nil && opts.Attempts > 0 {
67 attempts = opts.Attempts
68 }
69 var t *pb.Transaction
70 var err error
71 for i := 0; i < attempts; i++ {
72 if t, err = internal.RunTransactionOnce(c, f, xg, readOnly, t); err != internal.ErrConcurrentTransaction {
73 return err
74 }
75 }
76 return ErrConcurrentTransaction
77}
78
79// TransactionOptions are the options for running a transaction.
80type TransactionOptions struct {
81 // XG is whether the transaction can cross multiple entity groups. In
82 // comparison, a single group transaction is one where all datastore keys
83 // used have the same root key. Note that cross group transactions do not
84 // have the same behavior as single group transactions. In particular, it
85 // is much more likely to see partially applied transactions in different
86 // entity groups, in global queries.
87 // It is valid to set XG to true even if the transaction is within a
88 // single entity group.
89 XG bool
90 // Attempts controls the number of retries to perform when commits fail
91 // due to a conflicting transaction. If omitted, it defaults to 3.
92 Attempts int
93 // ReadOnly controls whether the transaction is a read only transaction.
94 // Read only transactions are potentially more efficient.
95 ReadOnly bool
96}
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
new file mode 100644
index 0000000..16d0772
--- /dev/null
+++ b/vendor/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// This file provides error functions for common API failure modes.
6
7package appengine
8
9import (
10 "fmt"
11
12 "google.golang.org/appengine/internal"
13)
14
// IsOverQuota reports whether err represents an API call failure
// due to insufficient available quota.
func IsOverQuota(err error) bool {
	callErr, ok := err.(*internal.CallError)
	// NOTE(review): 4 is presumably the RPC application error code for
	// OVER_QUOTA — confirm against the internal error-code definitions.
	return ok && callErr.Code == 4
}
21
// MultiError is returned by batch operations when there are errors with
// particular elements. Errors will be in a one-to-one correspondence with
// the input elements; successful elements will have a nil entry.
type MultiError []error

// Error summarizes the batch as the first non-nil error's message plus a
// count of any remaining errors.
func (m MultiError) Error() string {
	var first string
	count := 0
	for _, e := range m {
		if e == nil {
			continue
		}
		if count == 0 {
			first = e.Error()
		}
		count++
	}
	switch count {
	case 0:
		return "(0 errors)"
	case 1:
		return first
	case 2:
		return first + " (and 1 other error)"
	default:
		return fmt.Sprintf("%s (and %d other errors)", first, count-1)
	}
}
diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod
new file mode 100644
index 0000000..f449359
--- /dev/null
+++ b/vendor/google.golang.org/appengine/go.mod
@@ -0,0 +1,7 @@
1module google.golang.org/appengine
2
3require (
4 github.com/golang/protobuf v1.2.0
5 golang.org/x/net v0.0.0-20180724234803-3673e40ba225
6 golang.org/x/text v0.3.0
7)
diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum
new file mode 100644
index 0000000..1a221c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/go.sum
@@ -0,0 +1,6 @@
1github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
2github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
3golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8=
4golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
5golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
6golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
new file mode 100644
index 0000000..b8dcf8f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -0,0 +1,142 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package appengine
6
7import (
8 "time"
9
10 "golang.org/x/net/context"
11
12 "google.golang.org/appengine/internal"
13 pb "google.golang.org/appengine/internal/app_identity"
14 modpb "google.golang.org/appengine/internal/modules"
15)
16
17// AppID returns the application ID for the current application.
18// The string will be a plain application ID (e.g. "appid"), with a
19// domain prefix for custom domain deployments (e.g. "example.com:appid").
20func AppID(c context.Context) string { return internal.AppID(c) }
21
22// DefaultVersionHostname returns the standard hostname of the default version
23// of the current application (e.g. "my-app.appspot.com"). This is suitable for
24// use in constructing URLs.
25func DefaultVersionHostname(c context.Context) string {
26 return internal.DefaultVersionHostname(c)
27}
28
29// ModuleName returns the module name of the current instance.
30func ModuleName(c context.Context) string {
31 return internal.ModuleName(c)
32}
33
34// ModuleHostname returns a hostname of a module instance.
35// If module is the empty string, it refers to the module of the current instance.
36// If version is empty, it refers to the version of the current instance if valid,
37// or the default version of the module of the current instance.
38// If instance is empty, ModuleHostname returns the load-balancing hostname.
39func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
40 req := &modpb.GetHostnameRequest{}
41 if module != "" {
42 req.Module = &module
43 }
44 if version != "" {
45 req.Version = &version
46 }
47 if instance != "" {
48 req.Instance = &instance
49 }
50 res := &modpb.GetHostnameResponse{}
51 if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
52 return "", err
53 }
54 return *res.Hostname, nil
55}
56
57// VersionID returns the version ID for the current application.
58// It will be of the form "X.Y", where X is specified in app.yaml,
59// and Y is a number generated when each version of the app is uploaded.
60// It does not include a module name.
61func VersionID(c context.Context) string { return internal.VersionID(c) }
62
63// InstanceID returns a mostly-unique identifier for this instance.
64func InstanceID() string { return internal.InstanceID() }
65
66// Datacenter returns an identifier for the datacenter that the instance is running in.
67func Datacenter(c context.Context) string { return internal.Datacenter(c) }
68
69// ServerSoftware returns the App Engine release version.
70// In production, it looks like "Google App Engine/X.Y.Z".
71// In the development appserver, it looks like "Development/X.Y".
72func ServerSoftware() string { return internal.ServerSoftware() }
73
74// RequestID returns a string that uniquely identifies the request.
75func RequestID(c context.Context) string { return internal.RequestID(c) }
76
77// AccessToken generates an OAuth2 access token for the specified scopes on
78// behalf of service account of this application. This token will expire after
79// the returned time.
80func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
81 req := &pb.GetAccessTokenRequest{Scope: scopes}
82 res := &pb.GetAccessTokenResponse{}
83
84 err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
85 if err != nil {
86 return "", time.Time{}, err
87 }
88 return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
89}
90
91// Certificate represents a public certificate for the app.
92type Certificate struct {
93 KeyName string
94 Data []byte // PEM-encoded X.509 certificate
95}
96
97// PublicCertificates retrieves the public certificates for the app.
98// They can be used to verify a signature returned by SignBytes.
99func PublicCertificates(c context.Context) ([]Certificate, error) {
100 req := &pb.GetPublicCertificateForAppRequest{}
101 res := &pb.GetPublicCertificateForAppResponse{}
102 if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
103 return nil, err
104 }
105 var cs []Certificate
106 for _, pc := range res.PublicCertificateList {
107 cs = append(cs, Certificate{
108 KeyName: pc.GetKeyName(),
109 Data: []byte(pc.GetX509CertificatePem()),
110 })
111 }
112 return cs, nil
113}
114
115// ServiceAccount returns a string representing the service account name, in
116// the form of an email address (typically app_id@appspot.gserviceaccount.com).
117func ServiceAccount(c context.Context) (string, error) {
118 req := &pb.GetServiceAccountNameRequest{}
119 res := &pb.GetServiceAccountNameResponse{}
120
121 err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
122 if err != nil {
123 return "", err
124 }
125 return res.GetServiceAccountName(), err
126}
127
128// SignBytes signs bytes using a private key unique to your application.
129func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
130 req := &pb.SignForAppRequest{BytesToSign: bytes}
131 res := &pb.SignForAppResponse{}
132
133 if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
134 return "", nil, err
135 }
136 return res.GetKeyName(), res.GetSignatureBytes(), nil
137}
138
139func init() {
140 internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
141 internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
142}
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 0000000..bbc1cb9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,671 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// +build !appengine
6
7package internal
8
9import (
10 "bytes"
11 "errors"
12 "fmt"
13 "io/ioutil"
14 "log"
15 "net"
16 "net/http"
17 "net/url"
18 "os"
19 "runtime"
20 "strconv"
21 "strings"
22 "sync"
23 "sync/atomic"
24 "time"
25
26 "github.com/golang/protobuf/proto"
27 netcontext "golang.org/x/net/context"
28
29 basepb "google.golang.org/appengine/internal/base"
30 logpb "google.golang.org/appengine/internal/log"
31 remotepb "google.golang.org/appengine/internal/remote_api"
32)
33
34const (
35 apiPath = "/rpc_http"
36 defaultTicketSuffix = "/default.20150612t184001.0"
37)
38
39var (
40 // Incoming headers.
41 ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
42 dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
43 traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
44 curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
45 userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
46 remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
47
48 // Outgoing headers.
49 apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
50 apiEndpointHeaderValue = []string{"app-engine-apis"}
51 apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
52 apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
53 apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
54 apiContentType = http.CanonicalHeaderKey("Content-Type")
55 apiContentTypeValue = []string{"application/octet-stream"}
56 logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
57
58 apiHTTPClient = &http.Client{
59 Transport: &http.Transport{
60 Proxy: http.ProxyFromEnvironment,
61 Dial: limitDial,
62 },
63 }
64
65 defaultTicketOnce sync.Once
66 defaultTicket string
67 backgroundContextOnce sync.Once
68 backgroundContext netcontext.Context
69)
70
71func apiURL() *url.URL {
72 host, port := "appengine.googleapis.internal", "10001"
73 if h := os.Getenv("API_HOST"); h != "" {
74 host = h
75 }
76 if p := os.Getenv("API_PORT"); p != "" {
77 port = p
78 }
79 return &url.URL{
80 Scheme: "http",
81 Host: host + ":" + port,
82 Path: apiPath,
83 }
84}
85
86func handleHTTP(w http.ResponseWriter, r *http.Request) {
87 c := &context{
88 req: r,
89 outHeader: w.Header(),
90 apiURL: apiURL(),
91 }
92 r = r.WithContext(withContext(r.Context(), c))
93 c.req = r
94
95 stopFlushing := make(chan int)
96
97 // Patch up RemoteAddr so it looks reasonable.
98 if addr := r.Header.Get(userIPHeader); addr != "" {
99 r.RemoteAddr = addr
100 } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
101 r.RemoteAddr = addr
102 } else {
103 // Should not normally reach here, but pick a sensible default anyway.
104 r.RemoteAddr = "127.0.0.1"
105 }
106 // The address in the headers will most likely be of these forms:
107 // 123.123.123.123
108 // 2001:db8::1
109 // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
110 if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
111 // Assume the remote address is only a host; add a default port.
112 r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
113 }
114
115 // Start goroutine responsible for flushing app logs.
116 // This is done after adding c to ctx.m (and stopped before removing it)
117 // because flushing logs requires making an API call.
118 go c.logFlusher(stopFlushing)
119
120 executeRequestSafely(c, r)
121 c.outHeader = nil // make sure header changes aren't respected any more
122
123 stopFlushing <- 1 // any logging beyond this point will be dropped
124
125 // Flush any pending logs asynchronously.
126 c.pendingLogs.Lock()
127 flushes := c.pendingLogs.flushes
128 if len(c.pendingLogs.lines) > 0 {
129 flushes++
130 }
131 c.pendingLogs.Unlock()
132 flushed := make(chan struct{})
133 go func() {
134 defer close(flushed)
135 // Force a log flush, because with very short requests we
136 // may not ever flush logs.
137 c.flushLog(true)
138 }()
139 w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
140
141 // Avoid nil Write call if c.Write is never called.
142 if c.outCode != 0 {
143 w.WriteHeader(c.outCode)
144 }
145 if c.outBody != nil {
146 w.Write(c.outBody)
147 }
148 // Wait for the last flush to complete before returning,
149 // otherwise the security ticket will not be valid.
150 <-flushed
151}
152
153func executeRequestSafely(c *context, r *http.Request) {
154 defer func() {
155 if x := recover(); x != nil {
156 logf(c, 4, "%s", renderPanic(x)) // 4 == critical
157 c.outCode = 500
158 }
159 }()
160
161 http.DefaultServeMux.ServeHTTP(c, r)
162}
163
// renderPanic formats a panic value x together with the current goroutine's
// stack trace, trimming the top frames (this function and the recover
// closure in its caller) so the trace is rooted at the panic site.
func renderPanic(x interface{}) string {
	buf := make([]byte, 16<<10) // 16 KB should be plenty
	buf = buf[:runtime.Stack(buf, false)]

	// Remove the first few stack frames:
	//   this func
	//   the recover closure in the caller
	// That will root the stack trace at the site of the panic.
	const (
		skipStart  = "internal.renderPanic"
		skipFrames = 2
	)
	// Only trim when the marker frame is present. The previous code sliced
	// buf with start == -1 when bytes.Index failed, which itself panicked
	// inside the panic-recovery path; and its "p < 0" break could never
	// fire, since a failed IndexByte left p unchanged rather than negative.
	if start := bytes.Index(buf, []byte(skipStart)); start >= 0 {
		p := start
		for i := 0; i < skipFrames*2; i++ {
			nl := bytes.IndexByte(buf[p+1:], '\n')
			if nl < 0 {
				// Ran off the end of the trace; skip trimming entirely.
				p = -1
				break
			}
			p += nl + 1
		}
		if p >= 0 {
			// buf[start:p+1] is the block to remove.
			// Copy buf[p+1:] over buf[start:] and shrink buf.
			copy(buf[start:], buf[p+1:])
			buf = buf[:len(buf)-(p+1-start)]
		}
	}

	// Add panic heading.
	head := fmt.Sprintf("panic: %v\n\n", x)
	if len(head) > len(buf) {
		// Extremely unlikely to happen.
		return head
	}
	// copy has memmove semantics, so the overlapping shift is safe.
	copy(buf[len(head):], buf)
	copy(buf, head)

	return string(buf)
}
202
203// context represents the context of an in-flight HTTP request.
204// It implements the appengine.Context and http.ResponseWriter interfaces.
205type context struct {
206 req *http.Request
207
208 outCode int
209 outHeader http.Header
210 outBody []byte
211
212 pendingLogs struct {
213 sync.Mutex
214 lines []*logpb.UserAppLogLine
215 flushes int
216 }
217
218 apiURL *url.URL
219}
220
221var contextKey = "holds a *context"
222
223// jointContext joins two contexts in a superficial way.
224// It takes values and timeouts from a base context, and only values from another context.
225type jointContext struct {
226 base netcontext.Context
227 valuesOnly netcontext.Context
228}
229
230func (c jointContext) Deadline() (time.Time, bool) {
231 return c.base.Deadline()
232}
233
234func (c jointContext) Done() <-chan struct{} {
235 return c.base.Done()
236}
237
238func (c jointContext) Err() error {
239 return c.base.Err()
240}
241
242func (c jointContext) Value(key interface{}) interface{} {
243 if val := c.base.Value(key); val != nil {
244 return val
245 }
246 return c.valuesOnly.Value(key)
247}
248
249// fromContext returns the App Engine context or nil if ctx is not
250// derived from an App Engine context.
251func fromContext(ctx netcontext.Context) *context {
252 c, _ := ctx.Value(&contextKey).(*context)
253 return c
254}
255
256func withContext(parent netcontext.Context, c *context) netcontext.Context {
257 ctx := netcontext.WithValue(parent, &contextKey, c)
258 if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
259 ctx = withNamespace(ctx, ns)
260 }
261 return ctx
262}
263
264func toContext(c *context) netcontext.Context {
265 return withContext(netcontext.Background(), c)
266}
267
268func IncomingHeaders(ctx netcontext.Context) http.Header {
269 if c := fromContext(ctx); c != nil {
270 return c.req.Header
271 }
272 return nil
273}
274
275func ReqContext(req *http.Request) netcontext.Context {
276 return req.Context()
277}
278
279func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
280 return jointContext{
281 base: parent,
282 valuesOnly: req.Context(),
283 }
284}
285
286// DefaultTicket returns a ticket used for background context or dev_appserver.
287func DefaultTicket() string {
288 defaultTicketOnce.Do(func() {
289 if IsDevAppServer() {
290 defaultTicket = "testapp" + defaultTicketSuffix
291 return
292 }
293 appID := partitionlessAppID()
294 escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
295 majVersion := VersionID(nil)
296 if i := strings.Index(majVersion, "."); i > 0 {
297 majVersion = majVersion[:i]
298 }
299 defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
300 })
301 return defaultTicket
302}
303
304func BackgroundContext() netcontext.Context {
305 backgroundContextOnce.Do(func() {
306 // Compute background security ticket.
307 ticket := DefaultTicket()
308
309 c := &context{
310 req: &http.Request{
311 Header: http.Header{
312 ticketHeader: []string{ticket},
313 },
314 },
315 apiURL: apiURL(),
316 }
317 backgroundContext = toContext(c)
318
319 // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
320 go c.logFlusher(make(chan int))
321 })
322
323 return backgroundContext
324}
325
// RegisterTestRequest registers the HTTP request req for testing, such that
// any API calls are sent to the provided URL. It returns a closure to delete
// the registration.
// It should only be used by aetest package.
func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
	c := &context{
		req:    req,
		apiURL: apiURL,
	}
	// decorate runs first so the App Engine context value is layered on top
	// of whatever the decorator added.
	ctx := withContext(decorate(req.Context()), c)
	req = req.WithContext(ctx)
	// Point c back at the derived request so c.req and the context agree.
	c.req = req
	// The returned cleanup is a no-op: the registration lives only in the
	// returned request's context, so there is nothing global to delete.
	return req, func() {}
}
340
// errTimeout is returned by post (and thus Call) when the service-bridge
// request exceeded its deadline and was cancelled.
var errTimeout = &CallError{
	Detail:  "Deadline exceeded",
	Code:    int32(remotepb.RpcError_CANCELLED),
	Timeout: true,
}
346
// Header returns the response headers accumulated for the in-flight request.
func (c *context) Header() http.Header { return c.outHeader }
348
// bodyAllowedForStatus reports whether an HTTP response with the given
// status code is permitted to carry a body. Mirrors the unexported helper
// in $GOROOT/src/pkg/net/http/transfer.go: 1xx informational responses,
// 204 No Content and 304 Not Modified must not have bodies (nor entity
// headers such as Content-Length or Content-Type).
func bodyAllowedForStatus(status int) bool {
	switch status {
	case 204, 304:
		return false
	}
	if status >= 100 && status <= 199 {
		return false
	}
	return true
}
363
// Write buffers b as response body data, implicitly committing a 200
// status first if WriteHeader was never called. Like net/http, it rejects
// body bytes for status codes that must not carry a body.
func (c *context) Write(b []byte) (int, error) {
	if c.outCode == 0 {
		c.WriteHeader(http.StatusOK)
	}
	if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
		return 0, http.ErrBodyNotAllowed
	}
	c.outBody = append(c.outBody, b...)
	return len(b), nil
}
374
// WriteHeader records the response status code. Only the first call takes
// effect; later calls are ignored and logged at error level, matching
// net/http behaviour.
func (c *context) WriteHeader(code int) {
	if c.outCode != 0 {
		logf(c, 3, "WriteHeader called multiple times on request.") // error level
		return
	}
	c.outCode = code
}
382
// post sends body as a POST to the API server at c.apiURL and returns the
// response body. The request carries the service-bridge headers plus the
// RPC deadline; if it does not complete within timeout it is cancelled via
// the transport and errTimeout is returned (the deferred check below
// overrides whatever error the HTTP layer produced).
func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
	hreq := &http.Request{
		Method: "POST",
		URL:    c.apiURL,
		Header: http.Header{
			apiEndpointHeader: apiEndpointHeaderValue,
			apiMethodHeader:   apiMethodHeaderValue,
			apiContentType:    apiContentTypeValue,
			apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
		},
		Body:          ioutil.NopCloser(bytes.NewReader(body)),
		ContentLength: int64(len(body)),
		Host:          c.apiURL.Host,
	}
	// Propagate tracing headers from the inbound request so API calls can
	// be correlated with it.
	if info := c.req.Header.Get(dapperHeader); info != "" {
		hreq.Header.Set(dapperHeader, info)
	}
	if info := c.req.Header.Get(traceHeader); info != "" {
		hreq.Header.Set(traceHeader, info)
	}

	tr := apiHTTPClient.Transport.(*http.Transport)

	var timedOut int32 // atomic; set to 1 if timed out
	t := time.AfterFunc(timeout, func() {
		atomic.StoreInt32(&timedOut, 1)
		tr.CancelRequest(hreq)
	})
	defer t.Stop()
	defer func() {
		// Check if timeout was exceeded.
		if atomic.LoadInt32(&timedOut) != 0 {
			err = errTimeout
		}
	}()

	hresp, err := apiHTTPClient.Do(hreq)
	if err != nil {
		return nil, &CallError{
			Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	defer hresp.Body.Close()
	hrespBody, err := ioutil.ReadAll(hresp.Body)
	// The non-200 status is checked before the read error so the status
	// (and whatever body was readable) wins as the error detail.
	if hresp.StatusCode != 200 {
		return nil, &CallError{
			Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	if err != nil {
		return nil, &CallError{
			Detail: fmt.Sprintf("service bridge response bad: %v", err),
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	return hrespBody, nil
}
442
// Call dispatches one App Engine API RPC: it marshals in, posts it over
// the service bridge with the request's security ticket, and unmarshals
// the reply into out. Remote failures come back as *CallError
// (RPC/transport level, Timeout set for cancellations and deadline
// overruns) or *APIError (application level).
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
	// Rewrite the request for the current namespace, for services that
	// carry a namespace field.
	if ns := NamespaceFromContext(ctx); ns != "" {
		if fn, ok := NamespaceMods[service]; ok {
			fn(in, ns)
		}
	}

	// A registered call override (e.g. for testing) intercepts the call.
	if f, ctx, ok := callOverrideFromContext(ctx); ok {
		return f(ctx, service, method, in, out)
	}

	// Handle already-done contexts quickly.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	c := fromContext(ctx)
	if c == nil {
		// Give a good error message rather than a panic lower down.
		return errNotAppEngineContext
	}

	// Apply transaction modifications if we're in a transaction.
	if t := transactionFromContext(ctx); t != nil {
		if t.finished {
			return errors.New("transaction context has expired")
		}
		applyTransaction(in, &t.transaction)
	}

	// Default RPC timeout is 60s.
	timeout := 60 * time.Second
	if deadline, ok := ctx.Deadline(); ok {
		timeout = deadline.Sub(time.Now())
	}

	data, err := proto.Marshal(in)
	if err != nil {
		return err
	}

	ticket := c.req.Header.Get(ticketHeader)
	// Use a test ticket under test environment.
	if ticket == "" {
		if appid := ctx.Value(&appIDOverrideKey); appid != nil {
			ticket = appid.(string) + defaultTicketSuffix
		}
	}
	// Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
	if ticket == "" {
		ticket = DefaultTicket()
	}
	req := &remotepb.Request{
		ServiceName: &service,
		Method:      &method,
		Request:     data,
		RequestId:   &ticket,
	}
	hreqBody, err := proto.Marshal(req)
	if err != nil {
		return err
	}

	hrespBody, err := c.post(hreqBody, timeout)
	if err != nil {
		return err
	}

	res := &remotepb.Response{}
	if err := proto.Unmarshal(hrespBody, res); err != nil {
		return err
	}
	if res.RpcError != nil {
		ce := &CallError{
			Detail: res.RpcError.GetDetail(),
			Code:   *res.RpcError.Code,
		}
		// Flag cancellations/deadline overruns so callers can detect timeouts.
		switch remotepb.RpcError_ErrorCode(ce.Code) {
		case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
			ce.Timeout = true
		}
		return ce
	}
	if res.ApplicationError != nil {
		return &APIError{
			Service: *req.ServiceName,
			Detail:  res.ApplicationError.GetDetail(),
			Code:    *res.ApplicationError.Code,
		}
	}
	if res.Exception != nil || res.JavaException != nil {
		// This shouldn't happen, but let's be defensive.
		return &CallError{
			Detail: "service bridge returned exception",
			Code:   int32(remotepb.RpcError_UNKNOWN),
		}
	}
	return proto.Unmarshal(res.Response, out)
}
544
// Request returns the inbound HTTP request this context was created for.
func (c *context) Request() *http.Request {
	return c.req
}
548
// addLogLine appends ll to the pending (not yet flushed) log lines,
// truncating the message to roughly 8 KB first.
func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
	// Truncate long log lines.
	// TODO(dsymonds): Check if this is still necessary.
	const lim = 8 << 10
	if len(*ll.Message) > lim {
		// Replace the tail with a marker recording the original length.
		suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
		ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
	}

	c.pendingLogs.Lock()
	c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
	c.pendingLogs.Unlock()
}
562
// logLevelName maps the numeric log levels used by logf to the names
// printed when mirroring logs to stderr.
var logLevelName = map[int64]string{
	0: "DEBUG",
	1: "INFO",
	2: "WARNING",
	3: "ERROR",
	4: "CRITICAL",
}
570
// logf records a formatted message at the given level in c's pending log
// buffer and, on first-generation runtimes, mirrors it to stderr.
// Panics if c is nil.
func logf(c *context, level int64, format string, args ...interface{}) {
	if c == nil {
		panic("not an App Engine context")
	}
	s := fmt.Sprintf(format, args...)
	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
	c.addLogLine(&logpb.UserAppLogLine{
		TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), // microseconds
		Level:         &level,
		Message:       &s,
	})
	// Only duplicate log to stderr if not running on App Engine second generation
	if !IsSecondGen() {
		log.Print(logLevelName[level] + ": " + s)
	}
}
587
// flushLog attempts to flush any pending logs to the appserver.
// It should not be called concurrently.
// It detaches a batch of pending lines under the lock, releases the lock
// for the (slow) RPC, and splices the batch back onto the front of the
// queue if marshaling or the Flush RPC fails.
func (c *context) flushLog(force bool) (flushed bool) {
	c.pendingLogs.Lock()
	// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
	n, rem := 0, 30<<20
	for ; n < len(c.pendingLogs.lines); n++ {
		ll := c.pendingLogs.lines[n]
		// Each log line will require about 3 bytes of overhead.
		nb := proto.Size(ll) + 3
		if nb > rem {
			break
		}
		rem -= nb
	}
	lines := c.pendingLogs.lines[:n]
	c.pendingLogs.lines = c.pendingLogs.lines[n:]
	c.pendingLogs.Unlock()

	if len(lines) == 0 && !force {
		// Nothing to flush.
		return false
	}

	// On failure below, put the detached lines back so a later flush retries.
	rescueLogs := false
	defer func() {
		if rescueLogs {
			c.pendingLogs.Lock()
			c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
			c.pendingLogs.Unlock()
		}
	}()

	buf, err := proto.Marshal(&logpb.UserAppLogGroup{
		LogLine: lines,
	})
	if err != nil {
		log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
		rescueLogs = true
		return false
	}

	req := &logpb.FlushRequest{
		Logs: buf,
	}
	res := &basepb.VoidProto{}
	c.pendingLogs.Lock()
	c.pendingLogs.flushes++
	c.pendingLogs.Unlock()
	if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
		log.Printf("internal.flushLog: Flush RPC: %v", err)
		rescueLogs = true
		return false
	}
	return true
}
644
const (
	// Log flushing parameters.
	flushInterval      = 1 * time.Second  // how often logFlusher checks for pending lines
	forceFlushInterval = 60 * time.Second // force a flush at least this often, even with no lines
)
650
651func (c *context) logFlusher(stop <-chan int) {
652 lastFlush := time.Now()
653 tick := time.NewTicker(flushInterval)
654 for {
655 select {
656 case <-stop:
657 // Request finished.
658 tick.Stop()
659 return
660 case <-tick.C:
661 force := time.Now().Sub(lastFlush) > forceFlushInterval
662 if c.flushLog(force) {
663 lastFlush = time.Now()
664 }
665 }
666 }
667}
668
// ContextForTesting wraps req in a bare App Engine context (no apiURL set).
func ContextForTesting(req *http.Request) netcontext.Context {
	return toContext(&context{req: req})
}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 0000000..f0f40b2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,169 @@
1// Copyright 2015 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// +build appengine
6
7package internal
8
9import (
10 "errors"
11 "fmt"
12 "net/http"
13 "time"
14
15 "appengine"
16 "appengine_internal"
17 basepb "appengine_internal/base"
18
19 "github.com/golang/protobuf/proto"
20 netcontext "golang.org/x/net/context"
21)
22
// contextKey is used by its address as the netcontext value key under which
// the classic appengine.Context is stored.
var contextKey = "holds an appengine.Context"
24
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
func fromContext(ctx netcontext.Context) appengine.Context {
	// The failed type assertion yields nil, which is the "not found" value.
	c, _ := ctx.Value(&contextKey).(appengine.Context)
	return c
}
31
// This is only for classic App Engine adapters.
// ClassicContextFromContext extracts the classic appengine.Context stored
// in ctx, or returns errNotAppEngineContext when ctx was not derived from one.
func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
	c := fromContext(ctx)
	if c == nil {
		return nil, errNotAppEngineContext
	}
	return c, nil
}
40
// withContext stores the classic appengine.Context in parent's value chain
// and, when the app has a current namespace, layers that on as well so
// NamespaceFromContext works on the derived context.
func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
	ctx := netcontext.WithValue(parent, &contextKey, c)

	s := &basepb.StringProto{}
	// Best-effort namespace probe: the error return is deliberately ignored.
	c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
	if ns := s.GetValue(); ns != "" {
		ctx = NamespacedContext(ctx, ns)
	}

	return ctx
}
52
53func IncomingHeaders(ctx netcontext.Context) http.Header {
54 if c := fromContext(ctx); c != nil {
55 if req, ok := c.Request().(*http.Request); ok {
56 return req.Header
57 }
58 }
59 return nil
60}
61
// ReqContext builds a fresh App Engine context for req, rooted at the
// background context.
func ReqContext(req *http.Request) netcontext.Context {
	return WithContext(netcontext.Background(), req)
}
65
// WithContext derives a context for req layered over parent, using the
// classic runtime's appengine.NewContext.
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
	c := appengine.NewContext(req)
	return withContext(parent, c)
}
70
// testingContext is a minimal appengine.Context stub used by
// ContextForTesting. Only the namespace probe issued by withContext is
// supported; every other API call fails.
type testingContext struct {
	appengine.Context

	req *http.Request
}

func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
	// Allow withContext's GetNamespace probe to succeed (leaving the
	// response at its zero value, i.e. no namespace).
	if service == "__go__" && method == "GetNamespace" {
		return nil
	}
	return fmt.Errorf("testingContext: unsupported Call")
}
func (t *testingContext) Request() interface{} { return t.req }
85
// ContextForTesting returns a context backed by the testingContext stub,
// which supports no real API calls.
func ContextForTesting(req *http.Request) netcontext.Context {
	return withContext(netcontext.Background(), &testingContext{req: req})
}
89
// Call dispatches an API RPC through the classic runtime's Context.Call,
// translating appengine_internal error types into this package's
// *APIError / *CallError equivalents.
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
	// Rewrite the request for the current namespace, for services that
	// carry a namespace field.
	if ns := NamespaceFromContext(ctx); ns != "" {
		if fn, ok := NamespaceMods[service]; ok {
			fn(in, ns)
		}
	}

	// A registered call override (e.g. for testing) intercepts the call.
	if f, ctx, ok := callOverrideFromContext(ctx); ok {
		return f(ctx, service, method, in, out)
	}

	// Handle already-done contexts quickly.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	c := fromContext(ctx)
	if c == nil {
		// Give a good error message rather than a panic lower down.
		return errNotAppEngineContext
	}

	// Apply transaction modifications if we're in a transaction.
	if t := transactionFromContext(ctx); t != nil {
		if t.finished {
			return errors.New("transaction context has expired")
		}
		applyTransaction(in, &t.transaction)
	}

	// Forward any context deadline as the RPC timeout.
	var opts *appengine_internal.CallOptions
	if d, ok := ctx.Deadline(); ok {
		opts = &appengine_internal.CallOptions{
			Timeout: d.Sub(time.Now()),
		}
	}

	err := c.Call(service, method, in, out, opts)
	switch v := err.(type) {
	case *appengine_internal.APIError:
		return &APIError{
			Service: v.Service,
			Detail:  v.Detail,
			Code:    v.Code,
		}
	case *appengine_internal.CallError:
		return &CallError{
			Detail:  v.Detail,
			Code:    v.Code,
			Timeout: v.Timeout,
		}
	}
	return err
}
146
// handleHTTP is unreachable on classic App Engine — the runtime serves
// HTTP itself — so this stub exists only to satisfy the shared package API.
func handleHTTP(w http.ResponseWriter, r *http.Request) {
	panic("handleHTTP called; this should be impossible")
}
150
151func logf(c appengine.Context, level int64, format string, args ...interface{}) {
152 var fn func(format string, args ...interface{})
153 switch level {
154 case 0:
155 fn = c.Debugf
156 case 1:
157 fn = c.Infof
158 case 2:
159 fn = c.Warningf
160 case 3:
161 fn = c.Errorf
162 case 4:
163 fn = c.Criticalf
164 default:
165 // This shouldn't happen.
166 fn = c.Criticalf
167 }
168 fn(format, args...)
169}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 0000000..e0c0b21
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,123 @@
1// Copyright 2015 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package internal
6
7import (
8 "errors"
9 "os"
10
11 "github.com/golang/protobuf/proto"
12 netcontext "golang.org/x/net/context"
13)
14
// errNotAppEngineContext is returned (or panicked) when a context was not
// derived from an App Engine request context.
var errNotAppEngineContext = errors.New("not an App Engine context")
16
// CallOverrideFunc is the signature of a function that can intercept API
// calls made through Call.
type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error

// callOverrideKey is used by its address as the context key for the stack
// of registered CallOverrideFuncs.
var callOverrideKey = "holds []CallOverrideFunc"
20
// WithCallOverride returns a copy of ctx with f pushed onto the stack of
// call overrides consulted by Call (the most recently added wins).
func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
	// We avoid appending to any existing call override
	// so we don't risk overwriting a popped stack below.
	var cofs []CallOverrideFunc
	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
		cofs = append(cofs, uf...)
	}
	cofs = append(cofs, f)
	return netcontext.WithValue(ctx, &callOverrideKey, cofs)
}
31
// callOverrideFromContext pops the most recently registered call override,
// returning it together with a derived context whose stack no longer
// contains it (so the override itself can call Call without recursing).
// ok is false when no override is registered.
func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
	if len(cofs) == 0 {
		return nil, nil, false
	}
	// We found a list of overrides; grab the last, and reconstitute a
	// context that will hide it.
	f := cofs[len(cofs)-1]
	ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
	return f, ctx, true
}
43
// logOverrideFunc is the signature of a function that replaces the default
// per-context logging performed by Logf.
type logOverrideFunc func(level int64, format string, args ...interface{})

// logOverrideKey is used by its address as the context key for a logOverrideFunc.
var logOverrideKey = "holds a logOverrideFunc"
47
// WithLogOverride returns a copy of ctx whose Logf calls are redirected to f.
func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
	return netcontext.WithValue(ctx, &logOverrideKey, f)
}
51
// appIDOverrideKey is used by its address as the context key for a
// full-app-ID override string.
var appIDOverrideKey = "holds a string, being the full app ID"

// WithAppIDOverride returns a copy of ctx that reports appID as the
// fully-qualified application ID.
func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
	return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
}
57
// namespaceKey is used by its address as the context key for the current
// namespace string.
var namespaceKey = "holds the namespace string"

// withNamespace returns a copy of ctx with ns as the current namespace.
func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
	return netcontext.WithValue(ctx, &namespaceKey, ns)
}
63
// NamespaceFromContext returns the namespace previously set with
// withNamespace / NamespacedContext.
func NamespaceFromContext(ctx netcontext.Context) string {
	// If there's no namespace, return the empty string.
	ns, _ := ctx.Value(&namespaceKey).(string)
	return ns
}
69
// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
// An override installed via WithAppIDOverride takes precedence over the
// environment-derived ID.
func FullyQualifiedAppID(ctx netcontext.Context) string {
	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
		return id
	}
	return fullyQualifiedAppID(ctx)
}
79
// Logf emits a leveled, formatted log message for ctx. A log override
// registered via WithLogOverride takes precedence; otherwise the message
// goes through the App Engine context's logger. Panics when ctx is not an
// App Engine context and no override is set.
func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
		f(level, format, args...)
		return
	}
	c := fromContext(ctx)
	if c == nil {
		panic(errNotAppEngineContext)
	}
	logf(c, level, format, args...)
}
91
// NamespacedContext wraps a Context to support namespaces.
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
	return withNamespace(ctx, namespace)
}
96
97// SetTestEnv sets the env variables for testing background ticket in Flex.
98func SetTestEnv() func() {
99 var environ = []struct {
100 key, value string
101 }{
102 {"GAE_LONG_APP_ID", "my-app-id"},
103 {"GAE_MINOR_VERSION", "067924799508853122"},
104 {"GAE_MODULE_INSTANCE", "0"},
105 {"GAE_MODULE_NAME", "default"},
106 {"GAE_MODULE_VERSION", "20150612t184001"},
107 }
108
109 for _, v := range environ {
110 old := os.Getenv(v.key)
111 os.Setenv(v.key, v.value)
112 v.value = old
113 }
114 return func() { // Restore old environment after the test completes.
115 for _, v := range environ {
116 if v.value == "" {
117 os.Unsetenv(v.key)
118 continue
119 }
120 os.Setenv(v.key, v.value)
121 }
122 }
123}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 0000000..11df8c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package internal
6
7import (
8 "strings"
9)
10
// parseFullAppID splits a fully-qualified app ID of the form
// "[partition~][domain:]displayID" into its three components; missing
// pieces are returned as empty strings.
func parseFullAppID(appid string) (partition, domain, displayID string) {
	if p, rest, ok := strings.Cut(appid, "~"); ok {
		partition, appid = p, rest
	}
	if d, rest, ok := strings.Cut(appid, ":"); ok {
		domain, appid = d, rest
	}
	return partition, domain, appid
}
20
21// appID returns "appid" or "domain.com:appid".
22func appID(fullAppID string) string {
23 _, dom, dis := parseFullAppID(fullAppID)
24 if dom != "" {
25 return dom + ":" + dis
26 }
27 return dis
28}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 0000000..9a2ff77
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,611 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
3
4package app_identity
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
// AppIdentityServiceError_ErrorCode enumerates the error codes of the App
// Identity service; values and names mirror the generated .proto source.
// (Generated by protoc-gen-go — keep in sync with the proto, do not hand-edit values.)
type AppIdentityServiceError_ErrorCode int32

const (
	AppIdentityServiceError_SUCCESS           AppIdentityServiceError_ErrorCode = 0
	AppIdentityServiceError_UNKNOWN_SCOPE     AppIdentityServiceError_ErrorCode = 9
	AppIdentityServiceError_BLOB_TOO_LARGE    AppIdentityServiceError_ErrorCode = 1000
	AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
	AppIdentityServiceError_NOT_A_VALID_APP   AppIdentityServiceError_ErrorCode = 1002
	AppIdentityServiceError_UNKNOWN_ERROR     AppIdentityServiceError_ErrorCode = 1003
	AppIdentityServiceError_NOT_ALLOWED       AppIdentityServiceError_ErrorCode = 1005
	AppIdentityServiceError_NOT_IMPLEMENTED   AppIdentityServiceError_ErrorCode = 1006
)

// Name/value maps used by the proto runtime for text and JSON (de)serialization.
var AppIdentityServiceError_ErrorCode_name = map[int32]string{
	0:    "SUCCESS",
	9:    "UNKNOWN_SCOPE",
	1000: "BLOB_TOO_LARGE",
	1001: "DEADLINE_EXCEEDED",
	1002: "NOT_A_VALID_APP",
	1003: "UNKNOWN_ERROR",
	1005: "NOT_ALLOWED",
	1006: "NOT_IMPLEMENTED",
}
var AppIdentityServiceError_ErrorCode_value = map[string]int32{
	"SUCCESS":           0,
	"UNKNOWN_SCOPE":     9,
	"BLOB_TOO_LARGE":    1000,
	"DEADLINE_EXCEEDED": 1001,
	"NOT_A_VALID_APP":   1002,
	"UNKNOWN_ERROR":     1003,
	"NOT_ALLOWED":       1005,
	"NOT_IMPLEMENTED":   1006,
}

// Enum returns a freshly allocated pointer holding x (proto2 optional-field helper).
func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
	p := new(AppIdentityServiceError_ErrorCode)
	*p = x
	return p
}
func (x AppIdentityServiceError_ErrorCode) String() string {
	return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
}
func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
	if err != nil {
		return err
	}
	*x = AppIdentityServiceError_ErrorCode(value)
	return nil
}
func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0}
}
74
// AppIdentityServiceError is an empty generated message; the interesting
// part is its nested ErrorCode enum above. The XXX_* members are protobuf
// runtime plumbing (generated; do not hand-edit).
type AppIdentityServiceError struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *AppIdentityServiceError) Reset()         { *m = AppIdentityServiceError{} }
func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
func (*AppIdentityServiceError) ProtoMessage()    {}
func (*AppIdentityServiceError) Descriptor() ([]byte, []int) {
	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0}
}
func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b)
}
func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic)
}
func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AppIdentityServiceError.Merge(dst, src)
}
func (m *AppIdentityServiceError) XXX_Size() int {
	return xxx_messageInfo_AppIdentityServiceError.Size(m)
}
func (m *AppIdentityServiceError) XXX_DiscardUnknown() {
	xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m)
}

var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo
104
// SignForAppRequest carries the bytes to be signed with the app's key
// (generated message; do not hand-edit).
type SignForAppRequest struct {
	BytesToSign          []byte   `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SignForAppRequest) Reset()         { *m = SignForAppRequest{} }
func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
func (*SignForAppRequest) ProtoMessage()    {}
func (*SignForAppRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1}
}
func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b)
}
func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic)
}
func (dst *SignForAppRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SignForAppRequest.Merge(dst, src)
}
func (m *SignForAppRequest) XXX_Size() int {
	return xxx_messageInfo_SignForAppRequest.Size(m)
}
func (m *SignForAppRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SignForAppRequest.DiscardUnknown(m)
}

var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo

// GetBytesToSign returns the payload to sign; nil-safe on a nil receiver.
func (m *SignForAppRequest) GetBytesToSign() []byte {
	if m != nil {
		return m.BytesToSign
	}
	return nil
}
142
// SignForAppResponse carries the signing key name and the resulting
// signature bytes (generated message; do not hand-edit).
type SignForAppResponse struct {
	KeyName              *string  `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
	SignatureBytes       []byte   `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SignForAppResponse) Reset()         { *m = SignForAppResponse{} }
func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
func (*SignForAppResponse) ProtoMessage()    {}
func (*SignForAppResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2}
}
func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b)
}
func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic)
}
func (dst *SignForAppResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SignForAppResponse.Merge(dst, src)
}
func (m *SignForAppResponse) XXX_Size() int {
	return xxx_messageInfo_SignForAppResponse.Size(m)
}
func (m *SignForAppResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SignForAppResponse.DiscardUnknown(m)
}

var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo

// GetKeyName returns the key name, or "" when unset; nil-safe.
func (m *SignForAppResponse) GetKeyName() string {
	if m != nil && m.KeyName != nil {
		return *m.KeyName
	}
	return ""
}

// GetSignatureBytes returns the signature; nil-safe.
func (m *SignForAppResponse) GetSignatureBytes() []byte {
	if m != nil {
		return m.SignatureBytes
	}
	return nil
}
188
// GetPublicCertificateForAppRequest is an empty request message
// (generated; do not hand-edit).
type GetPublicCertificateForAppRequest struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *GetPublicCertificateForAppRequest) Reset()         { *m = GetPublicCertificateForAppRequest{} }
func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
func (*GetPublicCertificateForAppRequest) ProtoMessage()    {}
func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3}
}
func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b)
}
func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic)
}
func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src)
}
func (m *GetPublicCertificateForAppRequest) XXX_Size() int {
	return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m)
}
func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo
218
// PublicCertificate pairs a key name with its PEM-encoded X.509
// certificate (generated message; do not hand-edit).
type PublicCertificate struct {
	KeyName              *string  `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"`
	X509CertificatePem   *string  `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *PublicCertificate) Reset()         { *m = PublicCertificate{} }
func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
func (*PublicCertificate) ProtoMessage()    {}
func (*PublicCertificate) Descriptor() ([]byte, []int) {
	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4}
}
func (m *PublicCertificate) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PublicCertificate.Unmarshal(m, b)
}
func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic)
}
func (dst *PublicCertificate) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PublicCertificate.Merge(dst, src)
}
func (m *PublicCertificate) XXX_Size() int {
	return xxx_messageInfo_PublicCertificate.Size(m)
}
func (m *PublicCertificate) XXX_DiscardUnknown() {
	xxx_messageInfo_PublicCertificate.DiscardUnknown(m)
}

var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo

// GetKeyName returns the key name, or "" when unset; nil-safe.
func (m *PublicCertificate) GetKeyName() string {
	if m != nil && m.KeyName != nil {
		return *m.KeyName
	}
	return ""
}

// GetX509CertificatePem returns the PEM certificate, or "" when unset; nil-safe.
func (m *PublicCertificate) GetX509CertificatePem() string {
	if m != nil && m.X509CertificatePem != nil {
		return *m.X509CertificatePem
	}
	return ""
}
264
// GetPublicCertificateForAppResponse lists the app's public certificates
// and how long clients may cache them (generated message; do not hand-edit).
type GetPublicCertificateForAppResponse struct {
	PublicCertificateList      []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"`
	MaxClientCacheTimeInSecond *int64               `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"`
	XXX_NoUnkeyedLiteral       struct{}             `json:"-"`
	XXX_unrecognized           []byte               `json:"-"`
	XXX_sizecache              int32                `json:"-"`
}

func (m *GetPublicCertificateForAppResponse) Reset()         { *m = GetPublicCertificateForAppResponse{} }
func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
func (*GetPublicCertificateForAppResponse) ProtoMessage()    {}
func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5}
}
func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b)
}
func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic)
}
func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src)
}
func (m *GetPublicCertificateForAppResponse) XXX_Size() int {
	return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m)
}
func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m)
}

var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo

// GetPublicCertificateList returns the certificate list; nil-safe.
func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
	if m != nil {
		return m.PublicCertificateList
	}
	return nil
}

// GetMaxClientCacheTimeInSecond returns the cache TTL, or 0 when unset; nil-safe.
func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
	if m != nil && m.MaxClientCacheTimeInSecond != nil {
		return *m.MaxClientCacheTimeInSecond
	}
	return 0
}
310
311type GetServiceAccountNameRequest struct {
312 XXX_NoUnkeyedLiteral struct{} `json:"-"`
313 XXX_unrecognized []byte `json:"-"`
314 XXX_sizecache int32 `json:"-"`
315}
316
317func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
318func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
319func (*GetServiceAccountNameRequest) ProtoMessage() {}
320func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) {
321 return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6}
322}
323func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error {
324 return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b)
325}
326func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
327 return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic)
328}
329func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) {
330 xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src)
331}
332func (m *GetServiceAccountNameRequest) XXX_Size() int {
333 return xxx_messageInfo_GetServiceAccountNameRequest.Size(m)
334}
335func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() {
336 xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m)
337}
338
339var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo
340
341type GetServiceAccountNameResponse struct {
342 ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
343 XXX_NoUnkeyedLiteral struct{} `json:"-"`
344 XXX_unrecognized []byte `json:"-"`
345 XXX_sizecache int32 `json:"-"`
346}
347
348func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
349func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
350func (*GetServiceAccountNameResponse) ProtoMessage() {}
351func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) {
352 return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7}
353}
354func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error {
355 return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b)
356}
357func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
358 return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic)
359}
360func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) {
361 xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src)
362}
363func (m *GetServiceAccountNameResponse) XXX_Size() int {
364 return xxx_messageInfo_GetServiceAccountNameResponse.Size(m)
365}
366func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() {
367 xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m)
368}
369
370var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo
371
372func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
373 if m != nil && m.ServiceAccountName != nil {
374 return *m.ServiceAccountName
375 }
376 return ""
377}
378
379type GetAccessTokenRequest struct {
380 Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
381 ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"`
382 ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"`
383 XXX_NoUnkeyedLiteral struct{} `json:"-"`
384 XXX_unrecognized []byte `json:"-"`
385 XXX_sizecache int32 `json:"-"`
386}
387
388func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
389func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
390func (*GetAccessTokenRequest) ProtoMessage() {}
391func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) {
392 return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8}
393}
394func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error {
395 return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b)
396}
397func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
398 return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic)
399}
400func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) {
401 xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src)
402}
403func (m *GetAccessTokenRequest) XXX_Size() int {
404 return xxx_messageInfo_GetAccessTokenRequest.Size(m)
405}
406func (m *GetAccessTokenRequest) XXX_DiscardUnknown() {
407 xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m)
408}
409
410var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo
411
412func (m *GetAccessTokenRequest) GetScope() []string {
413 if m != nil {
414 return m.Scope
415 }
416 return nil
417}
418
419func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
420 if m != nil && m.ServiceAccountId != nil {
421 return *m.ServiceAccountId
422 }
423 return 0
424}
425
426func (m *GetAccessTokenRequest) GetServiceAccountName() string {
427 if m != nil && m.ServiceAccountName != nil {
428 return *m.ServiceAccountName
429 }
430 return ""
431}
432
433type GetAccessTokenResponse struct {
434 AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"`
435 ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"`
436 XXX_NoUnkeyedLiteral struct{} `json:"-"`
437 XXX_unrecognized []byte `json:"-"`
438 XXX_sizecache int32 `json:"-"`
439}
440
441func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
442func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
443func (*GetAccessTokenResponse) ProtoMessage() {}
444func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) {
445 return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9}
446}
447func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error {
448 return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b)
449}
450func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
451 return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic)
452}
453func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) {
454 xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src)
455}
456func (m *GetAccessTokenResponse) XXX_Size() int {
457 return xxx_messageInfo_GetAccessTokenResponse.Size(m)
458}
459func (m *GetAccessTokenResponse) XXX_DiscardUnknown() {
460 xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m)
461}
462
463var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo
464
465func (m *GetAccessTokenResponse) GetAccessToken() string {
466 if m != nil && m.AccessToken != nil {
467 return *m.AccessToken
468 }
469 return ""
470}
471
472func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
473 if m != nil && m.ExpirationTime != nil {
474 return *m.ExpirationTime
475 }
476 return 0
477}
478
479type GetDefaultGcsBucketNameRequest struct {
480 XXX_NoUnkeyedLiteral struct{} `json:"-"`
481 XXX_unrecognized []byte `json:"-"`
482 XXX_sizecache int32 `json:"-"`
483}
484
485func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
486func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
487func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
488func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) {
489 return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10}
490}
491func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error {
492 return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b)
493}
494func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
495 return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic)
496}
497func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) {
498 xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src)
499}
500func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int {
501 return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m)
502}
503func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() {
504 xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m)
505}
506
507var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo
508
509type GetDefaultGcsBucketNameResponse struct {
510 DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"`
511 XXX_NoUnkeyedLiteral struct{} `json:"-"`
512 XXX_unrecognized []byte `json:"-"`
513 XXX_sizecache int32 `json:"-"`
514}
515
516func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
517func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
518func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
519func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) {
520 return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11}
521}
522func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error {
523 return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b)
524}
525func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
526 return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic)
527}
528func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) {
529 xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src)
530}
531func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int {
532 return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m)
533}
534func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() {
535 xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m)
536}
537
538var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo
539
540func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
541 if m != nil && m.DefaultGcsBucketName != nil {
542 return *m.DefaultGcsBucketName
543 }
544 return ""
545}
546
547func init() {
548 proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError")
549 proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest")
550 proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse")
551 proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest")
552 proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate")
553 proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse")
554 proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest")
555 proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse")
556 proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest")
557 proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse")
558 proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest")
559 proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse")
560}
561
562func init() {
563 proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4)
564}
565
566var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{
567 // 676 bytes of a gzipped FileDescriptorProto
568 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58,
569 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e,
570 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a,
571 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f,
572 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37,
573 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87,
574 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c,
575 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e,
576 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a,
577 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9,
578 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2,
579 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1,
580 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d,
581 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4,
582 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b,
583 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71,
584 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d,
585 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf,
586 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd,
587 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30,
588 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79,
589 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66,
590 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea,
591 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a,
592 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34,
593 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe,
594 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38,
595 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42,
596 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde,
597 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84,
598 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8,
599 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc,
600 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92,
601 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14,
602 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08,
603 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79,
604 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b,
605 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f,
606 0x56, 0x99, 0xc3, 0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa,
607 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1,
608 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc,
609 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38,
610 0xf3, 0x04, 0x00, 0x00,
611}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 0000000..19610ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
1syntax = "proto2";
2option go_package = "app_identity";
3
4package appengine;
5
6message AppIdentityServiceError {
7 enum ErrorCode {
8 SUCCESS = 0;
9 UNKNOWN_SCOPE = 9;
10 BLOB_TOO_LARGE = 1000;
11 DEADLINE_EXCEEDED = 1001;
12 NOT_A_VALID_APP = 1002;
13 UNKNOWN_ERROR = 1003;
14 NOT_ALLOWED = 1005;
15 NOT_IMPLEMENTED = 1006;
16 }
17}
18
19message SignForAppRequest {
20 optional bytes bytes_to_sign = 1;
21}
22
23message SignForAppResponse {
24 optional string key_name = 1;
25 optional bytes signature_bytes = 2;
26}
27
28message GetPublicCertificateForAppRequest {
29}
30
31message PublicCertificate {
32 optional string key_name = 1;
33 optional string x509_certificate_pem = 2;
34}
35
36message GetPublicCertificateForAppResponse {
37 repeated PublicCertificate public_certificate_list = 1;
38 optional int64 max_client_cache_time_in_second = 2;
39}
40
41message GetServiceAccountNameRequest {
42}
43
44message GetServiceAccountNameResponse {
45 optional string service_account_name = 1;
46}
47
48message GetAccessTokenRequest {
49 repeated string scope = 1;
50 optional int64 service_account_id = 2;
51 optional string service_account_name = 3;
52}
53
54message GetAccessTokenResponse {
55 optional string access_token = 1;
56 optional int64 expiration_time = 2;
57}
58
59message GetDefaultGcsBucketNameRequest {
60}
61
62message GetDefaultGcsBucketNameResponse {
63 optional string default_gcs_bucket_name = 1;
64}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 0000000..db4777e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,308 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google.golang.org/appengine/internal/base/api_base.proto
3
4package base
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21type StringProto struct {
22 Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
23 XXX_NoUnkeyedLiteral struct{} `json:"-"`
24 XXX_unrecognized []byte `json:"-"`
25 XXX_sizecache int32 `json:"-"`
26}
27
28func (m *StringProto) Reset() { *m = StringProto{} }
29func (m *StringProto) String() string { return proto.CompactTextString(m) }
30func (*StringProto) ProtoMessage() {}
31func (*StringProto) Descriptor() ([]byte, []int) {
32 return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
33}
34func (m *StringProto) XXX_Unmarshal(b []byte) error {
35 return xxx_messageInfo_StringProto.Unmarshal(m, b)
36}
37func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
38 return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
39}
40func (dst *StringProto) XXX_Merge(src proto.Message) {
41 xxx_messageInfo_StringProto.Merge(dst, src)
42}
43func (m *StringProto) XXX_Size() int {
44 return xxx_messageInfo_StringProto.Size(m)
45}
46func (m *StringProto) XXX_DiscardUnknown() {
47 xxx_messageInfo_StringProto.DiscardUnknown(m)
48}
49
50var xxx_messageInfo_StringProto proto.InternalMessageInfo
51
52func (m *StringProto) GetValue() string {
53 if m != nil && m.Value != nil {
54 return *m.Value
55 }
56 return ""
57}
58
59type Integer32Proto struct {
60 Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
61 XXX_NoUnkeyedLiteral struct{} `json:"-"`
62 XXX_unrecognized []byte `json:"-"`
63 XXX_sizecache int32 `json:"-"`
64}
65
66func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
67func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
68func (*Integer32Proto) ProtoMessage() {}
69func (*Integer32Proto) Descriptor() ([]byte, []int) {
70 return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
71}
72func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
73 return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
74}
75func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
76 return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
77}
78func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
79 xxx_messageInfo_Integer32Proto.Merge(dst, src)
80}
81func (m *Integer32Proto) XXX_Size() int {
82 return xxx_messageInfo_Integer32Proto.Size(m)
83}
84func (m *Integer32Proto) XXX_DiscardUnknown() {
85 xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
86}
87
88var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
89
90func (m *Integer32Proto) GetValue() int32 {
91 if m != nil && m.Value != nil {
92 return *m.Value
93 }
94 return 0
95}
96
97type Integer64Proto struct {
98 Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
99 XXX_NoUnkeyedLiteral struct{} `json:"-"`
100 XXX_unrecognized []byte `json:"-"`
101 XXX_sizecache int32 `json:"-"`
102}
103
104func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
105func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
106func (*Integer64Proto) ProtoMessage() {}
107func (*Integer64Proto) Descriptor() ([]byte, []int) {
108 return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
109}
110func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
111 return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
112}
113func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
114 return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
115}
116func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
117 xxx_messageInfo_Integer64Proto.Merge(dst, src)
118}
119func (m *Integer64Proto) XXX_Size() int {
120 return xxx_messageInfo_Integer64Proto.Size(m)
121}
122func (m *Integer64Proto) XXX_DiscardUnknown() {
123 xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
124}
125
126var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
127
128func (m *Integer64Proto) GetValue() int64 {
129 if m != nil && m.Value != nil {
130 return *m.Value
131 }
132 return 0
133}
134
135type BoolProto struct {
136 Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
137 XXX_NoUnkeyedLiteral struct{} `json:"-"`
138 XXX_unrecognized []byte `json:"-"`
139 XXX_sizecache int32 `json:"-"`
140}
141
142func (m *BoolProto) Reset() { *m = BoolProto{} }
143func (m *BoolProto) String() string { return proto.CompactTextString(m) }
144func (*BoolProto) ProtoMessage() {}
145func (*BoolProto) Descriptor() ([]byte, []int) {
146 return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
147}
148func (m *BoolProto) XXX_Unmarshal(b []byte) error {
149 return xxx_messageInfo_BoolProto.Unmarshal(m, b)
150}
151func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
152 return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
153}
154func (dst *BoolProto) XXX_Merge(src proto.Message) {
155 xxx_messageInfo_BoolProto.Merge(dst, src)
156}
157func (m *BoolProto) XXX_Size() int {
158 return xxx_messageInfo_BoolProto.Size(m)
159}
160func (m *BoolProto) XXX_DiscardUnknown() {
161 xxx_messageInfo_BoolProto.DiscardUnknown(m)
162}
163
164var xxx_messageInfo_BoolProto proto.InternalMessageInfo
165
166func (m *BoolProto) GetValue() bool {
167 if m != nil && m.Value != nil {
168 return *m.Value
169 }
170 return false
171}
172
173type DoubleProto struct {
174 Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
175 XXX_NoUnkeyedLiteral struct{} `json:"-"`
176 XXX_unrecognized []byte `json:"-"`
177 XXX_sizecache int32 `json:"-"`
178}
179
180func (m *DoubleProto) Reset() { *m = DoubleProto{} }
181func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
182func (*DoubleProto) ProtoMessage() {}
183func (*DoubleProto) Descriptor() ([]byte, []int) {
184 return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
185}
186func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
187 return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
188}
189func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
190 return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
191}
192func (dst *DoubleProto) XXX_Merge(src proto.Message) {
193 xxx_messageInfo_DoubleProto.Merge(dst, src)
194}
195func (m *DoubleProto) XXX_Size() int {
196 return xxx_messageInfo_DoubleProto.Size(m)
197}
198func (m *DoubleProto) XXX_DiscardUnknown() {
199 xxx_messageInfo_DoubleProto.DiscardUnknown(m)
200}
201
202var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
203
204func (m *DoubleProto) GetValue() float64 {
205 if m != nil && m.Value != nil {
206 return *m.Value
207 }
208 return 0
209}
210
211type BytesProto struct {
212 Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
213 XXX_NoUnkeyedLiteral struct{} `json:"-"`
214 XXX_unrecognized []byte `json:"-"`
215 XXX_sizecache int32 `json:"-"`
216}
217
218func (m *BytesProto) Reset() { *m = BytesProto{} }
219func (m *BytesProto) String() string { return proto.CompactTextString(m) }
220func (*BytesProto) ProtoMessage() {}
221func (*BytesProto) Descriptor() ([]byte, []int) {
222 return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
223}
224func (m *BytesProto) XXX_Unmarshal(b []byte) error {
225 return xxx_messageInfo_BytesProto.Unmarshal(m, b)
226}
227func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
228 return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
229}
230func (dst *BytesProto) XXX_Merge(src proto.Message) {
231 xxx_messageInfo_BytesProto.Merge(dst, src)
232}
233func (m *BytesProto) XXX_Size() int {
234 return xxx_messageInfo_BytesProto.Size(m)
235}
236func (m *BytesProto) XXX_DiscardUnknown() {
237 xxx_messageInfo_BytesProto.DiscardUnknown(m)
238}
239
240var xxx_messageInfo_BytesProto proto.InternalMessageInfo
241
242func (m *BytesProto) GetValue() []byte {
243 if m != nil {
244 return m.Value
245 }
246 return nil
247}
248
249type VoidProto struct {
250 XXX_NoUnkeyedLiteral struct{} `json:"-"`
251 XXX_unrecognized []byte `json:"-"`
252 XXX_sizecache int32 `json:"-"`
253}
254
255func (m *VoidProto) Reset() { *m = VoidProto{} }
256func (m *VoidProto) String() string { return proto.CompactTextString(m) }
257func (*VoidProto) ProtoMessage() {}
258func (*VoidProto) Descriptor() ([]byte, []int) {
259 return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
260}
261func (m *VoidProto) XXX_Unmarshal(b []byte) error {
262 return xxx_messageInfo_VoidProto.Unmarshal(m, b)
263}
264func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
265 return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
266}
267func (dst *VoidProto) XXX_Merge(src proto.Message) {
268 xxx_messageInfo_VoidProto.Merge(dst, src)
269}
270func (m *VoidProto) XXX_Size() int {
271 return xxx_messageInfo_VoidProto.Size(m)
272}
273func (m *VoidProto) XXX_DiscardUnknown() {
274 xxx_messageInfo_VoidProto.DiscardUnknown(m)
275}
276
277var xxx_messageInfo_VoidProto proto.InternalMessageInfo
278
279func init() {
280 proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
281 proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
282 proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
283 proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
284 proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
285 proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
286 proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
287}
288
289func init() {
290 proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
291}
292
293var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
294 // 199 bytes of a gzipped FileDescriptorProto
295 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
296 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
297 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
298 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
299 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
300 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
301 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
302 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
303 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
304 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
305 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
306 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
307 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
308}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 0000000..56cd7a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
1// Built-in base types for API calls. Primarily useful as return types.
2
3syntax = "proto2";
4option go_package = "base";
5
6package appengine.base;
7
8message StringProto {
9 required string value = 1;
10}
11
12message Integer32Proto {
13 required int32 value = 1;
14}
15
16message Integer64Proto {
17 required int64 value = 1;
18}
19
20message BoolProto {
21 required bool value = 1;
22}
23
24message DoubleProto {
25 required double value = 1;
26}
27
28message BytesProto {
29 required bytes value = 1 [ctype=CORD];
30}
31
32message VoidProto {
33}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 0000000..2fb7482
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,4367 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
3
4package datastore
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21type Property_Meaning int32
22
23const (
24 Property_NO_MEANING Property_Meaning = 0
25 Property_BLOB Property_Meaning = 14
26 Property_TEXT Property_Meaning = 15
27 Property_BYTESTRING Property_Meaning = 16
28 Property_ATOM_CATEGORY Property_Meaning = 1
29 Property_ATOM_LINK Property_Meaning = 2
30 Property_ATOM_TITLE Property_Meaning = 3
31 Property_ATOM_CONTENT Property_Meaning = 4
32 Property_ATOM_SUMMARY Property_Meaning = 5
33 Property_ATOM_AUTHOR Property_Meaning = 6
34 Property_GD_WHEN Property_Meaning = 7
35 Property_GD_EMAIL Property_Meaning = 8
36 Property_GEORSS_POINT Property_Meaning = 9
37 Property_GD_IM Property_Meaning = 10
38 Property_GD_PHONENUMBER Property_Meaning = 11
39 Property_GD_POSTALADDRESS Property_Meaning = 12
40 Property_GD_RATING Property_Meaning = 13
41 Property_BLOBKEY Property_Meaning = 17
42 Property_ENTITY_PROTO Property_Meaning = 19
43 Property_INDEX_VALUE Property_Meaning = 18
44)
45
46var Property_Meaning_name = map[int32]string{
47 0: "NO_MEANING",
48 14: "BLOB",
49 15: "TEXT",
50 16: "BYTESTRING",
51 1: "ATOM_CATEGORY",
52 2: "ATOM_LINK",
53 3: "ATOM_TITLE",
54 4: "ATOM_CONTENT",
55 5: "ATOM_SUMMARY",
56 6: "ATOM_AUTHOR",
57 7: "GD_WHEN",
58 8: "GD_EMAIL",
59 9: "GEORSS_POINT",
60 10: "GD_IM",
61 11: "GD_PHONENUMBER",
62 12: "GD_POSTALADDRESS",
63 13: "GD_RATING",
64 17: "BLOBKEY",
65 19: "ENTITY_PROTO",
66 18: "INDEX_VALUE",
67}
68var Property_Meaning_value = map[string]int32{
69 "NO_MEANING": 0,
70 "BLOB": 14,
71 "TEXT": 15,
72 "BYTESTRING": 16,
73 "ATOM_CATEGORY": 1,
74 "ATOM_LINK": 2,
75 "ATOM_TITLE": 3,
76 "ATOM_CONTENT": 4,
77 "ATOM_SUMMARY": 5,
78 "ATOM_AUTHOR": 6,
79 "GD_WHEN": 7,
80 "GD_EMAIL": 8,
81 "GEORSS_POINT": 9,
82 "GD_IM": 10,
83 "GD_PHONENUMBER": 11,
84 "GD_POSTALADDRESS": 12,
85 "GD_RATING": 13,
86 "BLOBKEY": 17,
87 "ENTITY_PROTO": 19,
88 "INDEX_VALUE": 18,
89}
90
91func (x Property_Meaning) Enum() *Property_Meaning {
92 p := new(Property_Meaning)
93 *p = x
94 return p
95}
96func (x Property_Meaning) String() string {
97 return proto.EnumName(Property_Meaning_name, int32(x))
98}
99func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
100 value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
101 if err != nil {
102 return err
103 }
104 *x = Property_Meaning(value)
105 return nil
106}
107func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
108 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
109}
110
111type Property_FtsTokenizationOption int32
112
113const (
114 Property_HTML Property_FtsTokenizationOption = 1
115 Property_ATOM Property_FtsTokenizationOption = 2
116)
117
118var Property_FtsTokenizationOption_name = map[int32]string{
119 1: "HTML",
120 2: "ATOM",
121}
122var Property_FtsTokenizationOption_value = map[string]int32{
123 "HTML": 1,
124 "ATOM": 2,
125}
126
127func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
128 p := new(Property_FtsTokenizationOption)
129 *p = x
130 return p
131}
132func (x Property_FtsTokenizationOption) String() string {
133 return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
134}
135func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
136 value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
137 if err != nil {
138 return err
139 }
140 *x = Property_FtsTokenizationOption(value)
141 return nil
142}
143func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
144 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
145}
146
147type EntityProto_Kind int32
148
149const (
150 EntityProto_GD_CONTACT EntityProto_Kind = 1
151 EntityProto_GD_EVENT EntityProto_Kind = 2
152 EntityProto_GD_MESSAGE EntityProto_Kind = 3
153)
154
155var EntityProto_Kind_name = map[int32]string{
156 1: "GD_CONTACT",
157 2: "GD_EVENT",
158 3: "GD_MESSAGE",
159}
160var EntityProto_Kind_value = map[string]int32{
161 "GD_CONTACT": 1,
162 "GD_EVENT": 2,
163 "GD_MESSAGE": 3,
164}
165
166func (x EntityProto_Kind) Enum() *EntityProto_Kind {
167 p := new(EntityProto_Kind)
168 *p = x
169 return p
170}
171func (x EntityProto_Kind) String() string {
172 return proto.EnumName(EntityProto_Kind_name, int32(x))
173}
174func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
175 value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
176 if err != nil {
177 return err
178 }
179 *x = EntityProto_Kind(value)
180 return nil
181}
182func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
183 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
184}
185
186type Index_Property_Direction int32
187
188const (
189 Index_Property_ASCENDING Index_Property_Direction = 1
190 Index_Property_DESCENDING Index_Property_Direction = 2
191)
192
193var Index_Property_Direction_name = map[int32]string{
194 1: "ASCENDING",
195 2: "DESCENDING",
196}
197var Index_Property_Direction_value = map[string]int32{
198 "ASCENDING": 1,
199 "DESCENDING": 2,
200}
201
202func (x Index_Property_Direction) Enum() *Index_Property_Direction {
203 p := new(Index_Property_Direction)
204 *p = x
205 return p
206}
207func (x Index_Property_Direction) String() string {
208 return proto.EnumName(Index_Property_Direction_name, int32(x))
209}
210func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
211 value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
212 if err != nil {
213 return err
214 }
215 *x = Index_Property_Direction(value)
216 return nil
217}
218func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
219 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
220}
221
222type CompositeIndex_State int32
223
224const (
225 CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
226 CompositeIndex_READ_WRITE CompositeIndex_State = 2
227 CompositeIndex_DELETED CompositeIndex_State = 3
228 CompositeIndex_ERROR CompositeIndex_State = 4
229)
230
231var CompositeIndex_State_name = map[int32]string{
232 1: "WRITE_ONLY",
233 2: "READ_WRITE",
234 3: "DELETED",
235 4: "ERROR",
236}
237var CompositeIndex_State_value = map[string]int32{
238 "WRITE_ONLY": 1,
239 "READ_WRITE": 2,
240 "DELETED": 3,
241 "ERROR": 4,
242}
243
244func (x CompositeIndex_State) Enum() *CompositeIndex_State {
245 p := new(CompositeIndex_State)
246 *p = x
247 return p
248}
249func (x CompositeIndex_State) String() string {
250 return proto.EnumName(CompositeIndex_State_name, int32(x))
251}
252func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
253 value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
254 if err != nil {
255 return err
256 }
257 *x = CompositeIndex_State(value)
258 return nil
259}
260func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
261 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
262}
263
264type Snapshot_Status int32
265
266const (
267 Snapshot_INACTIVE Snapshot_Status = 0
268 Snapshot_ACTIVE Snapshot_Status = 1
269)
270
271var Snapshot_Status_name = map[int32]string{
272 0: "INACTIVE",
273 1: "ACTIVE",
274}
275var Snapshot_Status_value = map[string]int32{
276 "INACTIVE": 0,
277 "ACTIVE": 1,
278}
279
280func (x Snapshot_Status) Enum() *Snapshot_Status {
281 p := new(Snapshot_Status)
282 *p = x
283 return p
284}
285func (x Snapshot_Status) String() string {
286 return proto.EnumName(Snapshot_Status_name, int32(x))
287}
288func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
289 value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
290 if err != nil {
291 return err
292 }
293 *x = Snapshot_Status(value)
294 return nil
295}
296func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
297 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
298}
299
300type Query_Hint int32
301
302const (
303 Query_ORDER_FIRST Query_Hint = 1
304 Query_ANCESTOR_FIRST Query_Hint = 2
305 Query_FILTER_FIRST Query_Hint = 3
306)
307
308var Query_Hint_name = map[int32]string{
309 1: "ORDER_FIRST",
310 2: "ANCESTOR_FIRST",
311 3: "FILTER_FIRST",
312}
313var Query_Hint_value = map[string]int32{
314 "ORDER_FIRST": 1,
315 "ANCESTOR_FIRST": 2,
316 "FILTER_FIRST": 3,
317}
318
319func (x Query_Hint) Enum() *Query_Hint {
320 p := new(Query_Hint)
321 *p = x
322 return p
323}
324func (x Query_Hint) String() string {
325 return proto.EnumName(Query_Hint_name, int32(x))
326}
327func (x *Query_Hint) UnmarshalJSON(data []byte) error {
328 value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
329 if err != nil {
330 return err
331 }
332 *x = Query_Hint(value)
333 return nil
334}
335func (Query_Hint) EnumDescriptor() ([]byte, []int) {
336 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
337}
338
339type Query_Filter_Operator int32
340
341const (
342 Query_Filter_LESS_THAN Query_Filter_Operator = 1
343 Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
344 Query_Filter_GREATER_THAN Query_Filter_Operator = 3
345 Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
346 Query_Filter_EQUAL Query_Filter_Operator = 5
347 Query_Filter_IN Query_Filter_Operator = 6
348 Query_Filter_EXISTS Query_Filter_Operator = 7
349)
350
351var Query_Filter_Operator_name = map[int32]string{
352 1: "LESS_THAN",
353 2: "LESS_THAN_OR_EQUAL",
354 3: "GREATER_THAN",
355 4: "GREATER_THAN_OR_EQUAL",
356 5: "EQUAL",
357 6: "IN",
358 7: "EXISTS",
359}
360var Query_Filter_Operator_value = map[string]int32{
361 "LESS_THAN": 1,
362 "LESS_THAN_OR_EQUAL": 2,
363 "GREATER_THAN": 3,
364 "GREATER_THAN_OR_EQUAL": 4,
365 "EQUAL": 5,
366 "IN": 6,
367 "EXISTS": 7,
368}
369
370func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
371 p := new(Query_Filter_Operator)
372 *p = x
373 return p
374}
375func (x Query_Filter_Operator) String() string {
376 return proto.EnumName(Query_Filter_Operator_name, int32(x))
377}
378func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
379 value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
380 if err != nil {
381 return err
382 }
383 *x = Query_Filter_Operator(value)
384 return nil
385}
386func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
387 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
388}
389
390type Query_Order_Direction int32
391
392const (
393 Query_Order_ASCENDING Query_Order_Direction = 1
394 Query_Order_DESCENDING Query_Order_Direction = 2
395)
396
397var Query_Order_Direction_name = map[int32]string{
398 1: "ASCENDING",
399 2: "DESCENDING",
400}
401var Query_Order_Direction_value = map[string]int32{
402 "ASCENDING": 1,
403 "DESCENDING": 2,
404}
405
406func (x Query_Order_Direction) Enum() *Query_Order_Direction {
407 p := new(Query_Order_Direction)
408 *p = x
409 return p
410}
411func (x Query_Order_Direction) String() string {
412 return proto.EnumName(Query_Order_Direction_name, int32(x))
413}
414func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
415 value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
416 if err != nil {
417 return err
418 }
419 *x = Query_Order_Direction(value)
420 return nil
421}
422func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
423 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
424}
425
426type Error_ErrorCode int32
427
428const (
429 Error_BAD_REQUEST Error_ErrorCode = 1
430 Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
431 Error_INTERNAL_ERROR Error_ErrorCode = 3
432 Error_NEED_INDEX Error_ErrorCode = 4
433 Error_TIMEOUT Error_ErrorCode = 5
434 Error_PERMISSION_DENIED Error_ErrorCode = 6
435 Error_BIGTABLE_ERROR Error_ErrorCode = 7
436 Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
437 Error_CAPABILITY_DISABLED Error_ErrorCode = 9
438 Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
439 Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
440)
441
442var Error_ErrorCode_name = map[int32]string{
443 1: "BAD_REQUEST",
444 2: "CONCURRENT_TRANSACTION",
445 3: "INTERNAL_ERROR",
446 4: "NEED_INDEX",
447 5: "TIMEOUT",
448 6: "PERMISSION_DENIED",
449 7: "BIGTABLE_ERROR",
450 8: "COMMITTED_BUT_STILL_APPLYING",
451 9: "CAPABILITY_DISABLED",
452 10: "TRY_ALTERNATE_BACKEND",
453 11: "SAFE_TIME_TOO_OLD",
454}
455var Error_ErrorCode_value = map[string]int32{
456 "BAD_REQUEST": 1,
457 "CONCURRENT_TRANSACTION": 2,
458 "INTERNAL_ERROR": 3,
459 "NEED_INDEX": 4,
460 "TIMEOUT": 5,
461 "PERMISSION_DENIED": 6,
462 "BIGTABLE_ERROR": 7,
463 "COMMITTED_BUT_STILL_APPLYING": 8,
464 "CAPABILITY_DISABLED": 9,
465 "TRY_ALTERNATE_BACKEND": 10,
466 "SAFE_TIME_TOO_OLD": 11,
467}
468
469func (x Error_ErrorCode) Enum() *Error_ErrorCode {
470 p := new(Error_ErrorCode)
471 *p = x
472 return p
473}
474func (x Error_ErrorCode) String() string {
475 return proto.EnumName(Error_ErrorCode_name, int32(x))
476}
477func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
478 value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
479 if err != nil {
480 return err
481 }
482 *x = Error_ErrorCode(value)
483 return nil
484}
485func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
486 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
487}
488
489type PutRequest_AutoIdPolicy int32
490
491const (
492 PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
493 PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
494)
495
496var PutRequest_AutoIdPolicy_name = map[int32]string{
497 0: "CURRENT",
498 1: "SEQUENTIAL",
499}
500var PutRequest_AutoIdPolicy_value = map[string]int32{
501 "CURRENT": 0,
502 "SEQUENTIAL": 1,
503}
504
505func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
506 p := new(PutRequest_AutoIdPolicy)
507 *p = x
508 return p
509}
510func (x PutRequest_AutoIdPolicy) String() string {
511 return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
512}
513func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
514 value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
515 if err != nil {
516 return err
517 }
518 *x = PutRequest_AutoIdPolicy(value)
519 return nil
520}
521func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
522 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
523}
524
525type BeginTransactionRequest_TransactionMode int32
526
527const (
528 BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0
529 BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1
530 BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2
531)
532
533var BeginTransactionRequest_TransactionMode_name = map[int32]string{
534 0: "UNKNOWN",
535 1: "READ_ONLY",
536 2: "READ_WRITE",
537}
538var BeginTransactionRequest_TransactionMode_value = map[string]int32{
539 "UNKNOWN": 0,
540 "READ_ONLY": 1,
541 "READ_WRITE": 2,
542}
543
544func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode {
545 p := new(BeginTransactionRequest_TransactionMode)
546 *p = x
547 return p
548}
549func (x BeginTransactionRequest_TransactionMode) String() string {
550 return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x))
551}
552func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error {
553 value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode")
554 if err != nil {
555 return err
556 }
557 *x = BeginTransactionRequest_TransactionMode(value)
558 return nil
559}
560func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
561 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
562}
563
564type Action struct {
565 XXX_NoUnkeyedLiteral struct{} `json:"-"`
566 XXX_unrecognized []byte `json:"-"`
567 XXX_sizecache int32 `json:"-"`
568}
569
570func (m *Action) Reset() { *m = Action{} }
571func (m *Action) String() string { return proto.CompactTextString(m) }
572func (*Action) ProtoMessage() {}
573func (*Action) Descriptor() ([]byte, []int) {
574 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
575}
576func (m *Action) XXX_Unmarshal(b []byte) error {
577 return xxx_messageInfo_Action.Unmarshal(m, b)
578}
579func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
580 return xxx_messageInfo_Action.Marshal(b, m, deterministic)
581}
582func (dst *Action) XXX_Merge(src proto.Message) {
583 xxx_messageInfo_Action.Merge(dst, src)
584}
585func (m *Action) XXX_Size() int {
586 return xxx_messageInfo_Action.Size(m)
587}
588func (m *Action) XXX_DiscardUnknown() {
589 xxx_messageInfo_Action.DiscardUnknown(m)
590}
591
592var xxx_messageInfo_Action proto.InternalMessageInfo
593
594type PropertyValue struct {
595 Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
596 BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
597 StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
598 DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
599 Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
600 Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
601 Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
602 XXX_NoUnkeyedLiteral struct{} `json:"-"`
603 XXX_unrecognized []byte `json:"-"`
604 XXX_sizecache int32 `json:"-"`
605}
606
607func (m *PropertyValue) Reset() { *m = PropertyValue{} }
608func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
609func (*PropertyValue) ProtoMessage() {}
610func (*PropertyValue) Descriptor() ([]byte, []int) {
611 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
612}
613func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
614 return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
615}
616func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
617 return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
618}
619func (dst *PropertyValue) XXX_Merge(src proto.Message) {
620 xxx_messageInfo_PropertyValue.Merge(dst, src)
621}
622func (m *PropertyValue) XXX_Size() int {
623 return xxx_messageInfo_PropertyValue.Size(m)
624}
625func (m *PropertyValue) XXX_DiscardUnknown() {
626 xxx_messageInfo_PropertyValue.DiscardUnknown(m)
627}
628
629var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
630
631func (m *PropertyValue) GetInt64Value() int64 {
632 if m != nil && m.Int64Value != nil {
633 return *m.Int64Value
634 }
635 return 0
636}
637
638func (m *PropertyValue) GetBooleanValue() bool {
639 if m != nil && m.BooleanValue != nil {
640 return *m.BooleanValue
641 }
642 return false
643}
644
645func (m *PropertyValue) GetStringValue() string {
646 if m != nil && m.StringValue != nil {
647 return *m.StringValue
648 }
649 return ""
650}
651
652func (m *PropertyValue) GetDoubleValue() float64 {
653 if m != nil && m.DoubleValue != nil {
654 return *m.DoubleValue
655 }
656 return 0
657}
658
659func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
660 if m != nil {
661 return m.Pointvalue
662 }
663 return nil
664}
665
666func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
667 if m != nil {
668 return m.Uservalue
669 }
670 return nil
671}
672
673func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
674 if m != nil {
675 return m.Referencevalue
676 }
677 return nil
678}
679
680type PropertyValue_PointValue struct {
681 X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
682 Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
683 XXX_NoUnkeyedLiteral struct{} `json:"-"`
684 XXX_unrecognized []byte `json:"-"`
685 XXX_sizecache int32 `json:"-"`
686}
687
688func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
689func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
690func (*PropertyValue_PointValue) ProtoMessage() {}
691func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
692 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
693}
694func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
695 return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
696}
697func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
698 return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
699}
700func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
701 xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
702}
703func (m *PropertyValue_PointValue) XXX_Size() int {
704 return xxx_messageInfo_PropertyValue_PointValue.Size(m)
705}
706func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
707 xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
708}
709
710var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
711
712func (m *PropertyValue_PointValue) GetX() float64 {
713 if m != nil && m.X != nil {
714 return *m.X
715 }
716 return 0
717}
718
719func (m *PropertyValue_PointValue) GetY() float64 {
720 if m != nil && m.Y != nil {
721 return *m.Y
722 }
723 return 0
724}
725
726type PropertyValue_UserValue struct {
727 Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
728 AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
729 Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
730 FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
731 FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
732 XXX_NoUnkeyedLiteral struct{} `json:"-"`
733 XXX_unrecognized []byte `json:"-"`
734 XXX_sizecache int32 `json:"-"`
735}
736
737func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
738func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
739func (*PropertyValue_UserValue) ProtoMessage() {}
740func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
741 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
742}
743func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
744 return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
745}
746func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
747 return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
748}
749func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
750 xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
751}
752func (m *PropertyValue_UserValue) XXX_Size() int {
753 return xxx_messageInfo_PropertyValue_UserValue.Size(m)
754}
755func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
756 xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
757}
758
759var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
760
761func (m *PropertyValue_UserValue) GetEmail() string {
762 if m != nil && m.Email != nil {
763 return *m.Email
764 }
765 return ""
766}
767
768func (m *PropertyValue_UserValue) GetAuthDomain() string {
769 if m != nil && m.AuthDomain != nil {
770 return *m.AuthDomain
771 }
772 return ""
773}
774
775func (m *PropertyValue_UserValue) GetNickname() string {
776 if m != nil && m.Nickname != nil {
777 return *m.Nickname
778 }
779 return ""
780}
781
782func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
783 if m != nil && m.FederatedIdentity != nil {
784 return *m.FederatedIdentity
785 }
786 return ""
787}
788
789func (m *PropertyValue_UserValue) GetFederatedProvider() string {
790 if m != nil && m.FederatedProvider != nil {
791 return *m.FederatedProvider
792 }
793 return ""
794}
795
796type PropertyValue_ReferenceValue struct {
797 App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
798 NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
799 Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
800 XXX_NoUnkeyedLiteral struct{} `json:"-"`
801 XXX_unrecognized []byte `json:"-"`
802 XXX_sizecache int32 `json:"-"`
803}
804
805func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
806func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
807func (*PropertyValue_ReferenceValue) ProtoMessage() {}
808func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
809 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
810}
811func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
812 return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
813}
814func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
815 return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
816}
817func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
818 xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
819}
820func (m *PropertyValue_ReferenceValue) XXX_Size() int {
821 return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
822}
823func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
824 xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
825}
826
827var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
828
829func (m *PropertyValue_ReferenceValue) GetApp() string {
830 if m != nil && m.App != nil {
831 return *m.App
832 }
833 return ""
834}
835
836func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
837 if m != nil && m.NameSpace != nil {
838 return *m.NameSpace
839 }
840 return ""
841}
842
843func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
844 if m != nil {
845 return m.Pathelement
846 }
847 return nil
848}
849
850type PropertyValue_ReferenceValue_PathElement struct {
851 Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
852 Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
853 Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
854 XXX_NoUnkeyedLiteral struct{} `json:"-"`
855 XXX_unrecognized []byte `json:"-"`
856 XXX_sizecache int32 `json:"-"`
857}
858
859func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
860 *m = PropertyValue_ReferenceValue_PathElement{}
861}
862func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
863func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
864func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
865 return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
866}
867func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
868 return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
869}
870func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
871 return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
872}
873func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
874 xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
875}
876func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
877 return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
878}
879func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
880 xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
881}
882
883var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
884
885func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
886 if m != nil && m.Type != nil {
887 return *m.Type
888 }
889 return ""
890}
891
892func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
893 if m != nil && m.Id != nil {
894 return *m.Id
895 }
896 return 0
897}
898
899func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
900 if m != nil && m.Name != nil {
901 return *m.Name
902 }
903 return ""
904}
905
906type Property struct {
907 Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
908 MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"`
909 Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
910 Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
911 Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
912 Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
913 FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
914 Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
915 XXX_NoUnkeyedLiteral struct{} `json:"-"`
916 XXX_unrecognized []byte `json:"-"`
917 XXX_sizecache int32 `json:"-"`
918}
919
// Standard proto.Message plumbing for Property, emitted by protoc-gen-go.
// The XXX_* methods delegate to the shared message-info record below and
// are internal to the proto runtime — not intended for direct use.
func (m *Property) Reset() { *m = Property{} }
func (m *Property) String() string { return proto.CompactTextString(m) }
func (*Property) ProtoMessage() {}

// Descriptor returns the serialized file descriptor and this message's
// index path within it.
func (*Property) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
}
func (m *Property) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Property.Unmarshal(m, b)
}
func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Property.Marshal(b, m, deterministic)
}
func (dst *Property) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Property.Merge(dst, src)
}
func (m *Property) XXX_Size() int {
	return xxx_messageInfo_Property.Size(m)
}
func (m *Property) XXX_DiscardUnknown() {
	xxx_messageInfo_Property.DiscardUnknown(m)
}

// xxx_messageInfo_Property caches marshaling metadata for Property.
var xxx_messageInfo_Property proto.InternalMessageInfo

// Defaults declared for Property's proto2 optional fields
// (see the `def=` clauses in the struct field tags).
const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
const Default_Property_Searchable bool = false
const Default_Property_Locale string = "en"

// Nil-safe getters: each returns the field's value, or its default when the
// receiver or the field pointer is nil.

func (m *Property) GetMeaning() Property_Meaning {
	if m != nil && m.Meaning != nil {
		return *m.Meaning
	}
	return Default_Property_Meaning
}

func (m *Property) GetMeaningUri() string {
	if m != nil && m.MeaningUri != nil {
		return *m.MeaningUri
	}
	return ""
}

func (m *Property) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

func (m *Property) GetValue() *PropertyValue {
	if m != nil {
		return m.Value
	}
	return nil
}

func (m *Property) GetMultiple() bool {
	if m != nil && m.Multiple != nil {
		return *m.Multiple
	}
	return false
}

func (m *Property) GetSearchable() bool {
	if m != nil && m.Searchable != nil {
		return *m.Searchable
	}
	return Default_Property_Searchable
}

// GetFtsTokenizationOption falls back to Property_HTML when unset (no
// Default_ constant was generated for this field).
func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
	if m != nil && m.FtsTokenizationOption != nil {
		return *m.FtsTokenizationOption
	}
	return Property_HTML
}

func (m *Property) GetLocale() string {
	if m != nil && m.Locale != nil {
		return *m.Locale
	}
	return Default_Property_Locale
}
1003
// Path is an entity key path: an ordered list of (kind, id-or-name)
// elements, encoded as a proto2 group.
type Path struct {
	Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for Path; XXX_* methods delegate to the
// shared message-info record.
func (m *Path) Reset() { *m = Path{} }
func (m *Path) String() string { return proto.CompactTextString(m) }
func (*Path) ProtoMessage() {}
func (*Path) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
}
func (m *Path) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Path.Unmarshal(m, b)
}
func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Path.Marshal(b, m, deterministic)
}
func (dst *Path) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Path.Merge(dst, src)
}
func (m *Path) XXX_Size() int {
	return xxx_messageInfo_Path.Size(m)
}
func (m *Path) XXX_DiscardUnknown() {
	xxx_messageInfo_Path.DiscardUnknown(m)
}

var xxx_messageInfo_Path proto.InternalMessageInfo

// GetElement is nil-safe: it returns the element slice, or nil on a nil receiver.
func (m *Path) GetElement() []*Path_Element {
	if m != nil {
		return m.Element
	}
	return nil
}
1041
// Path_Element is one step of a key path: a required kind (Type) plus
// either a numeric Id or a string Name.
type Path_Element struct {
	Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
	Id   *int64  `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
	Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for Path_Element; XXX_* methods delegate
// to the shared message-info record.
func (m *Path_Element) Reset() { *m = Path_Element{} }
func (m *Path_Element) String() string { return proto.CompactTextString(m) }
func (*Path_Element) ProtoMessage() {}
func (*Path_Element) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
}
func (m *Path_Element) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Path_Element.Unmarshal(m, b)
}
func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
}
func (dst *Path_Element) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Path_Element.Merge(dst, src)
}
func (m *Path_Element) XXX_Size() int {
	return xxx_messageInfo_Path_Element.Size(m)
}
func (m *Path_Element) XXX_DiscardUnknown() {
	xxx_messageInfo_Path_Element.DiscardUnknown(m)
}

var xxx_messageInfo_Path_Element proto.InternalMessageInfo

// Nil-safe getters: each returns the field's value, or its zero value when
// the receiver or the field pointer is nil.

func (m *Path_Element) GetType() string {
	if m != nil && m.Type != nil {
		return *m.Type
	}
	return ""
}

func (m *Path_Element) GetId() int64 {
	if m != nil && m.Id != nil {
		return *m.Id
	}
	return 0
}

func (m *Path_Element) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}
1095
// Reference is a full entity key: app id, optional namespace, and key path.
type Reference struct {
	App       *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
	NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
	Path      *Path   `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for Reference; XXX_* methods delegate to
// the shared message-info record.
func (m *Reference) Reset() { *m = Reference{} }
func (m *Reference) String() string { return proto.CompactTextString(m) }
func (*Reference) ProtoMessage() {}
func (*Reference) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
}
func (m *Reference) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Reference.Unmarshal(m, b)
}
func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
}
func (dst *Reference) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Reference.Merge(dst, src)
}
func (m *Reference) XXX_Size() int {
	return xxx_messageInfo_Reference.Size(m)
}
func (m *Reference) XXX_DiscardUnknown() {
	xxx_messageInfo_Reference.DiscardUnknown(m)
}

var xxx_messageInfo_Reference proto.InternalMessageInfo

// Nil-safe getters: each returns the field's value, or its zero value when
// the receiver or the field pointer is nil.

func (m *Reference) GetApp() string {
	if m != nil && m.App != nil {
		return *m.App
	}
	return ""
}

func (m *Reference) GetNameSpace() string {
	if m != nil && m.NameSpace != nil {
		return *m.NameSpace
	}
	return ""
}

func (m *Reference) GetPath() *Path {
	if m != nil {
		return m.Path
	}
	return nil
}
1149
// User describes a user identity value as stored in the datastore
// (email/auth-domain are required; federated identity fields are optional).
type User struct {
	Email             *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
	AuthDomain        *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
	Nickname          *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
	FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
	FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for User; XXX_* methods delegate to the
// shared message-info record.
func (m *User) Reset() { *m = User{} }
func (m *User) String() string { return proto.CompactTextString(m) }
func (*User) ProtoMessage() {}
func (*User) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
}
func (m *User) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_User.Unmarshal(m, b)
}
func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_User.Marshal(b, m, deterministic)
}
func (dst *User) XXX_Merge(src proto.Message) {
	xxx_messageInfo_User.Merge(dst, src)
}
func (m *User) XXX_Size() int {
	return xxx_messageInfo_User.Size(m)
}
func (m *User) XXX_DiscardUnknown() {
	xxx_messageInfo_User.DiscardUnknown(m)
}

var xxx_messageInfo_User proto.InternalMessageInfo

// Nil-safe getters: each returns the field's value, or "" when the receiver
// or the field pointer is nil.

func (m *User) GetEmail() string {
	if m != nil && m.Email != nil {
		return *m.Email
	}
	return ""
}

func (m *User) GetAuthDomain() string {
	if m != nil && m.AuthDomain != nil {
		return *m.AuthDomain
	}
	return ""
}

func (m *User) GetNickname() string {
	if m != nil && m.Nickname != nil {
		return *m.Nickname
	}
	return ""
}

func (m *User) GetFederatedIdentity() string {
	if m != nil && m.FederatedIdentity != nil {
		return *m.FederatedIdentity
	}
	return ""
}

func (m *User) GetFederatedProvider() string {
	if m != nil && m.FederatedProvider != nil {
		return *m.FederatedProvider
	}
	return ""
}
1219
// EntityProto is a stored entity: its key, entity group, optional owner,
// kind metadata, and indexed (Property) / unindexed (RawProperty) values.
type EntityProto struct {
	Key         *Reference        `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
	EntityGroup *Path             `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
	Owner       *User             `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
	Kind        *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
	KindUri     *string           `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
	Property    []*Property       `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
	RawProperty []*Property       `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
	Rank        *int32            `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for EntityProto; XXX_* methods delegate
// to the shared message-info record.
func (m *EntityProto) Reset() { *m = EntityProto{} }
func (m *EntityProto) String() string { return proto.CompactTextString(m) }
func (*EntityProto) ProtoMessage() {}
func (*EntityProto) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
}
func (m *EntityProto) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_EntityProto.Unmarshal(m, b)
}
func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
}
func (dst *EntityProto) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EntityProto.Merge(dst, src)
}
func (m *EntityProto) XXX_Size() int {
	return xxx_messageInfo_EntityProto.Size(m)
}
func (m *EntityProto) XXX_DiscardUnknown() {
	xxx_messageInfo_EntityProto.DiscardUnknown(m)
}

var xxx_messageInfo_EntityProto proto.InternalMessageInfo

// Nil-safe getters: each returns the field's value, or its zero value when
// the receiver or the field is nil. GetKind falls back to
// EntityProto_GD_CONTACT (no Default_ constant was generated).

func (m *EntityProto) GetKey() *Reference {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *EntityProto) GetEntityGroup() *Path {
	if m != nil {
		return m.EntityGroup
	}
	return nil
}

func (m *EntityProto) GetOwner() *User {
	if m != nil {
		return m.Owner
	}
	return nil
}

func (m *EntityProto) GetKind() EntityProto_Kind {
	if m != nil && m.Kind != nil {
		return *m.Kind
	}
	return EntityProto_GD_CONTACT
}

func (m *EntityProto) GetKindUri() string {
	if m != nil && m.KindUri != nil {
		return *m.KindUri
	}
	return ""
}

func (m *EntityProto) GetProperty() []*Property {
	if m != nil {
		return m.Property
	}
	return nil
}

func (m *EntityProto) GetRawProperty() []*Property {
	if m != nil {
		return m.RawProperty
	}
	return nil
}

func (m *EntityProto) GetRank() int32 {
	if m != nil && m.Rank != nil {
		return *m.Rank
	}
	return 0
}
1313
// CompositeProperty pairs a composite-index id with the index's encoded
// property values for one entity.
type CompositeProperty struct {
	IndexId *int64   `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
	Value   []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for CompositeProperty; XXX_* methods
// delegate to the shared message-info record.
func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
func (*CompositeProperty) ProtoMessage() {}
func (*CompositeProperty) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
}
func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
}
func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
}
func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompositeProperty.Merge(dst, src)
}
func (m *CompositeProperty) XXX_Size() int {
	return xxx_messageInfo_CompositeProperty.Size(m)
}
func (m *CompositeProperty) XXX_DiscardUnknown() {
	xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
}

var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo

// Nil-safe getters: each returns the field's value, or its zero value when
// the receiver or the field pointer is nil.

func (m *CompositeProperty) GetIndexId() int64 {
	if m != nil && m.IndexId != nil {
		return *m.IndexId
	}
	return 0
}

func (m *CompositeProperty) GetValue() []string {
	if m != nil {
		return m.Value
	}
	return nil
}
1359
// Index is an index definition: the indexed entity kind, whether the index
// includes the ancestor path, and the ordered property list (a proto2 group).
type Index struct {
	EntityType *string           `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
	Ancestor   *bool             `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
	Property   []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for Index; XXX_* methods delegate to the
// shared message-info record.
func (m *Index) Reset() { *m = Index{} }
func (m *Index) String() string { return proto.CompactTextString(m) }
func (*Index) ProtoMessage() {}
func (*Index) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
}
func (m *Index) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Index.Unmarshal(m, b)
}
func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Index.Marshal(b, m, deterministic)
}
func (dst *Index) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Index.Merge(dst, src)
}
func (m *Index) XXX_Size() int {
	return xxx_messageInfo_Index.Size(m)
}
func (m *Index) XXX_DiscardUnknown() {
	xxx_messageInfo_Index.DiscardUnknown(m)
}

var xxx_messageInfo_Index proto.InternalMessageInfo

// Nil-safe getters: each returns the field's value, or its zero value when
// the receiver or the field is nil.

func (m *Index) GetEntityType() string {
	if m != nil && m.EntityType != nil {
		return *m.EntityType
	}
	return ""
}

func (m *Index) GetAncestor() bool {
	if m != nil && m.Ancestor != nil {
		return *m.Ancestor
	}
	return false
}

func (m *Index) GetProperty() []*Index_Property {
	if m != nil {
		return m.Property
	}
	return nil
}
1413
// Index_Property is one column of an Index: a required property name and a
// sort direction defaulting to ASCENDING (def=1 in the field tag).
type Index_Property struct {
	Name      *string                   `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
	Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for Index_Property; XXX_* methods
// delegate to the shared message-info record.
func (m *Index_Property) Reset() { *m = Index_Property{} }
func (m *Index_Property) String() string { return proto.CompactTextString(m) }
func (*Index_Property) ProtoMessage() {}
func (*Index_Property) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
}
func (m *Index_Property) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Index_Property.Unmarshal(m, b)
}
func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
}
func (dst *Index_Property) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Index_Property.Merge(dst, src)
}
func (m *Index_Property) XXX_Size() int {
	return xxx_messageInfo_Index_Property.Size(m)
}
func (m *Index_Property) XXX_DiscardUnknown() {
	xxx_messageInfo_Index_Property.DiscardUnknown(m)
}

var xxx_messageInfo_Index_Property proto.InternalMessageInfo

// Default_Index_Property_Direction is the declared default for Direction.
const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING

// Nil-safe getters: each returns the field's value, or its default when the
// receiver or the field pointer is nil.

func (m *Index_Property) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

func (m *Index_Property) GetDirection() Index_Property_Direction {
	if m != nil && m.Direction != nil {
		return *m.Direction
	}
	return Default_Index_Property_Direction
}
1461
// CompositeIndex is a deployed composite index: its owning app, numeric id,
// the Index definition, and the index's serving state.
type CompositeIndex struct {
	AppId             *string               `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
	Id                *int64                `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
	Definition        *Index                `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
	State             *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
	OnlyUseIfRequired *bool                 `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for CompositeIndex; XXX_* methods
// delegate to the shared message-info record.
func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
func (*CompositeIndex) ProtoMessage() {}
func (*CompositeIndex) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
}
func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
}
func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
}
func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompositeIndex.Merge(dst, src)
}
func (m *CompositeIndex) XXX_Size() int {
	return xxx_messageInfo_CompositeIndex.Size(m)
}
func (m *CompositeIndex) XXX_DiscardUnknown() {
	xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
}

var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo

// Default_CompositeIndex_OnlyUseIfRequired is the declared default for
// OnlyUseIfRequired (def=0 in the field tag).
const Default_CompositeIndex_OnlyUseIfRequired bool = false

// Nil-safe getters: each returns the field's value, or its default/zero
// value when the receiver or the field is nil. GetState falls back to
// CompositeIndex_WRITE_ONLY (no Default_ constant was generated).

func (m *CompositeIndex) GetAppId() string {
	if m != nil && m.AppId != nil {
		return *m.AppId
	}
	return ""
}

func (m *CompositeIndex) GetId() int64 {
	if m != nil && m.Id != nil {
		return *m.Id
	}
	return 0
}

func (m *CompositeIndex) GetDefinition() *Index {
	if m != nil {
		return m.Definition
	}
	return nil
}

func (m *CompositeIndex) GetState() CompositeIndex_State {
	if m != nil && m.State != nil {
		return *m.State
	}
	return CompositeIndex_WRITE_ONLY
}

func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
	if m != nil && m.OnlyUseIfRequired != nil {
		return *m.OnlyUseIfRequired
	}
	return Default_CompositeIndex_OnlyUseIfRequired
}
1533
// IndexPostfix describes a position within an index scan by the trailing
// (property, value) pairs plus an optional entity key; Before defaults to true.
type IndexPostfix struct {
	IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
	Key        *Reference                 `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
	Before     *bool                      `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for IndexPostfix; XXX_* methods delegate
// to the shared message-info record.
func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
func (*IndexPostfix) ProtoMessage() {}
func (*IndexPostfix) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
}
func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
}
func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
}
func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IndexPostfix.Merge(dst, src)
}
func (m *IndexPostfix) XXX_Size() int {
	return xxx_messageInfo_IndexPostfix.Size(m)
}
func (m *IndexPostfix) XXX_DiscardUnknown() {
	xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
}

var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo

// Default_IndexPostfix_Before is the declared default for Before (def=1).
const Default_IndexPostfix_Before bool = true

// Nil-safe getters: each returns the field's value, or its default/zero
// value when the receiver or the field is nil.

func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
	if m != nil {
		return m.IndexValue
	}
	return nil
}

func (m *IndexPostfix) GetKey() *Reference {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *IndexPostfix) GetBefore() bool {
	if m != nil && m.Before != nil {
		return *m.Before
	}
	return Default_IndexPostfix_Before
}
1589
// IndexPostfix_IndexValue is one (property name, value) pair of an
// IndexPostfix; both fields are required.
type IndexPostfix_IndexValue struct {
	PropertyName *string        `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
	Value        *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for IndexPostfix_IndexValue; XXX_*
// methods delegate to the shared message-info record.
func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
func (*IndexPostfix_IndexValue) ProtoMessage() {}
func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
}
func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
}
func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
}
func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
}
func (m *IndexPostfix_IndexValue) XXX_Size() int {
	return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
}
func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
	xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
}

var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo

// Nil-safe getters: each returns the field's value, or its zero value when
// the receiver or the field is nil.

func (m *IndexPostfix_IndexValue) GetPropertyName() string {
	if m != nil && m.PropertyName != nil {
		return *m.PropertyName
	}
	return ""
}

func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
	if m != nil {
		return m.Value
	}
	return nil
}
1635
// IndexPosition is an opaque encoded index position (Key) plus a Before
// flag that defaults to true.
type IndexPosition struct {
	Key    *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
	Before *bool   `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for IndexPosition; XXX_* methods
// delegate to the shared message-info record.
func (m *IndexPosition) Reset() { *m = IndexPosition{} }
func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
func (*IndexPosition) ProtoMessage() {}
func (*IndexPosition) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
}
func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
}
func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
}
func (dst *IndexPosition) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IndexPosition.Merge(dst, src)
}
func (m *IndexPosition) XXX_Size() int {
	return xxx_messageInfo_IndexPosition.Size(m)
}
func (m *IndexPosition) XXX_DiscardUnknown() {
	xxx_messageInfo_IndexPosition.DiscardUnknown(m)
}

var xxx_messageInfo_IndexPosition proto.InternalMessageInfo

// Default_IndexPosition_Before is the declared default for Before (def=1).
const Default_IndexPosition_Before bool = true

// Nil-safe getters: each returns the field's value, or its default/zero
// value when the receiver or the field pointer is nil.

func (m *IndexPosition) GetKey() string {
	if m != nil && m.Key != nil {
		return *m.Key
	}
	return ""
}

func (m *IndexPosition) GetBefore() bool {
	if m != nil && m.Before != nil {
		return *m.Before
	}
	return Default_IndexPosition_Before
}
1683
// Snapshot carries a required timestamp (Ts) identifying a datastore
// snapshot.
type Snapshot struct {
	Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for Snapshot; XXX_* methods delegate to
// the shared message-info record.
func (m *Snapshot) Reset() { *m = Snapshot{} }
func (m *Snapshot) String() string { return proto.CompactTextString(m) }
func (*Snapshot) ProtoMessage() {}
func (*Snapshot) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
}
func (m *Snapshot) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Snapshot.Unmarshal(m, b)
}
func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
}
func (dst *Snapshot) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Snapshot.Merge(dst, src)
}
func (m *Snapshot) XXX_Size() int {
	return xxx_messageInfo_Snapshot.Size(m)
}
func (m *Snapshot) XXX_DiscardUnknown() {
	xxx_messageInfo_Snapshot.DiscardUnknown(m)
}

var xxx_messageInfo_Snapshot proto.InternalMessageInfo

// GetTs is nil-safe: it returns the timestamp, or 0 when unset.
func (m *Snapshot) GetTs() int64 {
	if m != nil && m.Ts != nil {
		return *m.Ts
	}
	return 0
}
1721
// InternalHeader carries request metadata (a QoS string) attached to
// datastore RPC messages via their Header field.
type InternalHeader struct {
	Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for InternalHeader; XXX_* methods
// delegate to the shared message-info record.
func (m *InternalHeader) Reset() { *m = InternalHeader{} }
func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
func (*InternalHeader) ProtoMessage() {}
func (*InternalHeader) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
}
func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
}
func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
}
func (dst *InternalHeader) XXX_Merge(src proto.Message) {
	xxx_messageInfo_InternalHeader.Merge(dst, src)
}
func (m *InternalHeader) XXX_Size() int {
	return xxx_messageInfo_InternalHeader.Size(m)
}
func (m *InternalHeader) XXX_DiscardUnknown() {
	xxx_messageInfo_InternalHeader.DiscardUnknown(m)
}

var xxx_messageInfo_InternalHeader proto.InternalMessageInfo

// GetQos is nil-safe: it returns the QoS string, or "" when unset.
func (m *InternalHeader) GetQos() string {
	if m != nil && m.Qos != nil {
		return *m.Qos
	}
	return ""
}
1759
// Transaction identifies an open datastore transaction by its opaque
// Handle within an App; MarkChanges defaults to false.
type Transaction struct {
	Header      *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
	Handle      *uint64         `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
	App         *string         `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
	MarkChanges *bool           `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
	// XXX_* fields are internal proto-runtime bookkeeping; excluded from JSON.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing for Transaction; XXX_* methods delegate
// to the shared message-info record.
func (m *Transaction) Reset() { *m = Transaction{} }
func (m *Transaction) String() string { return proto.CompactTextString(m) }
func (*Transaction) ProtoMessage() {}
func (*Transaction) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
}
func (m *Transaction) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Transaction.Unmarshal(m, b)
}
func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
}
func (dst *Transaction) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Transaction.Merge(dst, src)
}
func (m *Transaction) XXX_Size() int {
	return xxx_messageInfo_Transaction.Size(m)
}
func (m *Transaction) XXX_DiscardUnknown() {
	xxx_messageInfo_Transaction.DiscardUnknown(m)
}

var xxx_messageInfo_Transaction proto.InternalMessageInfo

// Default_Transaction_MarkChanges is the declared default for MarkChanges
// (def=0 in the field tag).
const Default_Transaction_MarkChanges bool = false

// Nil-safe getters: each returns the field's value, or its default/zero
// value when the receiver or the field is nil.

func (m *Transaction) GetHeader() *InternalHeader {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *Transaction) GetHandle() uint64 {
	if m != nil && m.Handle != nil {
		return *m.Handle
	}
	return 0
}

func (m *Transaction) GetApp() string {
	if m != nil && m.App != nil {
		return *m.App
	}
	return ""
}

func (m *Transaction) GetMarkChanges() bool {
	if m != nil && m.MarkChanges != nil {
		return *m.MarkChanges
	}
	return Default_Transaction_MarkChanges
}
1823
1824type Query struct {
1825 Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
1826 App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
1827 NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
1828 Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
1829 Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
1830 Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
1831 SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
1832 Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
1833 Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
1834 Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
1835 Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
1836 Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
1837 CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
1838 EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
1839 CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
1840 RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
1841 KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
1842 Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
1843 Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
1844 FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
1845 Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
1846 PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
1847 GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
1848 Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
1849 MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
1850 SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
1851 PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
1852 XXX_NoUnkeyedLiteral struct{} `json:"-"`
1853 XXX_unrecognized []byte `json:"-"`
1854 XXX_sizecache int32 `json:"-"`
1855}
1856
// Methods and accessors for the generated Query message (datastore v3).
// NOTE(review): protoc-generated code — do not hand-edit; regeneration will
// discard manual changes.
func (m *Query) Reset() { *m = Query{} }
// String renders the message in the proto compact text format.
func (m *Query) String() string { return proto.CompactTextString(m) }
func (*Query) ProtoMessage() {}
// Descriptor returns the raw file-descriptor bytes and this message's index path within it.
func (*Query) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
}
// The XXX_* methods delegate wire (un)marshalling to the shared message-info table.
func (m *Query) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Query.Unmarshal(m, b)
}
func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Query.Marshal(b, m, deterministic)
}
func (dst *Query) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Query.Merge(dst, src)
}
func (m *Query) XXX_Size() int {
	return xxx_messageInfo_Query.Size(m)
}
func (m *Query) XXX_DiscardUnknown() {
	xxx_messageInfo_Query.DiscardUnknown(m)
}

var xxx_messageInfo_Query proto.InternalMessageInfo

// Proto2 declared defaults for Query; the getters return these when the
// corresponding optional field is unset.
const Default_Query_Offset int32 = 0
const Default_Query_RequirePerfectPlan bool = false
const Default_Query_KeysOnly bool = false
const Default_Query_Compile bool = false
const Default_Query_PersistOffset bool = false

// The Get* accessors below are nil-safe: each returns the field value, or the
// declared default / zero value when the receiver or the optional field is nil.
func (m *Query) GetHeader() *InternalHeader {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *Query) GetApp() string {
	if m != nil && m.App != nil {
		return *m.App
	}
	return ""
}

func (m *Query) GetNameSpace() string {
	if m != nil && m.NameSpace != nil {
		return *m.NameSpace
	}
	return ""
}

func (m *Query) GetKind() string {
	if m != nil && m.Kind != nil {
		return *m.Kind
	}
	return ""
}

func (m *Query) GetAncestor() *Reference {
	if m != nil {
		return m.Ancestor
	}
	return nil
}

func (m *Query) GetFilter() []*Query_Filter {
	if m != nil {
		return m.Filter
	}
	return nil
}

func (m *Query) GetSearchQuery() string {
	if m != nil && m.SearchQuery != nil {
		return *m.SearchQuery
	}
	return ""
}

func (m *Query) GetOrder() []*Query_Order {
	if m != nil {
		return m.Order
	}
	return nil
}

// GetHint falls back to the enum's first value when unset.
func (m *Query) GetHint() Query_Hint {
	if m != nil && m.Hint != nil {
		return *m.Hint
	}
	return Query_ORDER_FIRST
}

func (m *Query) GetCount() int32 {
	if m != nil && m.Count != nil {
		return *m.Count
	}
	return 0
}

func (m *Query) GetOffset() int32 {
	if m != nil && m.Offset != nil {
		return *m.Offset
	}
	return Default_Query_Offset
}

func (m *Query) GetLimit() int32 {
	if m != nil && m.Limit != nil {
		return *m.Limit
	}
	return 0
}

func (m *Query) GetCompiledCursor() *CompiledCursor {
	if m != nil {
		return m.CompiledCursor
	}
	return nil
}

func (m *Query) GetEndCompiledCursor() *CompiledCursor {
	if m != nil {
		return m.EndCompiledCursor
	}
	return nil
}

func (m *Query) GetCompositeIndex() []*CompositeIndex {
	if m != nil {
		return m.CompositeIndex
	}
	return nil
}

func (m *Query) GetRequirePerfectPlan() bool {
	if m != nil && m.RequirePerfectPlan != nil {
		return *m.RequirePerfectPlan
	}
	return Default_Query_RequirePerfectPlan
}

func (m *Query) GetKeysOnly() bool {
	if m != nil && m.KeysOnly != nil {
		return *m.KeysOnly
	}
	return Default_Query_KeysOnly
}

func (m *Query) GetTransaction() *Transaction {
	if m != nil {
		return m.Transaction
	}
	return nil
}

func (m *Query) GetCompile() bool {
	if m != nil && m.Compile != nil {
		return *m.Compile
	}
	return Default_Query_Compile
}

func (m *Query) GetFailoverMs() int64 {
	if m != nil && m.FailoverMs != nil {
		return *m.FailoverMs
	}
	return 0
}

func (m *Query) GetStrong() bool {
	if m != nil && m.Strong != nil {
		return *m.Strong
	}
	return false
}

func (m *Query) GetPropertyName() []string {
	if m != nil {
		return m.PropertyName
	}
	return nil
}

func (m *Query) GetGroupByPropertyName() []string {
	if m != nil {
		return m.GroupByPropertyName
	}
	return nil
}

func (m *Query) GetDistinct() bool {
	if m != nil && m.Distinct != nil {
		return *m.Distinct
	}
	return false
}

func (m *Query) GetMinSafeTimeSeconds() int64 {
	if m != nil && m.MinSafeTimeSeconds != nil {
		return *m.MinSafeTimeSeconds
	}
	return 0
}

func (m *Query) GetSafeReplicaName() []string {
	if m != nil {
		return m.SafeReplicaName
	}
	return nil
}

func (m *Query) GetPersistOffset() bool {
	if m != nil && m.PersistOffset != nil {
		return *m.PersistOffset
	}
	return Default_Query_PersistOffset
}
2075
// Query_Filter is the generated nested message appengine.Query.Filter:
// a comparison operator applied to one or more properties.
// NOTE(review): protoc-generated code — do not hand-edit.
type Query_Filter struct {
	Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
	Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *Query_Filter) Reset() { *m = Query_Filter{} }
func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
func (*Query_Filter) ProtoMessage() {}
func (*Query_Filter) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
}
func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
}
func (dst *Query_Filter) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Query_Filter.Merge(dst, src)
}
func (m *Query_Filter) XXX_Size() int {
	return xxx_messageInfo_Query_Filter.Size(m)
}
func (m *Query_Filter) XXX_DiscardUnknown() {
	xxx_messageInfo_Query_Filter.DiscardUnknown(m)
}

var xxx_messageInfo_Query_Filter proto.InternalMessageInfo

// GetOp is nil-safe; it returns the enum's first value when the field is unset.
func (m *Query_Filter) GetOp() Query_Filter_Operator {
	if m != nil && m.Op != nil {
		return *m.Op
	}
	return Query_Filter_LESS_THAN
}

func (m *Query_Filter) GetProperty() []*Property {
	if m != nil {
		return m.Property
	}
	return nil
}
2121
// Query_Order is the generated nested message appengine.Query.Order:
// a sort specification (property name plus direction, default ASCENDING).
// NOTE(review): protoc-generated code — do not hand-edit.
type Query_Order struct {
	Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
	Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *Query_Order) Reset() { *m = Query_Order{} }
func (m *Query_Order) String() string { return proto.CompactTextString(m) }
func (*Query_Order) ProtoMessage() {}
func (*Query_Order) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *Query_Order) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Query_Order.Unmarshal(m, b)
}
func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
}
func (dst *Query_Order) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Query_Order.Merge(dst, src)
}
func (m *Query_Order) XXX_Size() int {
	return xxx_messageInfo_Query_Order.Size(m)
}
func (m *Query_Order) XXX_DiscardUnknown() {
	xxx_messageInfo_Query_Order.DiscardUnknown(m)
}

var xxx_messageInfo_Query_Order proto.InternalMessageInfo

// Proto2 declared default for the direction field (def=1 in the tag above).
const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING

func (m *Query_Order) GetProperty() string {
	if m != nil && m.Property != nil {
		return *m.Property
	}
	return ""
}

func (m *Query_Order) GetDirection() Query_Order_Direction {
	if m != nil && m.Direction != nil {
		return *m.Direction
	}
	return Default_Query_Order_Direction
}
2169
// CompiledQuery is the generated message for a datastore v3 compiled query
// plan (primary scan, optional merge-join scans, and post-filters).
// NOTE(review): protoc-generated code — do not hand-edit.
type CompiledQuery struct {
	// The "group"-encoded fields below use the legacy proto1 group wire format.
	Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
	Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
	IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
	Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
	Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
	KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
	PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
	DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
	Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
func (*CompiledQuery) ProtoMessage() {}
func (*CompiledQuery) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
}
func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
}
func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompiledQuery.Merge(dst, src)
}
func (m *CompiledQuery) XXX_Size() int {
	return xxx_messageInfo_CompiledQuery.Size(m)
}
func (m *CompiledQuery) XXX_DiscardUnknown() {
	xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
}

var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo

// Proto2 declared default for the offset field.
const Default_CompiledQuery_Offset int32 = 0

// Nil-safe accessors: each returns the field value, or the declared
// default / zero value when the receiver or the optional field is nil.
func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
	if m != nil {
		return m.Primaryscan
	}
	return nil
}

func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
	if m != nil {
		return m.Mergejoinscan
	}
	return nil
}

func (m *CompiledQuery) GetIndexDef() *Index {
	if m != nil {
		return m.IndexDef
	}
	return nil
}

func (m *CompiledQuery) GetOffset() int32 {
	if m != nil && m.Offset != nil {
		return *m.Offset
	}
	return Default_CompiledQuery_Offset
}

func (m *CompiledQuery) GetLimit() int32 {
	if m != nil && m.Limit != nil {
		return *m.Limit
	}
	return 0
}

func (m *CompiledQuery) GetKeysOnly() bool {
	if m != nil && m.KeysOnly != nil {
		return *m.KeysOnly
	}
	return false
}

func (m *CompiledQuery) GetPropertyName() []string {
	if m != nil {
		return m.PropertyName
	}
	return nil
}

func (m *CompiledQuery) GetDistinctInfixSize() int32 {
	if m != nil && m.DistinctInfixSize != nil {
		return *m.DistinctInfixSize
	}
	return 0
}

func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
	if m != nil {
		return m.Entityfilter
	}
	return nil
}
2273
// CompiledQuery_PrimaryScan is the generated nested message describing the
// index range scanned by a compiled query (start/end keys with inclusivity,
// plus optional postfix values).
// NOTE(review): protoc-generated code — do not hand-edit.
type CompiledQuery_PrimaryScan struct {
	IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"`
	StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
	StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"`
	EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"`
	EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"`
	StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
	EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
	EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
}
func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
}
func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
}
func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
	return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
}
func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
	xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
}

var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo

// Nil-safe accessors: each returns the field value, or the zero value when
// the receiver or the optional field is nil.
func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
	if m != nil && m.IndexName != nil {
		return *m.IndexName
	}
	return ""
}

func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
	if m != nil && m.StartKey != nil {
		return *m.StartKey
	}
	return ""
}

func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
	if m != nil && m.StartInclusive != nil {
		return *m.StartInclusive
	}
	return false
}

func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
	if m != nil && m.EndKey != nil {
		return *m.EndKey
	}
	return ""
}

func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
	if m != nil && m.EndInclusive != nil {
		return *m.EndInclusive
	}
	return false
}

func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
	if m != nil {
		return m.StartPostfixValue
	}
	return nil
}

func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
	if m != nil {
		return m.EndPostfixValue
	}
	return nil
}

func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
	if m != nil && m.EndUnappliedLogTimestampUs != nil {
		return *m.EndUnappliedLogTimestampUs
	}
	return 0
}
2367
// CompiledQuery_MergeJoinScan is the generated nested message describing one
// secondary index scan participating in a merge join.
// NOTE(review): protoc-generated code — do not hand-edit.
type CompiledQuery_MergeJoinScan struct {
	IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
	PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
	ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
}
func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
}
func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
}
func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
	return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
}
func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
	xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
}

var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo

// Proto2 declared default for the value_prefix field (def=0 in the tag above).
const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false

func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
	if m != nil && m.IndexName != nil {
		return *m.IndexName
	}
	return ""
}

func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
	if m != nil {
		return m.PrefixValue
	}
	return nil
}

func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
	if m != nil && m.ValuePrefix != nil {
		return *m.ValuePrefix
	}
	return Default_CompiledQuery_MergeJoinScan_ValuePrefix
}
2423
// CompiledQuery_EntityFilter is the generated nested message describing a
// post-scan entity filter (distinct/kind/ancestor constraints).
// NOTE(review): protoc-generated code — do not hand-edit.
type CompiledQuery_EntityFilter struct {
	Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
	Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
	Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
func (*CompiledQuery_EntityFilter) ProtoMessage() {}
func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
}
func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
}
func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
}
func (m *CompiledQuery_EntityFilter) XXX_Size() int {
	return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
}
func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
	xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
}

var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo

// Proto2 declared default for the distinct field (def=0 in the tag above).
const Default_CompiledQuery_EntityFilter_Distinct bool = false

func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
	if m != nil && m.Distinct != nil {
		return *m.Distinct
	}
	return Default_CompiledQuery_EntityFilter_Distinct
}

func (m *CompiledQuery_EntityFilter) GetKind() string {
	if m != nil && m.Kind != nil {
		return *m.Kind
	}
	return ""
}

func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
	if m != nil {
		return m.Ancestor
	}
	return nil
}
2479
// CompiledCursor is the generated message wrapping an optional query cursor
// position (legacy proto1 group encoding).
// NOTE(review): protoc-generated code — do not hand-edit.
type CompiledCursor struct {
	Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
func (*CompiledCursor) ProtoMessage() {}
func (*CompiledCursor) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
}
func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
}
func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompiledCursor.Merge(dst, src)
}
func (m *CompiledCursor) XXX_Size() int {
	return xxx_messageInfo_CompiledCursor.Size(m)
}
func (m *CompiledCursor) XXX_DiscardUnknown() {
	xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
}

var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo

// GetPosition is nil-safe; it returns nil when the receiver or field is unset.
func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
	if m != nil {
		return m.Position
	}
	return nil
}
2517
// CompiledCursor_Position is the generated nested message locating a cursor
// within an index: by start key, by index values, or by entity key.
// NOTE(review): protoc-generated code — do not hand-edit.
type CompiledCursor_Position struct {
	StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
	Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
	Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
	StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
func (*CompiledCursor_Position) ProtoMessage() {}
func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
}
func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
}
func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
}
func (m *CompiledCursor_Position) XXX_Size() int {
	return xxx_messageInfo_CompiledCursor_Position.Size(m)
}
func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
	xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
}

var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo

// Proto2 declared default for start_inclusive (def=1 in the tag above): true.
const Default_CompiledCursor_Position_StartInclusive bool = true

func (m *CompiledCursor_Position) GetStartKey() string {
	if m != nil && m.StartKey != nil {
		return *m.StartKey
	}
	return ""
}

func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
	if m != nil {
		return m.Indexvalue
	}
	return nil
}

func (m *CompiledCursor_Position) GetKey() *Reference {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *CompiledCursor_Position) GetStartInclusive() bool {
	if m != nil && m.StartInclusive != nil {
		return *m.StartInclusive
	}
	return Default_CompiledCursor_Position_StartInclusive
}
2581
// CompiledCursor_Position_IndexValue is the generated nested message pairing
// an (optional) property name with its index value at the cursor position.
// NOTE(review): protoc-generated code — do not hand-edit.
type CompiledCursor_Position_IndexValue struct {
	Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
	Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
}
func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
}
func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
}
func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
	return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
}
func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
	xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
}

var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo

func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
	if m != nil && m.Property != nil {
		return *m.Property
	}
	return ""
}

func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
	if m != nil {
		return m.Value
	}
	return nil
}
2627
// Cursor is the generated message for an opaque datastore query cursor
// (a fixed64 handle plus an optional app id).
// NOTE(review): protoc-generated code — do not hand-edit.
type Cursor struct {
	Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
	App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *Cursor) Reset() { *m = Cursor{} }
func (m *Cursor) String() string { return proto.CompactTextString(m) }
func (*Cursor) ProtoMessage() {}
func (*Cursor) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *Cursor) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Cursor.Unmarshal(m, b)
}
func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
}
func (dst *Cursor) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Cursor.Merge(dst, src)
}
func (m *Cursor) XXX_Size() int {
	return xxx_messageInfo_Cursor.Size(m)
}
func (m *Cursor) XXX_DiscardUnknown() {
	xxx_messageInfo_Cursor.DiscardUnknown(m)
}

var xxx_messageInfo_Cursor proto.InternalMessageInfo

func (m *Cursor) GetCursor() uint64 {
	if m != nil && m.Cursor != nil {
		return *m.Cursor
	}
	return 0
}

func (m *Cursor) GetApp() string {
	if m != nil && m.App != nil {
		return *m.App
	}
	return ""
}
2673
// Error is the generated message for datastore v3 errors. The message body is
// empty here — presumably its error codes live in a nested enum generated
// elsewhere in this file (not visible in this chunk).
// NOTE(review): protoc-generated code — do not hand-edit.
type Error struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *Error) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Error.Unmarshal(m, b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Error.Marshal(b, m, deterministic)
}
func (dst *Error) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Error.Merge(dst, src)
}
func (m *Error) XXX_Size() int {
	return xxx_messageInfo_Error.Size(m)
}
func (m *Error) XXX_DiscardUnknown() {
	xxx_messageInfo_Error.DiscardUnknown(m)
}

var xxx_messageInfo_Error proto.InternalMessageInfo
2703
// Cost is the generated message reporting the billed cost of a datastore
// operation (index/entity write counts and bytes, commit cost, etc.).
// NOTE(review): protoc-generated code — do not hand-edit.
type Cost struct {
	IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
	IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"`
	EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"`
	EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"`
	Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
	ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
	IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *Cost) Reset() { *m = Cost{} }
func (m *Cost) String() string { return proto.CompactTextString(m) }
func (*Cost) ProtoMessage() {}
func (*Cost) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
}
// Wire (un)marshalling delegates to the shared message-info table.
func (m *Cost) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Cost.Unmarshal(m, b)
}
func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
}
func (dst *Cost) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Cost.Merge(dst, src)
}
func (m *Cost) XXX_Size() int {
	return xxx_messageInfo_Cost.Size(m)
}
func (m *Cost) XXX_DiscardUnknown() {
	xxx_messageInfo_Cost.DiscardUnknown(m)
}

var xxx_messageInfo_Cost proto.InternalMessageInfo

// Nil-safe accessors: each returns the field value, or the zero value when
// the receiver or the optional field is nil.
func (m *Cost) GetIndexWrites() int32 {
	if m != nil && m.IndexWrites != nil {
		return *m.IndexWrites
	}
	return 0
}

func (m *Cost) GetIndexWriteBytes() int32 {
	if m != nil && m.IndexWriteBytes != nil {
		return *m.IndexWriteBytes
	}
	return 0
}

func (m *Cost) GetEntityWrites() int32 {
	if m != nil && m.EntityWrites != nil {
		return *m.EntityWrites
	}
	return 0
}

func (m *Cost) GetEntityWriteBytes() int32 {
	if m != nil && m.EntityWriteBytes != nil {
		return *m.EntityWriteBytes
	}
	return 0
}

func (m *Cost) GetCommitcost() *Cost_CommitCost {
	if m != nil {
		return m.Commitcost
	}
	return nil
}

func (m *Cost) GetApproximateStorageDelta() int32 {
	if m != nil && m.ApproximateStorageDelta != nil {
		return *m.ApproximateStorageDelta
	}
	return 0
}

func (m *Cost) GetIdSequenceUpdates() int32 {
	if m != nil && m.IdSequenceUpdates != nil {
		return *m.IdSequenceUpdates
	}
	return 0
}
2789
// Cost_CommitCost is a generated protocol buffer message: the nested
// CommitCost group of Cost, counting entity puts/deletes requested in a commit.
type Cost_CommitCost struct {
	RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
	RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
func (*Cost_CommitCost) ProtoMessage() {}
func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
}
func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
}
func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
}
func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
}
func (m *Cost_CommitCost) XXX_Size() int {
	return xxx_messageInfo_Cost_CommitCost.Size(m)
}
func (m *Cost_CommitCost) XXX_DiscardUnknown() {
	xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
}

// xxx_messageInfo_Cost_CommitCost caches reflection-derived marshaling state.
var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo

// Nil-safe accessors: return the field value, or 0 when unset or m is nil.
func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
	if m != nil && m.RequestedEntityPuts != nil {
		return *m.RequestedEntityPuts
	}
	return 0
}

func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
	if m != nil && m.RequestedEntityDeletes != nil {
		return *m.RequestedEntityDeletes
	}
	return 0
}
2835
// GetRequest is a generated protocol buffer message: the datastore v3 Get RPC
// request (keys to fetch plus transaction/consistency options).
type GetRequest struct {
	Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
	Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
	Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
	FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
	Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
	AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
}
func (m *GetRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetRequest.Unmarshal(m, b)
}
func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
}
func (dst *GetRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetRequest.Merge(dst, src)
}
func (m *GetRequest) XXX_Size() int {
	return xxx_messageInfo_GetRequest.Size(m)
}
func (m *GetRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetRequest.DiscardUnknown(m)
}

// xxx_messageInfo_GetRequest caches reflection-derived marshaling state.
var xxx_messageInfo_GetRequest proto.InternalMessageInfo

// Default_GetRequest_AllowDeferred is the proto2-declared default (def=0 in
// the field tag) returned by GetAllowDeferred when the field is unset.
const Default_GetRequest_AllowDeferred bool = false

// Nil-safe accessors: each getter returns the field value, or the zero value
// (the declared default for AllowDeferred) when unset or m is nil.
func (m *GetRequest) GetHeader() *InternalHeader {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *GetRequest) GetKey() []*Reference {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *GetRequest) GetTransaction() *Transaction {
	if m != nil {
		return m.Transaction
	}
	return nil
}

func (m *GetRequest) GetFailoverMs() int64 {
	if m != nil && m.FailoverMs != nil {
		return *m.FailoverMs
	}
	return 0
}

func (m *GetRequest) GetStrong() bool {
	if m != nil && m.Strong != nil {
		return *m.Strong
	}
	return false
}

func (m *GetRequest) GetAllowDeferred() bool {
	if m != nil && m.AllowDeferred != nil {
		return *m.AllowDeferred
	}
	return Default_GetRequest_AllowDeferred
}
2915
// GetResponse is a generated protocol buffer message: the datastore v3 Get RPC
// response. Entity is a proto2 group (see the "group" wire tag); Deferred
// lists keys that were not returned in this response.
type GetResponse struct {
	Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
	Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
	InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (*GetResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
}
func (m *GetResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetResponse.Unmarshal(m, b)
}
func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
}
func (dst *GetResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetResponse.Merge(dst, src)
}
func (m *GetResponse) XXX_Size() int {
	return xxx_messageInfo_GetResponse.Size(m)
}
func (m *GetResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_GetResponse.DiscardUnknown(m)
}

// xxx_messageInfo_GetResponse caches reflection-derived marshaling state.
var xxx_messageInfo_GetResponse proto.InternalMessageInfo

// Default_GetResponse_InOrder is the proto2-declared default (def=1 in the
// field tag) returned by GetInOrder when the field is unset.
const Default_GetResponse_InOrder bool = true

// Nil-safe accessors.
func (m *GetResponse) GetEntity() []*GetResponse_Entity {
	if m != nil {
		return m.Entity
	}
	return nil
}

func (m *GetResponse) GetDeferred() []*Reference {
	if m != nil {
		return m.Deferred
	}
	return nil
}

func (m *GetResponse) GetInOrder() bool {
	if m != nil && m.InOrder != nil {
		return *m.InOrder
	}
	return Default_GetResponse_InOrder
}
2971
// GetResponse_Entity is a generated protocol buffer message: one entry of the
// GetResponse.Entity group, carrying an entity (or just its key) and version.
type GetResponse_Entity struct {
	Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
	Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
	Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
func (*GetResponse_Entity) ProtoMessage() {}
func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
}
func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
}
func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
}
func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
}
func (m *GetResponse_Entity) XXX_Size() int {
	return xxx_messageInfo_GetResponse_Entity.Size(m)
}
func (m *GetResponse_Entity) XXX_DiscardUnknown() {
	xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
}

// xxx_messageInfo_GetResponse_Entity caches reflection-derived marshaling state.
var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo

// Nil-safe accessors.
func (m *GetResponse_Entity) GetEntity() *EntityProto {
	if m != nil {
		return m.Entity
	}
	return nil
}

func (m *GetResponse_Entity) GetKey() *Reference {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *GetResponse_Entity) GetVersion() int64 {
	if m != nil && m.Version != nil {
		return *m.Version
	}
	return 0
}
3025
// PutRequest is a generated protocol buffer message: the datastore v3 Put RPC
// request (entities to write plus transaction, index, and id-allocation options).
type PutRequest struct {
	Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
	Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
	Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
	CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
	Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
	Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
	MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
	Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
	AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *PutRequest) Reset() { *m = PutRequest{} }
func (m *PutRequest) String() string { return proto.CompactTextString(m) }
func (*PutRequest) ProtoMessage() {}
func (*PutRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
}
func (m *PutRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PutRequest.Unmarshal(m, b)
}
func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
}
func (dst *PutRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PutRequest.Merge(dst, src)
}
func (m *PutRequest) XXX_Size() int {
	return xxx_messageInfo_PutRequest.Size(m)
}
func (m *PutRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_PutRequest.DiscardUnknown(m)
}

// xxx_messageInfo_PutRequest caches reflection-derived marshaling state.
var xxx_messageInfo_PutRequest proto.InternalMessageInfo

// Proto2-declared defaults (def=... in the field tags) returned by the
// corresponding getters when their field is unset.
const Default_PutRequest_Trusted bool = false
const Default_PutRequest_Force bool = false
const Default_PutRequest_MarkChanges bool = false
const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT

// Nil-safe accessors: each getter returns the field value, or the zero value
// (the declared default for the def=... fields) when unset or m is nil.
func (m *PutRequest) GetHeader() *InternalHeader {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *PutRequest) GetEntity() []*EntityProto {
	if m != nil {
		return m.Entity
	}
	return nil
}

func (m *PutRequest) GetTransaction() *Transaction {
	if m != nil {
		return m.Transaction
	}
	return nil
}

func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
	if m != nil {
		return m.CompositeIndex
	}
	return nil
}

func (m *PutRequest) GetTrusted() bool {
	if m != nil && m.Trusted != nil {
		return *m.Trusted
	}
	return Default_PutRequest_Trusted
}

func (m *PutRequest) GetForce() bool {
	if m != nil && m.Force != nil {
		return *m.Force
	}
	return Default_PutRequest_Force
}

func (m *PutRequest) GetMarkChanges() bool {
	if m != nil && m.MarkChanges != nil {
		return *m.MarkChanges
	}
	return Default_PutRequest_MarkChanges
}

func (m *PutRequest) GetSnapshot() []*Snapshot {
	if m != nil {
		return m.Snapshot
	}
	return nil
}

func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
	if m != nil && m.AutoIdPolicy != nil {
		return *m.AutoIdPolicy
	}
	return Default_PutRequest_AutoIdPolicy
}
3132
// PutResponse is a generated protocol buffer message: the datastore v3 Put RPC
// response (written keys, operation cost, and per-entity versions).
type PutResponse struct {
	Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
	Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
	Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *PutResponse) Reset() { *m = PutResponse{} }
func (m *PutResponse) String() string { return proto.CompactTextString(m) }
func (*PutResponse) ProtoMessage() {}
func (*PutResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
}
func (m *PutResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PutResponse.Unmarshal(m, b)
}
func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
}
func (dst *PutResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PutResponse.Merge(dst, src)
}
func (m *PutResponse) XXX_Size() int {
	return xxx_messageInfo_PutResponse.Size(m)
}
func (m *PutResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_PutResponse.DiscardUnknown(m)
}

// xxx_messageInfo_PutResponse caches reflection-derived marshaling state.
var xxx_messageInfo_PutResponse proto.InternalMessageInfo

// Nil-safe accessors.
func (m *PutResponse) GetKey() []*Reference {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *PutResponse) GetCost() *Cost {
	if m != nil {
		return m.Cost
	}
	return nil
}

func (m *PutResponse) GetVersion() []int64 {
	if m != nil {
		return m.Version
	}
	return nil
}
3186
// TouchRequest is a generated protocol buffer message: the datastore v3 Touch
// RPC request.
type TouchRequest struct {
	Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
	Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
	CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
	Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
	Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *TouchRequest) Reset() { *m = TouchRequest{} }
func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
func (*TouchRequest) ProtoMessage() {}
func (*TouchRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
}
func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
}
func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
}
func (dst *TouchRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TouchRequest.Merge(dst, src)
}
func (m *TouchRequest) XXX_Size() int {
	return xxx_messageInfo_TouchRequest.Size(m)
}
func (m *TouchRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_TouchRequest.DiscardUnknown(m)
}

// xxx_messageInfo_TouchRequest caches reflection-derived marshaling state.
var xxx_messageInfo_TouchRequest proto.InternalMessageInfo

// Default_TouchRequest_Force is the proto2-declared default (def=0) returned
// by GetForce when the field is unset.
const Default_TouchRequest_Force bool = false

// Nil-safe accessors.
func (m *TouchRequest) GetHeader() *InternalHeader {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *TouchRequest) GetKey() []*Reference {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
	if m != nil {
		return m.CompositeIndex
	}
	return nil
}

func (m *TouchRequest) GetForce() bool {
	if m != nil && m.Force != nil {
		return *m.Force
	}
	return Default_TouchRequest_Force
}

func (m *TouchRequest) GetSnapshot() []*Snapshot {
	if m != nil {
		return m.Snapshot
	}
	return nil
}
3258
// TouchResponse is a generated protocol buffer message: the datastore v3 Touch
// RPC response, carrying only the operation cost.
type TouchResponse struct {
	Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *TouchResponse) Reset() { *m = TouchResponse{} }
func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
func (*TouchResponse) ProtoMessage() {}
func (*TouchResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
}
func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
}
func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
}
func (dst *TouchResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_TouchResponse.Merge(dst, src)
}
func (m *TouchResponse) XXX_Size() int {
	return xxx_messageInfo_TouchResponse.Size(m)
}
func (m *TouchResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_TouchResponse.DiscardUnknown(m)
}

// xxx_messageInfo_TouchResponse caches reflection-derived marshaling state.
var xxx_messageInfo_TouchResponse proto.InternalMessageInfo

// GetCost returns the Cost message, or nil when unset or m is nil.
func (m *TouchResponse) GetCost() *Cost {
	if m != nil {
		return m.Cost
	}
	return nil
}
3296
// DeleteRequest is a generated protocol buffer message: the datastore v3
// Delete RPC request.
type DeleteRequest struct {
	Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
	Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
	Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
	Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
	Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
	MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
	Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteRequest) ProtoMessage() {}
func (*DeleteRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
}
func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
}
func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
}
func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteRequest.Merge(dst, src)
}
func (m *DeleteRequest) XXX_Size() int {
	return xxx_messageInfo_DeleteRequest.Size(m)
}
func (m *DeleteRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
}

// xxx_messageInfo_DeleteRequest caches reflection-derived marshaling state.
var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo

// Proto2-declared defaults (def=0 in the field tags) returned by the
// corresponding getters when their field is unset.
const Default_DeleteRequest_Trusted bool = false
const Default_DeleteRequest_Force bool = false
const Default_DeleteRequest_MarkChanges bool = false

// Nil-safe accessors.
func (m *DeleteRequest) GetHeader() *InternalHeader {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *DeleteRequest) GetKey() []*Reference {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *DeleteRequest) GetTransaction() *Transaction {
	if m != nil {
		return m.Transaction
	}
	return nil
}

func (m *DeleteRequest) GetTrusted() bool {
	if m != nil && m.Trusted != nil {
		return *m.Trusted
	}
	return Default_DeleteRequest_Trusted
}

func (m *DeleteRequest) GetForce() bool {
	if m != nil && m.Force != nil {
		return *m.Force
	}
	return Default_DeleteRequest_Force
}

func (m *DeleteRequest) GetMarkChanges() bool {
	if m != nil && m.MarkChanges != nil {
		return *m.MarkChanges
	}
	return Default_DeleteRequest_MarkChanges
}

func (m *DeleteRequest) GetSnapshot() []*Snapshot {
	if m != nil {
		return m.Snapshot
	}
	return nil
}
3386
// DeleteResponse is a generated protocol buffer message: the datastore v3
// Delete RPC response (operation cost and per-key versions).
type DeleteResponse struct {
	Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
	Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
func (*DeleteResponse) ProtoMessage() {}
func (*DeleteResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
}
func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
}
func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
}
func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteResponse.Merge(dst, src)
}
func (m *DeleteResponse) XXX_Size() int {
	return xxx_messageInfo_DeleteResponse.Size(m)
}
func (m *DeleteResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
}

// xxx_messageInfo_DeleteResponse caches reflection-derived marshaling state.
var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo

// Nil-safe accessors.
func (m *DeleteResponse) GetCost() *Cost {
	if m != nil {
		return m.Cost
	}
	return nil
}

func (m *DeleteResponse) GetVersion() []int64 {
	if m != nil {
		return m.Version
	}
	return nil
}
3432
// NextRequest is a generated protocol buffer message: the datastore v3 Next
// RPC request, which advances a query cursor. Cursor is a required field
// (wire tag "req").
type NextRequest struct {
	Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
	Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
	Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
	Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
	Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *NextRequest) Reset() { *m = NextRequest{} }
func (m *NextRequest) String() string { return proto.CompactTextString(m) }
func (*NextRequest) ProtoMessage() {}
func (*NextRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
}
func (m *NextRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_NextRequest.Unmarshal(m, b)
}
func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
}
func (dst *NextRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NextRequest.Merge(dst, src)
}
func (m *NextRequest) XXX_Size() int {
	return xxx_messageInfo_NextRequest.Size(m)
}
func (m *NextRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_NextRequest.DiscardUnknown(m)
}

// xxx_messageInfo_NextRequest caches reflection-derived marshaling state.
var xxx_messageInfo_NextRequest proto.InternalMessageInfo

// Proto2-declared defaults (def=0 in the field tags) returned by the
// corresponding getters when their field is unset.
const Default_NextRequest_Offset int32 = 0
const Default_NextRequest_Compile bool = false

// Nil-safe accessors.
func (m *NextRequest) GetHeader() *InternalHeader {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *NextRequest) GetCursor() *Cursor {
	if m != nil {
		return m.Cursor
	}
	return nil
}

func (m *NextRequest) GetCount() int32 {
	if m != nil && m.Count != nil {
		return *m.Count
	}
	return 0
}

func (m *NextRequest) GetOffset() int32 {
	if m != nil && m.Offset != nil {
		return *m.Offset
	}
	return Default_NextRequest_Offset
}

func (m *NextRequest) GetCompile() bool {
	if m != nil && m.Compile != nil {
		return *m.Compile
	}
	return Default_NextRequest_Compile
}
3505
// QueryResult is a generated protocol buffer message: a batch of datastore v3
// query results plus cursor/compilation state. MoreResults is a required
// field (wire tag "req").
type QueryResult struct {
	Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
	Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
	SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
	MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
	KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
	IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
	SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
	CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
	CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
	Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
	Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_QueryResult.Unmarshal(m, b)
}
func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
}
func (dst *QueryResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_QueryResult.Merge(dst, src)
}
func (m *QueryResult) XXX_Size() int {
	return xxx_messageInfo_QueryResult.Size(m)
}
func (m *QueryResult) XXX_DiscardUnknown() {
	xxx_messageInfo_QueryResult.DiscardUnknown(m)
}

// xxx_messageInfo_QueryResult caches reflection-derived marshaling state.
var xxx_messageInfo_QueryResult proto.InternalMessageInfo

// Nil-safe accessors: each getter returns the field value, or the zero value
// when unset or m is nil.
func (m *QueryResult) GetCursor() *Cursor {
	if m != nil {
		return m.Cursor
	}
	return nil
}

func (m *QueryResult) GetResult() []*EntityProto {
	if m != nil {
		return m.Result
	}
	return nil
}

func (m *QueryResult) GetSkippedResults() int32 {
	if m != nil && m.SkippedResults != nil {
		return *m.SkippedResults
	}
	return 0
}

func (m *QueryResult) GetMoreResults() bool {
	if m != nil && m.MoreResults != nil {
		return *m.MoreResults
	}
	return false
}

func (m *QueryResult) GetKeysOnly() bool {
	if m != nil && m.KeysOnly != nil {
		return *m.KeysOnly
	}
	return false
}

func (m *QueryResult) GetIndexOnly() bool {
	if m != nil && m.IndexOnly != nil {
		return *m.IndexOnly
	}
	return false
}

func (m *QueryResult) GetSmallOps() bool {
	if m != nil && m.SmallOps != nil {
		return *m.SmallOps
	}
	return false
}

func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
	if m != nil {
		return m.CompiledQuery
	}
	return nil
}

func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
	if m != nil {
		return m.CompiledCursor
	}
	return nil
}

func (m *QueryResult) GetIndex() []*CompositeIndex {
	if m != nil {
		return m.Index
	}
	return nil
}

func (m *QueryResult) GetVersion() []int64 {
	if m != nil {
		return m.Version
	}
	return nil
}
3623
// AllocateIdsRequest is a generated protocol buffer message: the datastore v3
// AllocateIds RPC request.
type AllocateIdsRequest struct {
	Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
	ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
	Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
	Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
	Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
func (*AllocateIdsRequest) ProtoMessage() {}
func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
}
func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
}
func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
}
func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
}
func (m *AllocateIdsRequest) XXX_Size() int {
	return xxx_messageInfo_AllocateIdsRequest.Size(m)
}
func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
}

// xxx_messageInfo_AllocateIdsRequest caches reflection-derived marshaling state.
var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo

// Nil-safe accessors.
func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
	if m != nil {
		return m.Header
	}
	return nil
}

func (m *AllocateIdsRequest) GetModelKey() *Reference {
	if m != nil {
		return m.ModelKey
	}
	return nil
}

func (m *AllocateIdsRequest) GetSize() int64 {
	if m != nil && m.Size != nil {
		return *m.Size
	}
	return 0
}

func (m *AllocateIdsRequest) GetMax() int64 {
	if m != nil && m.Max != nil {
		return *m.Max
	}
	return 0
}

func (m *AllocateIdsRequest) GetReserve() []*Reference {
	if m != nil {
		return m.Reserve
	}
	return nil
}
3693
// AllocateIdsResponse is a generated protocol buffer message: the datastore v3
// AllocateIds RPC response. Start and End are required fields (wire tag "req").
type AllocateIdsResponse struct {
	Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
	End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
	Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

// Standard generated protobuf plumbing; wire-format work is delegated to the
// shared message info below.
func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
func (*AllocateIdsResponse) ProtoMessage() {}
func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
}
func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
}
func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
}
func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
}
func (m *AllocateIdsResponse) XXX_Size() int {
	return xxx_messageInfo_AllocateIdsResponse.Size(m)
}
func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
}

// xxx_messageInfo_AllocateIdsResponse caches reflection-derived marshaling state.
var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo

// Nil-safe accessors.
func (m *AllocateIdsResponse) GetStart() int64 {
	if m != nil && m.Start != nil {
		return *m.Start
	}
	return 0
}

func (m *AllocateIdsResponse) GetEnd() int64 {
	if m != nil && m.End != nil {
		return *m.End
	}
	return 0
}

func (m *AllocateIdsResponse) GetCost() *Cost {
	if m != nil {
		return m.Cost
	}
	return nil
}
3747
// CompositeIndices wraps a repeated list of CompositeIndex messages.
type CompositeIndices struct {
	Index                []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}
3754
// Standard protoc-gen-go plumbing for CompositeIndices.
func (m *CompositeIndices) Reset()         { *m = CompositeIndices{} }
func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
func (*CompositeIndices) ProtoMessage()    {}
func (*CompositeIndices) Descriptor() ([]byte, []int) {
	// Index 33 into the generated file descriptor.
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
}
func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
}
func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
}
func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CompositeIndices.Merge(dst, src)
}
func (m *CompositeIndices) XXX_Size() int {
	return xxx_messageInfo_CompositeIndices.Size(m)
}
func (m *CompositeIndices) XXX_DiscardUnknown() {
	xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
}

// xxx_messageInfo_CompositeIndices caches marshal metadata for CompositeIndices.
var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
3778
3779func (m *CompositeIndices) GetIndex() []*CompositeIndex {
3780 if m != nil {
3781 return m.Index
3782 }
3783 return nil
3784}
3785
// AddActionsRequest carries a required Transaction plus a repeated list of
// Action messages to enqueue within it, along with the optional request header.
type AddActionsRequest struct {
	Header               *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
	Transaction          *Transaction    `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
	Action               []*Action       `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}
3794
// Standard protoc-gen-go plumbing for AddActionsRequest.
func (m *AddActionsRequest) Reset()         { *m = AddActionsRequest{} }
func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
func (*AddActionsRequest) ProtoMessage()    {}
func (*AddActionsRequest) Descriptor() ([]byte, []int) {
	// Index 34 into the generated file descriptor.
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
}
func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
}
func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
}
func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AddActionsRequest.Merge(dst, src)
}
func (m *AddActionsRequest) XXX_Size() int {
	return xxx_messageInfo_AddActionsRequest.Size(m)
}
func (m *AddActionsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
}

// xxx_messageInfo_AddActionsRequest caches marshal metadata for AddActionsRequest.
var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
3818
3819func (m *AddActionsRequest) GetHeader() *InternalHeader {
3820 if m != nil {
3821 return m.Header
3822 }
3823 return nil
3824}
3825
3826func (m *AddActionsRequest) GetTransaction() *Transaction {
3827 if m != nil {
3828 return m.Transaction
3829 }
3830 return nil
3831}
3832
3833func (m *AddActionsRequest) GetAction() []*Action {
3834 if m != nil {
3835 return m.Action
3836 }
3837 return nil
3838}
3839
// AddActionsResponse is the (field-less) reply to AddActions; it carries only
// the generated bookkeeping fields.
type AddActionsResponse struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
3845
// Standard protoc-gen-go plumbing for AddActionsResponse.
func (m *AddActionsResponse) Reset()         { *m = AddActionsResponse{} }
func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
func (*AddActionsResponse) ProtoMessage()    {}
func (*AddActionsResponse) Descriptor() ([]byte, []int) {
	// Index 35 into the generated file descriptor.
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
}
func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
}
func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
}
func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AddActionsResponse.Merge(dst, src)
}
func (m *AddActionsResponse) XXX_Size() int {
	return xxx_messageInfo_AddActionsResponse.Size(m)
}
func (m *AddActionsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
}

// xxx_messageInfo_AddActionsResponse caches marshal metadata for AddActionsResponse.
var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
3869
// BeginTransactionRequest starts a datastore transaction for application App.
// AllowMultipleEg defaults to false and Mode defaults to the UNKNOWN enum
// value (see the Default_ constants below); PreviousTransaction is presumably
// used when retrying a prior transaction — confirm against datastore_v3.proto.
type BeginTransactionRequest struct {
	Header               *InternalHeader                           `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
	App                  *string                                   `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
	AllowMultipleEg      *bool                                     `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
	DatabaseId           *string                                   `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
	Mode                 *BeginTransactionRequest_TransactionMode  `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
	PreviousTransaction  *Transaction                              `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                                  `json:"-"`
	XXX_unrecognized     []byte                                    `json:"-"`
	XXX_sizecache        int32                                     `json:"-"`
}
3881
// Standard protoc-gen-go plumbing for BeginTransactionRequest.
func (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }
func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
func (*BeginTransactionRequest) ProtoMessage()    {}
func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
	// Index 36 into the generated file descriptor.
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
}
func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
}
func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
}
func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
}
func (m *BeginTransactionRequest) XXX_Size() int {
	return xxx_messageInfo_BeginTransactionRequest.Size(m)
}
func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
}

// xxx_messageInfo_BeginTransactionRequest caches marshal metadata for BeginTransactionRequest.
var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo

// Proto-declared default values, returned by the getters when the
// corresponding optional field is unset.
const Default_BeginTransactionRequest_AllowMultipleEg bool = false
const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
3908
3909func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
3910 if m != nil {
3911 return m.Header
3912 }
3913 return nil
3914}
3915
3916func (m *BeginTransactionRequest) GetApp() string {
3917 if m != nil && m.App != nil {
3918 return *m.App
3919 }
3920 return ""
3921}
3922
3923func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
3924 if m != nil && m.AllowMultipleEg != nil {
3925 return *m.AllowMultipleEg
3926 }
3927 return Default_BeginTransactionRequest_AllowMultipleEg
3928}
3929
3930func (m *BeginTransactionRequest) GetDatabaseId() string {
3931 if m != nil && m.DatabaseId != nil {
3932 return *m.DatabaseId
3933 }
3934 return ""
3935}
3936
3937func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode {
3938 if m != nil && m.Mode != nil {
3939 return *m.Mode
3940 }
3941 return Default_BeginTransactionRequest_Mode
3942}
3943
3944func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
3945 if m != nil {
3946 return m.PreviousTransaction
3947 }
3948 return nil
3949}
3950
// CommitResponse is the reply to a transaction commit: the optional Cost of
// the commit plus a repeated Version group recording per-entity versions
// (a proto2 "group" field, hence the group encoding in the tag).
type CommitResponse struct {
	Cost                 *Cost                     `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
	Version              []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
	XXX_unrecognized     []byte                    `json:"-"`
	XXX_sizecache        int32                     `json:"-"`
}
3958
// Standard protoc-gen-go plumbing for CommitResponse.
func (m *CommitResponse) Reset()         { *m = CommitResponse{} }
func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
func (*CommitResponse) ProtoMessage()    {}
func (*CommitResponse) Descriptor() ([]byte, []int) {
	// Index 37 into the generated file descriptor.
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
}
func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
}
func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
}
func (dst *CommitResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CommitResponse.Merge(dst, src)
}
func (m *CommitResponse) XXX_Size() int {
	return xxx_messageInfo_CommitResponse.Size(m)
}
func (m *CommitResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CommitResponse.DiscardUnknown(m)
}

// xxx_messageInfo_CommitResponse caches marshal metadata for CommitResponse.
var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
3982
3983func (m *CommitResponse) GetCost() *Cost {
3984 if m != nil {
3985 return m.Cost
3986 }
3987 return nil
3988}
3989
3990func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
3991 if m != nil {
3992 return m.Version
3993 }
3994 return nil
3995}
3996
// CommitResponse_Version pairs a committed root entity key with its resulting
// (required) version number.
type CommitResponse_Version struct {
	RootEntityKey        *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
	Version              *int64     `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
	XXX_unrecognized     []byte     `json:"-"`
	XXX_sizecache        int32      `json:"-"`
}
4004
// Standard protoc-gen-go plumbing for the nested CommitResponse.Version group.
func (m *CommitResponse_Version) Reset()         { *m = CommitResponse_Version{} }
func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
func (*CommitResponse_Version) ProtoMessage()    {}
func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
	// Nested message 0 of message 37 in the generated file descriptor.
	return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
}
func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
}
func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
}
func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
}
func (m *CommitResponse_Version) XXX_Size() int {
	return xxx_messageInfo_CommitResponse_Version.Size(m)
}
func (m *CommitResponse_Version) XXX_DiscardUnknown() {
	xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
}

// xxx_messageInfo_CommitResponse_Version caches marshal metadata for CommitResponse_Version.
var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
4028
4029func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
4030 if m != nil {
4031 return m.RootEntityKey
4032 }
4033 return nil
4034}
4035
4036func (m *CommitResponse_Version) GetVersion() int64 {
4037 if m != nil && m.Version != nil {
4038 return *m.Version
4039 }
4040 return 0
4041}
4042
// init registers every message type in this file with the proto runtime under
// its fully-qualified "appengine.*" name, so reflection-based (un)marshaling
// and text formatting can resolve them.
func init() {
	proto.RegisterType((*Action)(nil), "appengine.Action")
	proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue")
	proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue")
	proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue")
	proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue")
	proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement")
	proto.RegisterType((*Property)(nil), "appengine.Property")
	proto.RegisterType((*Path)(nil), "appengine.Path")
	proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element")
	proto.RegisterType((*Reference)(nil), "appengine.Reference")
	proto.RegisterType((*User)(nil), "appengine.User")
	proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto")
	proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty")
	proto.RegisterType((*Index)(nil), "appengine.Index")
	proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property")
	proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex")
	proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix")
	proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue")
	proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition")
	proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot")
	proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader")
	proto.RegisterType((*Transaction)(nil), "appengine.Transaction")
	proto.RegisterType((*Query)(nil), "appengine.Query")
	proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter")
	proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order")
	proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery")
	proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan")
	proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan")
	proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter")
	proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor")
	proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position")
	proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue")
	proto.RegisterType((*Cursor)(nil), "appengine.Cursor")
	proto.RegisterType((*Error)(nil), "appengine.Error")
	proto.RegisterType((*Cost)(nil), "appengine.Cost")
	proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost")
	proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest")
	proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse")
	proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity")
	proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest")
	proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse")
	proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest")
	proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse")
	proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest")
	proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse")
	proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest")
	proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult")
	proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest")
	proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse")
	proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices")
	proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest")
	proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse")
	proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest")
	proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse")
	proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version")
}
4100
// init registers the source .proto path with its compressed file descriptor
// so Descriptor() calls above can resolve message metadata.
func init() {
	proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
}
4104
4105var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
4106 // 4156 bytes of a gzipped FileDescriptorProto
4107 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
4108 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
4109 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48,
4110 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46,
4111 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8,
4112 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24,
4113 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd,
4114 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f,
4115 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74,
4116 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac,
4117 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8,
4118 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9,
4119 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22,
4120 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56,
4121 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b,
4122 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05,
4123 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c,
4124 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2,
4125 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16,
4126 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3,
4127 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce,
4128 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67,
4129 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66,
4130 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13,
4131 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c,
4132 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6,
4133 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a,
4134 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f,
4135 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15,
4136 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a,
4137 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b,
4138 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17,
4139 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad,
4140 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50,
4141 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6,
4142 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b,
4143 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4,
4144 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2,
4145 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc,
4146 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e,
4147 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62,
4148 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee,
4149 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f,
4150 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4,
4151 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02,
4152 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae,
4153 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94,
4154 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f,
4155 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a,
4156 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52,
4157 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc,
4158 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92,
4159 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9,
4160 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50,
4161 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9,
4162 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23,
4163 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f,
4164 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87,
4165 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda,
4166 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b,
4167 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d,
4168 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f,
4169 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b,
4170 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9,
4171 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0,
4172 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68,
4173 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b,
4174 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79,
4175 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63,
4176 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0,
4177 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1,
4178 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10,
4179 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b,
4180 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b,
4181 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08,
4182 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72,
4183 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23,
4184 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91,
4185 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f,
4186 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93,
4187 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c,
4188 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa,
4189 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f,
4190 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef,
4191 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4,
4192 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90,
4193 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f,
4194 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86,
4195 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d,
4196 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee,
4197 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c,
4198 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1,
4199 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7,
4200 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80,
4201 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c,
4202 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21,
4203 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6,
4204 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26,
4205 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba,
4206 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c,
4207 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2,
4208 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8,
4209 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90,
4210 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91,
4211 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58,
4212 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c,
4213 0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b,
4214 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f,
4215 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02,
4216 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22,
4217 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b,
4218 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0,
4219 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18,
4220 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8,
4221 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b,
4222 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e,
4223 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84,
4224 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0,
4225 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec,
4226 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7,
4227 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60,
4228 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad,
4229 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4,
4230 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76,
4231 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0,
4232 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba,
4233 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5,
4234 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26,
4235 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60,
4236 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e,
4237 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33,
4238 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e,
4239 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e,
4240 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7,
4241 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45,
4242 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92,
4243 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb,
4244 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc,
4245 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43,
4246 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2,
4247 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5,
4248 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40,
4249 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa,
4250 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc,
4251 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8,
4252 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7,
4253 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6,
4254 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c,
4255 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a,
4256 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e,
4257 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8,
4258 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a,
4259 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55,
4260 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0,
4261 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e,
4262 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78,
4263 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8,
4264 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1,
4265 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a,
4266 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60,
4267 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf,
4268 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c,
4269 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d,
4270 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb,
4271 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f,
4272 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe,
4273 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1,
4274 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80,
4275 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2,
4276 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f,
4277 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d,
4278 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90,
4279 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb,
4280 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe,
4281 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7,
4282 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31,
4283 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe,
4284 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd,
4285 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99,
4286 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41,
4287 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1,
4288 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81,
4289 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63,
4290 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3,
4291 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff,
4292 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f,
4293 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14,
4294 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2,
4295 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4,
4296 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1,
4297 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b,
4298 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9,
4299 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18,
4300 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e,
4301 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9,
4302 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95,
4303 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30,
4304 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c,
4305 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73,
4306 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79,
4307 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59,
4308 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95,
4309 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4,
4310 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74,
4311 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49,
4312 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e,
4313 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42,
4314 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c,
4315 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b,
4316 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca,
4317 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c,
4318 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69,
4319 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a,
4320 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65,
4321 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96,
4322 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4,
4323 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1,
4324 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85,
4325 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf,
4326 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55,
4327 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58,
4328 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6,
4329 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e,
4330 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb,
4331 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd,
4332 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20,
4333 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63,
4334 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b,
4335 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27,
4336 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61,
4337 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44,
4338 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd,
4339 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91,
4340 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3,
4341 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0,
4342 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4,
4343 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74,
4344 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41,
4345 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02,
4346 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1,
4347 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06,
4348 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe,
4349 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59,
4350 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde,
4351 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89,
4352 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0,
4353 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb,
4354 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62,
4355 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5,
4356 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90,
4357 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02,
4358 0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06,
4359 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91,
4360 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41,
4361 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37,
4362 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3,
4363 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71,
4364 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd,
4365 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01,
4366 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00,
4367}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100644
index 0000000..497b4d9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,551 @@
1syntax = "proto2";
2option go_package = "datastore";
3
4package appengine;
5
6message Action{}
7
8message PropertyValue {
9 optional int64 int64Value = 1;
10 optional bool booleanValue = 2;
11 optional string stringValue = 3;
12 optional double doubleValue = 4;
13
14 optional group PointValue = 5 {
15 required double x = 6;
16 required double y = 7;
17 }
18
19 optional group UserValue = 8 {
20 required string email = 9;
21 required string auth_domain = 10;
22 optional string nickname = 11;
23 optional string federated_identity = 21;
24 optional string federated_provider = 22;
25 }
26
27 optional group ReferenceValue = 12 {
28 required string app = 13;
29 optional string name_space = 20;
30 repeated group PathElement = 14 {
31 required string type = 15;
32 optional int64 id = 16;
33 optional string name = 17;
34 }
35 }
36}
37
38message Property {
39 enum Meaning {
40 NO_MEANING = 0;
41 BLOB = 14;
42 TEXT = 15;
43 BYTESTRING = 16;
44
45 ATOM_CATEGORY = 1;
46 ATOM_LINK = 2;
47 ATOM_TITLE = 3;
48 ATOM_CONTENT = 4;
49 ATOM_SUMMARY = 5;
50 ATOM_AUTHOR = 6;
51
52 GD_WHEN = 7;
53 GD_EMAIL = 8;
54 GEORSS_POINT = 9;
55 GD_IM = 10;
56
57 GD_PHONENUMBER = 11;
58 GD_POSTALADDRESS = 12;
59
60 GD_RATING = 13;
61
62 BLOBKEY = 17;
63 ENTITY_PROTO = 19;
64
65 INDEX_VALUE = 18;
66 };
67
68 optional Meaning meaning = 1 [default = NO_MEANING];
69 optional string meaning_uri = 2;
70
71 required string name = 3;
72
73 required PropertyValue value = 5;
74
75 required bool multiple = 4;
76
77 optional bool searchable = 6 [default=false];
78
79 enum FtsTokenizationOption {
80 HTML = 1;
81 ATOM = 2;
82 }
83
84 optional FtsTokenizationOption fts_tokenization_option = 8;
85
86 optional string locale = 9 [default = "en"];
87}
88
89message Path {
90 repeated group Element = 1 {
91 required string type = 2;
92 optional int64 id = 3;
93 optional string name = 4;
94 }
95}
96
97message Reference {
98 required string app = 13;
99 optional string name_space = 20;
100 required Path path = 14;
101}
102
103message User {
104 required string email = 1;
105 required string auth_domain = 2;
106 optional string nickname = 3;
107 optional string federated_identity = 6;
108 optional string federated_provider = 7;
109}
110
111message EntityProto {
112 required Reference key = 13;
113 required Path entity_group = 16;
114 optional User owner = 17;
115
116 enum Kind {
117 GD_CONTACT = 1;
118 GD_EVENT = 2;
119 GD_MESSAGE = 3;
120 }
121 optional Kind kind = 4;
122 optional string kind_uri = 5;
123
124 repeated Property property = 14;
125 repeated Property raw_property = 15;
126
127 optional int32 rank = 18;
128}
129
130message CompositeProperty {
131 required int64 index_id = 1;
132 repeated string value = 2;
133}
134
135message Index {
136 required string entity_type = 1;
137 required bool ancestor = 5;
138 repeated group Property = 2 {
139 required string name = 3;
140 enum Direction {
141 ASCENDING = 1;
142 DESCENDING = 2;
143 }
144 optional Direction direction = 4 [default = ASCENDING];
145 }
146}
147
148message CompositeIndex {
149 required string app_id = 1;
150 required int64 id = 2;
151 required Index definition = 3;
152
153 enum State {
154 WRITE_ONLY = 1;
155 READ_WRITE = 2;
156 DELETED = 3;
157 ERROR = 4;
158 }
159 required State state = 4;
160
161 optional bool only_use_if_required = 6 [default = false];
162}
163
164message IndexPostfix {
165 message IndexValue {
166 required string property_name = 1;
167 required PropertyValue value = 2;
168 }
169
170 repeated IndexValue index_value = 1;
171
172 optional Reference key = 2;
173
174 optional bool before = 3 [default=true];
175}
176
177message IndexPosition {
178 optional string key = 1;
179
180 optional bool before = 2 [default=true];
181}
182
183message Snapshot {
184 enum Status {
185 INACTIVE = 0;
186 ACTIVE = 1;
187 }
188
189 required int64 ts = 1;
190}
191
192message InternalHeader {
193 optional string qos = 1;
194}
195
196message Transaction {
197 optional InternalHeader header = 4;
198 required fixed64 handle = 1;
199 required string app = 2;
200 optional bool mark_changes = 3 [default = false];
201}
202
203message Query {
204 optional InternalHeader header = 39;
205
206 required string app = 1;
207 optional string name_space = 29;
208
209 optional string kind = 3;
210 optional Reference ancestor = 17;
211
212 repeated group Filter = 4 {
213 enum Operator {
214 LESS_THAN = 1;
215 LESS_THAN_OR_EQUAL = 2;
216 GREATER_THAN = 3;
217 GREATER_THAN_OR_EQUAL = 4;
218 EQUAL = 5;
219 IN = 6;
220 EXISTS = 7;
221 }
222
223 required Operator op = 6;
224 repeated Property property = 14;
225 }
226
227 optional string search_query = 8;
228
229 repeated group Order = 9 {
230 enum Direction {
231 ASCENDING = 1;
232 DESCENDING = 2;
233 }
234
235 required string property = 10;
236 optional Direction direction = 11 [default = ASCENDING];
237 }
238
239 enum Hint {
240 ORDER_FIRST = 1;
241 ANCESTOR_FIRST = 2;
242 FILTER_FIRST = 3;
243 }
244 optional Hint hint = 18;
245
246 optional int32 count = 23;
247
248 optional int32 offset = 12 [default = 0];
249
250 optional int32 limit = 16;
251
252 optional CompiledCursor compiled_cursor = 30;
253 optional CompiledCursor end_compiled_cursor = 31;
254
255 repeated CompositeIndex composite_index = 19;
256
257 optional bool require_perfect_plan = 20 [default = false];
258
259 optional bool keys_only = 21 [default = false];
260
261 optional Transaction transaction = 22;
262
263 optional bool compile = 25 [default = false];
264
265 optional int64 failover_ms = 26;
266
267 optional bool strong = 32;
268
269 repeated string property_name = 33;
270
271 repeated string group_by_property_name = 34;
272
273 optional bool distinct = 24;
274
275 optional int64 min_safe_time_seconds = 35;
276
277 repeated string safe_replica_name = 36;
278
279 optional bool persist_offset = 37 [default=false];
280}
281
282message CompiledQuery {
283 required group PrimaryScan = 1 {
284 optional string index_name = 2;
285
286 optional string start_key = 3;
287 optional bool start_inclusive = 4;
288 optional string end_key = 5;
289 optional bool end_inclusive = 6;
290
291 repeated string start_postfix_value = 22;
292 repeated string end_postfix_value = 23;
293
294 optional int64 end_unapplied_log_timestamp_us = 19;
295 }
296
297 repeated group MergeJoinScan = 7 {
298 required string index_name = 8;
299
300 repeated string prefix_value = 9;
301
302 optional bool value_prefix = 20 [default=false];
303 }
304
305 optional Index index_def = 21;
306
307 optional int32 offset = 10 [default = 0];
308
309 optional int32 limit = 11;
310
311 required bool keys_only = 12;
312
313 repeated string property_name = 24;
314
315 optional int32 distinct_infix_size = 25;
316
317 optional group EntityFilter = 13 {
318 optional bool distinct = 14 [default=false];
319
320 optional string kind = 17;
321 optional Reference ancestor = 18;
322 }
323}
324
325message CompiledCursor {
326 optional group Position = 2 {
327 optional string start_key = 27;
328
329 repeated group IndexValue = 29 {
330 optional string property = 30;
331 required PropertyValue value = 31;
332 }
333
334 optional Reference key = 32;
335
336 optional bool start_inclusive = 28 [default=true];
337 }
338}
339
340message Cursor {
341 required fixed64 cursor = 1;
342
343 optional string app = 2;
344}
345
346message Error {
347 enum ErrorCode {
348 BAD_REQUEST = 1;
349 CONCURRENT_TRANSACTION = 2;
350 INTERNAL_ERROR = 3;
351 NEED_INDEX = 4;
352 TIMEOUT = 5;
353 PERMISSION_DENIED = 6;
354 BIGTABLE_ERROR = 7;
355 COMMITTED_BUT_STILL_APPLYING = 8;
356 CAPABILITY_DISABLED = 9;
357 TRY_ALTERNATE_BACKEND = 10;
358 SAFE_TIME_TOO_OLD = 11;
359 }
360}
361
362message Cost {
363 optional int32 index_writes = 1;
364 optional int32 index_write_bytes = 2;
365 optional int32 entity_writes = 3;
366 optional int32 entity_write_bytes = 4;
367 optional group CommitCost = 5 {
368 optional int32 requested_entity_puts = 6;
369 optional int32 requested_entity_deletes = 7;
370 };
371 optional int32 approximate_storage_delta = 8;
372 optional int32 id_sequence_updates = 9;
373}
374
375message GetRequest {
376 optional InternalHeader header = 6;
377
378 repeated Reference key = 1;
379 optional Transaction transaction = 2;
380
381 optional int64 failover_ms = 3;
382
383 optional bool strong = 4;
384
385 optional bool allow_deferred = 5 [default=false];
386}
387
388message GetResponse {
389 repeated group Entity = 1 {
390 optional EntityProto entity = 2;
391 optional Reference key = 4;
392
393 optional int64 version = 3;
394 }
395
396 repeated Reference deferred = 5;
397
398 optional bool in_order = 6 [default=true];
399}
400
401message PutRequest {
402 optional InternalHeader header = 11;
403
404 repeated EntityProto entity = 1;
405 optional Transaction transaction = 2;
406 repeated CompositeIndex composite_index = 3;
407
408 optional bool trusted = 4 [default = false];
409
410 optional bool force = 7 [default = false];
411
412 optional bool mark_changes = 8 [default = false];
413 repeated Snapshot snapshot = 9;
414
415 enum AutoIdPolicy {
416 CURRENT = 0;
417 SEQUENTIAL = 1;
418 }
419 optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
420}
421
422message PutResponse {
423 repeated Reference key = 1;
424 optional Cost cost = 2;
425 repeated int64 version = 3;
426}
427
428message TouchRequest {
429 optional InternalHeader header = 10;
430
431 repeated Reference key = 1;
432 repeated CompositeIndex composite_index = 2;
433 optional bool force = 3 [default = false];
434 repeated Snapshot snapshot = 9;
435}
436
437message TouchResponse {
438 optional Cost cost = 1;
439}
440
441message DeleteRequest {
442 optional InternalHeader header = 10;
443
444 repeated Reference key = 6;
445 optional Transaction transaction = 5;
446
447 optional bool trusted = 4 [default = false];
448
449 optional bool force = 7 [default = false];
450
451 optional bool mark_changes = 8 [default = false];
452 repeated Snapshot snapshot = 9;
453}
454
455message DeleteResponse {
456 optional Cost cost = 1;
457 repeated int64 version = 3;
458}
459
460message NextRequest {
461 optional InternalHeader header = 5;
462
463 required Cursor cursor = 1;
464 optional int32 count = 2;
465
466 optional int32 offset = 4 [default = 0];
467
468 optional bool compile = 3 [default = false];
469}
470
471message QueryResult {
472 optional Cursor cursor = 1;
473
474 repeated EntityProto result = 2;
475
476 optional int32 skipped_results = 7;
477
478 required bool more_results = 3;
479
480 optional bool keys_only = 4;
481
482 optional bool index_only = 9;
483
484 optional bool small_ops = 10;
485
486 optional CompiledQuery compiled_query = 5;
487
488 optional CompiledCursor compiled_cursor = 6;
489
490 repeated CompositeIndex index = 8;
491
492 repeated int64 version = 11;
493}
494
495message AllocateIdsRequest {
496 optional InternalHeader header = 4;
497
498 optional Reference model_key = 1;
499
500 optional int64 size = 2;
501
502 optional int64 max = 3;
503
504 repeated Reference reserve = 5;
505}
506
507message AllocateIdsResponse {
508 required int64 start = 1;
509 required int64 end = 2;
510 optional Cost cost = 3;
511}
512
513message CompositeIndices {
514 repeated CompositeIndex index = 1;
515}
516
517message AddActionsRequest {
518 optional InternalHeader header = 3;
519
520 required Transaction transaction = 1;
521 repeated Action action = 2;
522}
523
524message AddActionsResponse {
525}
526
527message BeginTransactionRequest {
528 optional InternalHeader header = 3;
529
530 required string app = 1;
531 optional bool allow_multiple_eg = 2 [default = false];
532 optional string database_id = 4;
533
534 enum TransactionMode {
535 UNKNOWN = 0;
536 READ_ONLY = 1;
537 READ_WRITE = 2;
538 }
539 optional TransactionMode mode = 5 [default = UNKNOWN];
540
541 optional Transaction previous_transaction = 7;
542}
543
544message CommitResponse {
545 optional Cost cost = 1;
546
547 repeated group Version = 3 {
548 required Reference root_entity_key = 4;
549 required int64 version = 5;
550 }
551}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 0000000..9b4134e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,55 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package internal
6
7import (
8 "os"
9
10 netcontext "golang.org/x/net/context"
11)
12
13var (
14 // This is set to true in identity_classic.go, which is behind the appengine build tag.
15 // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
16 // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
17 // first-gen runtime. See IsStandard below for the second-gen check.
18 appengineStandard bool
19
20 // This is set to true in identity_flex.go, which is behind the appenginevm build tag.
21 appengineFlex bool
22)
23
24// AppID is the implementation of the wrapper function of the same name in
25// ../identity.go. See that file for commentary.
26func AppID(c netcontext.Context) string {
27 return appID(FullyQualifiedAppID(c))
28}
29
30// IsStandard is the implementation of the wrapper function of the same name in
31// ../appengine.go. See that file for commentary.
32func IsStandard() bool {
33 // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
34 // second-gen (>= Go 1.11).
35 return appengineStandard || IsSecondGen()
36}
37
38// IsSecondGen is the implementation of the wrapper function of the same name in
39// ../appengine.go. See that file for commentary.
40func IsSecondGen() bool {
41 // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
42 return os.Getenv("GAE_ENV") == "standard"
43}
44
45// IsFlex is the implementation of the wrapper function of the same name in
46// ../appengine.go. See that file for commentary.
47func IsFlex() bool {
48 return appengineFlex
49}
50
51// IsAppEngine is the implementation of the wrapper function of the same name in
52// ../appengine.go. See that file for commentary.
53func IsAppEngine() bool {
54 return IsStandard() || IsFlex()
55}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 0000000..4e979f4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,61 @@
1// Copyright 2015 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// +build appengine
6
7package internal
8
9import (
10 "appengine"
11
12 netcontext "golang.org/x/net/context"
13)
14
15func init() {
16 appengineStandard = true
17}
18
19func DefaultVersionHostname(ctx netcontext.Context) string {
20 c := fromContext(ctx)
21 if c == nil {
22 panic(errNotAppEngineContext)
23 }
24 return appengine.DefaultVersionHostname(c)
25}
26
27func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
28func ServerSoftware() string { return appengine.ServerSoftware() }
29func InstanceID() string { return appengine.InstanceID() }
30func IsDevAppServer() bool { return appengine.IsDevAppServer() }
31
32func RequestID(ctx netcontext.Context) string {
33 c := fromContext(ctx)
34 if c == nil {
35 panic(errNotAppEngineContext)
36 }
37 return appengine.RequestID(c)
38}
39
40func ModuleName(ctx netcontext.Context) string {
41 c := fromContext(ctx)
42 if c == nil {
43 panic(errNotAppEngineContext)
44 }
45 return appengine.ModuleName(c)
46}
47func VersionID(ctx netcontext.Context) string {
48 c := fromContext(ctx)
49 if c == nil {
50 panic(errNotAppEngineContext)
51 }
52 return appengine.VersionID(c)
53}
54
55func fullyQualifiedAppID(ctx netcontext.Context) string {
56 c := fromContext(ctx)
57 if c == nil {
58 panic(errNotAppEngineContext)
59 }
60 return c.FullyQualifiedAppID()
61}
diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
new file mode 100644
index 0000000..d5e2e7b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_flex.go
@@ -0,0 +1,11 @@
1// Copyright 2018 Google LLC. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// +build appenginevm
6
7package internal
8
9func init() {
10 appengineFlex = true
11}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 0000000..5d80672
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,134 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// +build !appengine
6
7package internal
8
9import (
10 "log"
11 "net/http"
12 "os"
13 "strings"
14
15 netcontext "golang.org/x/net/context"
16)
17
18// These functions are implementations of the wrapper functions
19// in ../appengine/identity.go. See that file for commentary.
20
21const (
22 hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
23 hRequestLogId = "X-AppEngine-Request-Log-Id"
24 hDatacenter = "X-AppEngine-Datacenter"
25)
26
27func ctxHeaders(ctx netcontext.Context) http.Header {
28 c := fromContext(ctx)
29 if c == nil {
30 return nil
31 }
32 return c.Request().Header
33}
34
35func DefaultVersionHostname(ctx netcontext.Context) string {
36 return ctxHeaders(ctx).Get(hDefaultVersionHostname)
37}
38
39func RequestID(ctx netcontext.Context) string {
40 return ctxHeaders(ctx).Get(hRequestLogId)
41}
42
43func Datacenter(ctx netcontext.Context) string {
44 if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
45 return dc
46 }
47 // If the header isn't set, read zone from the metadata service.
48 // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
49 zone, err := getMetadata("instance/zone")
50 if err != nil {
51 log.Printf("Datacenter: %v", err)
52 return ""
53 }
54 parts := strings.Split(string(zone), "/")
55 if len(parts) == 0 {
56 return ""
57 }
58 return parts[len(parts)-1]
59}
60
61func ServerSoftware() string {
62 // TODO(dsymonds): Remove fallback when we've verified this.
63 if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
64 return s
65 }
66 if s := os.Getenv("GAE_ENV"); s != "" {
67 return s
68 }
69 return "Google App Engine/1.x.x"
70}
71
72// TODO(dsymonds): Remove the metadata fetches.
73
74func ModuleName(_ netcontext.Context) string {
75 if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
76 return s
77 }
78 if s := os.Getenv("GAE_SERVICE"); s != "" {
79 return s
80 }
81 return string(mustGetMetadata("instance/attributes/gae_backend_name"))
82}
83
84func VersionID(_ netcontext.Context) string {
85 if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
86 return s1 + "." + s2
87 }
88 if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
89 return s1 + "." + s2
90 }
91 return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
92}
93
94func InstanceID() string {
95 if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
96 return s
97 }
98 if s := os.Getenv("GAE_INSTANCE"); s != "" {
99 return s
100 }
101 return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
102}
103
104func partitionlessAppID() string {
105 // gae_project has everything except the partition prefix.
106 if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
107 return appID
108 }
109 if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
110 return project
111 }
112 return string(mustGetMetadata("instance/attributes/gae_project"))
113}
114
115func fullyQualifiedAppID(_ netcontext.Context) string {
116 if s := os.Getenv("GAE_APPLICATION"); s != "" {
117 return s
118 }
119 appID := partitionlessAppID()
120
121 part := os.Getenv("GAE_PARTITION")
122 if part == "" {
123 part = string(mustGetMetadata("instance/attributes/gae_partition"))
124 }
125
126 if part != "" {
127 appID = part + "~" + appID
128 }
129 return appID
130}
131
132func IsDevAppServer() bool {
133 return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
134}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 0000000..051ea39
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// Package internal provides support for package appengine.
6//
7// Programs should not use this package directly. Its API is not stable.
8// Use packages appengine and appengine/* instead.
9package internal
10
11import (
12 "fmt"
13
14 "github.com/golang/protobuf/proto"
15
16 remotepb "google.golang.org/appengine/internal/remote_api"
17)
18
// errorCodeMaps is a map of service name to the error code map for the service.
// It is populated via RegisterErrorCodeMap (init-time only, per that function's
// contract) and read by (*APIError).Error; there is no synchronization.
var errorCodeMaps = make(map[string]map[int32]string)
21
// RegisterErrorCodeMap is called from API implementations to register their
// error code map. This should only be called from init functions: the
// underlying map is unsynchronized, so later registration would race with
// readers such as (*APIError).Error.
func RegisterErrorCodeMap(service string, m map[int32]string) {
	errorCodeMaps[service] = m
}
27
// timeoutCodeKey identifies a (service, error code) pair in the timeout set.
type timeoutCodeKey struct {
	service string
	code    int32
}

// timeoutCodes is the set of service+code pairs that represent timeouts.
// Populated via RegisterTimeoutErrorCode; read by (*APIError).IsTimeout.
var timeoutCodes = make(map[timeoutCodeKey]bool)
35
// RegisterTimeoutErrorCode records that the given error code for the given
// service represents a timeout, so that (*APIError).IsTimeout reports true
// for such errors. Like RegisterErrorCodeMap, this should only be called
// from init functions (the underlying map is unsynchronized).
func RegisterTimeoutErrorCode(service string, code int32) {
	timeoutCodes[timeoutCodeKey{service, code}] = true
}
39
// APIError is the type returned by appengine.Context's Call method
// when an API call fails in an API-specific way. This may be, for instance,
// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
type APIError struct {
	Service string // name of the API service that produced the error
	Detail  string // optional human-readable detail text
	Code    int32  // API-specific error code
}
48
49func (e *APIError) Error() string {
50 if e.Code == 0 {
51 if e.Detail == "" {
52 return "APIError <empty>"
53 }
54 return e.Detail
55 }
56 s := fmt.Sprintf("API error %d", e.Code)
57 if m, ok := errorCodeMaps[e.Service]; ok {
58 s += " (" + e.Service + ": " + m[e.Code] + ")"
59 } else {
60 // Shouldn't happen, but provide a bit more detail if it does.
61 s = e.Service + " " + s
62 }
63 if e.Detail != "" {
64 s += ": " + e.Detail
65 }
66 return s
67}
68
// IsTimeout reports whether this error's (service, code) pair was registered
// as a timeout via RegisterTimeoutErrorCode.
func (e *APIError) IsTimeout() bool {
	return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
}
72
// CallError is the type returned by appengine.Context's Call method when an
// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
type CallError struct {
	Detail string // human-readable detail text
	Code   int32  // interpreted as a remote_api RpcError_ErrorCode in Error
	// TODO: Remove this if we get a distinguishable error code.
	Timeout bool // whether the failure was a timeout
}
81
82func (e *CallError) Error() string {
83 var msg string
84 switch remotepb.RpcError_ErrorCode(e.Code) {
85 case remotepb.RpcError_UNKNOWN:
86 return e.Detail
87 case remotepb.RpcError_OVER_QUOTA:
88 msg = "Over quota"
89 case remotepb.RpcError_CAPABILITY_DISABLED:
90 msg = "Capability disabled"
91 case remotepb.RpcError_CANCELLED:
92 msg = "Canceled"
93 default:
94 msg = fmt.Sprintf("Call error %d", e.Code)
95 }
96 s := msg + ": " + e.Detail
97 if e.Timeout {
98 s += " (timeout)"
99 }
100 return s
101}
102
// IsTimeout reports whether the call failed due to a timeout, as recorded
// in the Timeout field.
func (e *CallError) IsTimeout() bool {
	return e.Timeout
}
106
// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
// The function should be prepared to be called on the same message more than once; it should only modify the
// RPC request the first time.
// NOTE(review): the map is unsynchronized; presumably it is populated only
// from init functions like the other registries in this file — confirm.
var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 0000000..8545ac4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,1313 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google.golang.org/appengine/internal/log/log_service.proto
3
4package log
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21type LogServiceError_ErrorCode int32
22
23const (
24 LogServiceError_OK LogServiceError_ErrorCode = 0
25 LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
26 LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
27)
28
29var LogServiceError_ErrorCode_name = map[int32]string{
30 0: "OK",
31 1: "INVALID_REQUEST",
32 2: "STORAGE_ERROR",
33}
34var LogServiceError_ErrorCode_value = map[string]int32{
35 "OK": 0,
36 "INVALID_REQUEST": 1,
37 "STORAGE_ERROR": 2,
38}
39
40func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
41 p := new(LogServiceError_ErrorCode)
42 *p = x
43 return p
44}
45func (x LogServiceError_ErrorCode) String() string {
46 return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
47}
48func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
49 value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
50 if err != nil {
51 return err
52 }
53 *x = LogServiceError_ErrorCode(value)
54 return nil
55}
56func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
57 return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
58}
59
60type LogServiceError struct {
61 XXX_NoUnkeyedLiteral struct{} `json:"-"`
62 XXX_unrecognized []byte `json:"-"`
63 XXX_sizecache int32 `json:"-"`
64}
65
66func (m *LogServiceError) Reset() { *m = LogServiceError{} }
67func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
68func (*LogServiceError) ProtoMessage() {}
69func (*LogServiceError) Descriptor() ([]byte, []int) {
70 return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
71}
72func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
73 return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
74}
75func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
76 return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
77}
78func (dst *LogServiceError) XXX_Merge(src proto.Message) {
79 xxx_messageInfo_LogServiceError.Merge(dst, src)
80}
81func (m *LogServiceError) XXX_Size() int {
82 return xxx_messageInfo_LogServiceError.Size(m)
83}
84func (m *LogServiceError) XXX_DiscardUnknown() {
85 xxx_messageInfo_LogServiceError.DiscardUnknown(m)
86}
87
88var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
89
90type UserAppLogLine struct {
91 TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
92 Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
93 Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
94 XXX_NoUnkeyedLiteral struct{} `json:"-"`
95 XXX_unrecognized []byte `json:"-"`
96 XXX_sizecache int32 `json:"-"`
97}
98
99func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
100func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
101func (*UserAppLogLine) ProtoMessage() {}
102func (*UserAppLogLine) Descriptor() ([]byte, []int) {
103 return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
104}
105func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
106 return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
107}
108func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
109 return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
110}
111func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
112 xxx_messageInfo_UserAppLogLine.Merge(dst, src)
113}
114func (m *UserAppLogLine) XXX_Size() int {
115 return xxx_messageInfo_UserAppLogLine.Size(m)
116}
117func (m *UserAppLogLine) XXX_DiscardUnknown() {
118 xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
119}
120
121var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
122
123func (m *UserAppLogLine) GetTimestampUsec() int64 {
124 if m != nil && m.TimestampUsec != nil {
125 return *m.TimestampUsec
126 }
127 return 0
128}
129
130func (m *UserAppLogLine) GetLevel() int64 {
131 if m != nil && m.Level != nil {
132 return *m.Level
133 }
134 return 0
135}
136
137func (m *UserAppLogLine) GetMessage() string {
138 if m != nil && m.Message != nil {
139 return *m.Message
140 }
141 return ""
142}
143
144type UserAppLogGroup struct {
145 LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
146 XXX_NoUnkeyedLiteral struct{} `json:"-"`
147 XXX_unrecognized []byte `json:"-"`
148 XXX_sizecache int32 `json:"-"`
149}
150
151func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
152func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
153func (*UserAppLogGroup) ProtoMessage() {}
154func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
155 return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
156}
157func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
158 return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
159}
160func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
161 return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
162}
163func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
164 xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
165}
166func (m *UserAppLogGroup) XXX_Size() int {
167 return xxx_messageInfo_UserAppLogGroup.Size(m)
168}
169func (m *UserAppLogGroup) XXX_DiscardUnknown() {
170 xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
171}
172
173var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
174
175func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
176 if m != nil {
177 return m.LogLine
178 }
179 return nil
180}
181
182type FlushRequest struct {
183 Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
184 XXX_NoUnkeyedLiteral struct{} `json:"-"`
185 XXX_unrecognized []byte `json:"-"`
186 XXX_sizecache int32 `json:"-"`
187}
188
189func (m *FlushRequest) Reset() { *m = FlushRequest{} }
190func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
191func (*FlushRequest) ProtoMessage() {}
192func (*FlushRequest) Descriptor() ([]byte, []int) {
193 return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
194}
195func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
196 return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
197}
198func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
199 return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
200}
201func (dst *FlushRequest) XXX_Merge(src proto.Message) {
202 xxx_messageInfo_FlushRequest.Merge(dst, src)
203}
204func (m *FlushRequest) XXX_Size() int {
205 return xxx_messageInfo_FlushRequest.Size(m)
206}
207func (m *FlushRequest) XXX_DiscardUnknown() {
208 xxx_messageInfo_FlushRequest.DiscardUnknown(m)
209}
210
211var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
212
213func (m *FlushRequest) GetLogs() []byte {
214 if m != nil {
215 return m.Logs
216 }
217 return nil
218}
219
220type SetStatusRequest struct {
221 Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
222 XXX_NoUnkeyedLiteral struct{} `json:"-"`
223 XXX_unrecognized []byte `json:"-"`
224 XXX_sizecache int32 `json:"-"`
225}
226
227func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
228func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
229func (*SetStatusRequest) ProtoMessage() {}
230func (*SetStatusRequest) Descriptor() ([]byte, []int) {
231 return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
232}
233func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
234 return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
235}
236func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
237 return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
238}
239func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
240 xxx_messageInfo_SetStatusRequest.Merge(dst, src)
241}
242func (m *SetStatusRequest) XXX_Size() int {
243 return xxx_messageInfo_SetStatusRequest.Size(m)
244}
245func (m *SetStatusRequest) XXX_DiscardUnknown() {
246 xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
247}
248
249var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
250
251func (m *SetStatusRequest) GetStatus() string {
252 if m != nil && m.Status != nil {
253 return *m.Status
254 }
255 return ""
256}
257
258type LogOffset struct {
259 RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
260 XXX_NoUnkeyedLiteral struct{} `json:"-"`
261 XXX_unrecognized []byte `json:"-"`
262 XXX_sizecache int32 `json:"-"`
263}
264
265func (m *LogOffset) Reset() { *m = LogOffset{} }
266func (m *LogOffset) String() string { return proto.CompactTextString(m) }
267func (*LogOffset) ProtoMessage() {}
268func (*LogOffset) Descriptor() ([]byte, []int) {
269 return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
270}
271func (m *LogOffset) XXX_Unmarshal(b []byte) error {
272 return xxx_messageInfo_LogOffset.Unmarshal(m, b)
273}
274func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
275 return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
276}
277func (dst *LogOffset) XXX_Merge(src proto.Message) {
278 xxx_messageInfo_LogOffset.Merge(dst, src)
279}
280func (m *LogOffset) XXX_Size() int {
281 return xxx_messageInfo_LogOffset.Size(m)
282}
283func (m *LogOffset) XXX_DiscardUnknown() {
284 xxx_messageInfo_LogOffset.DiscardUnknown(m)
285}
286
287var xxx_messageInfo_LogOffset proto.InternalMessageInfo
288
289func (m *LogOffset) GetRequestId() []byte {
290 if m != nil {
291 return m.RequestId
292 }
293 return nil
294}
295
296type LogLine struct {
297 Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
298 Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
299 LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
300 XXX_NoUnkeyedLiteral struct{} `json:"-"`
301 XXX_unrecognized []byte `json:"-"`
302 XXX_sizecache int32 `json:"-"`
303}
304
305func (m *LogLine) Reset() { *m = LogLine{} }
306func (m *LogLine) String() string { return proto.CompactTextString(m) }
307func (*LogLine) ProtoMessage() {}
308func (*LogLine) Descriptor() ([]byte, []int) {
309 return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
310}
311func (m *LogLine) XXX_Unmarshal(b []byte) error {
312 return xxx_messageInfo_LogLine.Unmarshal(m, b)
313}
314func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
315 return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
316}
317func (dst *LogLine) XXX_Merge(src proto.Message) {
318 xxx_messageInfo_LogLine.Merge(dst, src)
319}
320func (m *LogLine) XXX_Size() int {
321 return xxx_messageInfo_LogLine.Size(m)
322}
323func (m *LogLine) XXX_DiscardUnknown() {
324 xxx_messageInfo_LogLine.DiscardUnknown(m)
325}
326
327var xxx_messageInfo_LogLine proto.InternalMessageInfo
328
329func (m *LogLine) GetTime() int64 {
330 if m != nil && m.Time != nil {
331 return *m.Time
332 }
333 return 0
334}
335
336func (m *LogLine) GetLevel() int32 {
337 if m != nil && m.Level != nil {
338 return *m.Level
339 }
340 return 0
341}
342
343func (m *LogLine) GetLogMessage() string {
344 if m != nil && m.LogMessage != nil {
345 return *m.LogMessage
346 }
347 return ""
348}
349
350type RequestLog struct {
351 AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
352 ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
353 VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"`
354 RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"`
355 Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
356 Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
357 Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
358 StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"`
359 EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"`
360 Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
361 Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
362 Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
363 Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
364 HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"`
365 Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
366 ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"`
367 Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
368 UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
369 UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"`
370 Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
371 ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"`
372 Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
373 Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
374 TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"`
375 TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"`
376 WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"`
377 PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"`
378 ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"`
379 Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
380 CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"`
381 Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
382 LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"`
383 AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"`
384 ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"`
385 WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"`
386 WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
387 ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
388 ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
389 XXX_NoUnkeyedLiteral struct{} `json:"-"`
390 XXX_unrecognized []byte `json:"-"`
391 XXX_sizecache int32 `json:"-"`
392}
393
394func (m *RequestLog) Reset() { *m = RequestLog{} }
395func (m *RequestLog) String() string { return proto.CompactTextString(m) }
396func (*RequestLog) ProtoMessage() {}
397func (*RequestLog) Descriptor() ([]byte, []int) {
398 return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
399}
400func (m *RequestLog) XXX_Unmarshal(b []byte) error {
401 return xxx_messageInfo_RequestLog.Unmarshal(m, b)
402}
403func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
404 return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
405}
406func (dst *RequestLog) XXX_Merge(src proto.Message) {
407 xxx_messageInfo_RequestLog.Merge(dst, src)
408}
409func (m *RequestLog) XXX_Size() int {
410 return xxx_messageInfo_RequestLog.Size(m)
411}
412func (m *RequestLog) XXX_DiscardUnknown() {
413 xxx_messageInfo_RequestLog.DiscardUnknown(m)
414}
415
416var xxx_messageInfo_RequestLog proto.InternalMessageInfo
417
418const Default_RequestLog_ModuleId string = "default"
419const Default_RequestLog_ReplicaIndex int32 = -1
420const Default_RequestLog_Finished bool = true
421
422func (m *RequestLog) GetAppId() string {
423 if m != nil && m.AppId != nil {
424 return *m.AppId
425 }
426 return ""
427}
428
429func (m *RequestLog) GetModuleId() string {
430 if m != nil && m.ModuleId != nil {
431 return *m.ModuleId
432 }
433 return Default_RequestLog_ModuleId
434}
435
436func (m *RequestLog) GetVersionId() string {
437 if m != nil && m.VersionId != nil {
438 return *m.VersionId
439 }
440 return ""
441}
442
443func (m *RequestLog) GetRequestId() []byte {
444 if m != nil {
445 return m.RequestId
446 }
447 return nil
448}
449
450func (m *RequestLog) GetOffset() *LogOffset {
451 if m != nil {
452 return m.Offset
453 }
454 return nil
455}
456
457func (m *RequestLog) GetIp() string {
458 if m != nil && m.Ip != nil {
459 return *m.Ip
460 }
461 return ""
462}
463
464func (m *RequestLog) GetNickname() string {
465 if m != nil && m.Nickname != nil {
466 return *m.Nickname
467 }
468 return ""
469}
470
471func (m *RequestLog) GetStartTime() int64 {
472 if m != nil && m.StartTime != nil {
473 return *m.StartTime
474 }
475 return 0
476}
477
478func (m *RequestLog) GetEndTime() int64 {
479 if m != nil && m.EndTime != nil {
480 return *m.EndTime
481 }
482 return 0
483}
484
485func (m *RequestLog) GetLatency() int64 {
486 if m != nil && m.Latency != nil {
487 return *m.Latency
488 }
489 return 0
490}
491
492func (m *RequestLog) GetMcycles() int64 {
493 if m != nil && m.Mcycles != nil {
494 return *m.Mcycles
495 }
496 return 0
497}
498
499func (m *RequestLog) GetMethod() string {
500 if m != nil && m.Method != nil {
501 return *m.Method
502 }
503 return ""
504}
505
506func (m *RequestLog) GetResource() string {
507 if m != nil && m.Resource != nil {
508 return *m.Resource
509 }
510 return ""
511}
512
513func (m *RequestLog) GetHttpVersion() string {
514 if m != nil && m.HttpVersion != nil {
515 return *m.HttpVersion
516 }
517 return ""
518}
519
520func (m *RequestLog) GetStatus() int32 {
521 if m != nil && m.Status != nil {
522 return *m.Status
523 }
524 return 0
525}
526
527func (m *RequestLog) GetResponseSize() int64 {
528 if m != nil && m.ResponseSize != nil {
529 return *m.ResponseSize
530 }
531 return 0
532}
533
534func (m *RequestLog) GetReferrer() string {
535 if m != nil && m.Referrer != nil {
536 return *m.Referrer
537 }
538 return ""
539}
540
541func (m *RequestLog) GetUserAgent() string {
542 if m != nil && m.UserAgent != nil {
543 return *m.UserAgent
544 }
545 return ""
546}
547
548func (m *RequestLog) GetUrlMapEntry() string {
549 if m != nil && m.UrlMapEntry != nil {
550 return *m.UrlMapEntry
551 }
552 return ""
553}
554
555func (m *RequestLog) GetCombined() string {
556 if m != nil && m.Combined != nil {
557 return *m.Combined
558 }
559 return ""
560}
561
562func (m *RequestLog) GetApiMcycles() int64 {
563 if m != nil && m.ApiMcycles != nil {
564 return *m.ApiMcycles
565 }
566 return 0
567}
568
569func (m *RequestLog) GetHost() string {
570 if m != nil && m.Host != nil {
571 return *m.Host
572 }
573 return ""
574}
575
576func (m *RequestLog) GetCost() float64 {
577 if m != nil && m.Cost != nil {
578 return *m.Cost
579 }
580 return 0
581}
582
583func (m *RequestLog) GetTaskQueueName() string {
584 if m != nil && m.TaskQueueName != nil {
585 return *m.TaskQueueName
586 }
587 return ""
588}
589
590func (m *RequestLog) GetTaskName() string {
591 if m != nil && m.TaskName != nil {
592 return *m.TaskName
593 }
594 return ""
595}
596
597func (m *RequestLog) GetWasLoadingRequest() bool {
598 if m != nil && m.WasLoadingRequest != nil {
599 return *m.WasLoadingRequest
600 }
601 return false
602}
603
604func (m *RequestLog) GetPendingTime() int64 {
605 if m != nil && m.PendingTime != nil {
606 return *m.PendingTime
607 }
608 return 0
609}
610
611func (m *RequestLog) GetReplicaIndex() int32 {
612 if m != nil && m.ReplicaIndex != nil {
613 return *m.ReplicaIndex
614 }
615 return Default_RequestLog_ReplicaIndex
616}
617
618func (m *RequestLog) GetFinished() bool {
619 if m != nil && m.Finished != nil {
620 return *m.Finished
621 }
622 return Default_RequestLog_Finished
623}
624
625func (m *RequestLog) GetCloneKey() []byte {
626 if m != nil {
627 return m.CloneKey
628 }
629 return nil
630}
631
632func (m *RequestLog) GetLine() []*LogLine {
633 if m != nil {
634 return m.Line
635 }
636 return nil
637}
638
639func (m *RequestLog) GetLinesIncomplete() bool {
640 if m != nil && m.LinesIncomplete != nil {
641 return *m.LinesIncomplete
642 }
643 return false
644}
645
646func (m *RequestLog) GetAppEngineRelease() []byte {
647 if m != nil {
648 return m.AppEngineRelease
649 }
650 return nil
651}
652
653func (m *RequestLog) GetExitReason() int32 {
654 if m != nil && m.ExitReason != nil {
655 return *m.ExitReason
656 }
657 return 0
658}
659
660func (m *RequestLog) GetWasThrottledForTime() bool {
661 if m != nil && m.WasThrottledForTime != nil {
662 return *m.WasThrottledForTime
663 }
664 return false
665}
666
667func (m *RequestLog) GetWasThrottledForRequests() bool {
668 if m != nil && m.WasThrottledForRequests != nil {
669 return *m.WasThrottledForRequests
670 }
671 return false
672}
673
674func (m *RequestLog) GetThrottledTime() int64 {
675 if m != nil && m.ThrottledTime != nil {
676 return *m.ThrottledTime
677 }
678 return 0
679}
680
681func (m *RequestLog) GetServerName() []byte {
682 if m != nil {
683 return m.ServerName
684 }
685 return nil
686}
687
688type LogModuleVersion struct {
689 ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
690 VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
691 XXX_NoUnkeyedLiteral struct{} `json:"-"`
692 XXX_unrecognized []byte `json:"-"`
693 XXX_sizecache int32 `json:"-"`
694}
695
696func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
697func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
698func (*LogModuleVersion) ProtoMessage() {}
699func (*LogModuleVersion) Descriptor() ([]byte, []int) {
700 return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
701}
702func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
703 return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
704}
705func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
706 return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
707}
708func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
709 xxx_messageInfo_LogModuleVersion.Merge(dst, src)
710}
711func (m *LogModuleVersion) XXX_Size() int {
712 return xxx_messageInfo_LogModuleVersion.Size(m)
713}
714func (m *LogModuleVersion) XXX_DiscardUnknown() {
715 xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
716}
717
718var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
719
720const Default_LogModuleVersion_ModuleId string = "default"
721
722func (m *LogModuleVersion) GetModuleId() string {
723 if m != nil && m.ModuleId != nil {
724 return *m.ModuleId
725 }
726 return Default_LogModuleVersion_ModuleId
727}
728
729func (m *LogModuleVersion) GetVersionId() string {
730 if m != nil && m.VersionId != nil {
731 return *m.VersionId
732 }
733 return ""
734}
735
736type LogReadRequest struct {
737 AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
738 VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
739 ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
740 StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
741 EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
742 Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
743 RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
744 MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
745 IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
746 Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
747 CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
748 HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
749 ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
750 IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
751 AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
752 IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
753 IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
754 CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
755 NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
756 XXX_NoUnkeyedLiteral struct{} `json:"-"`
757 XXX_unrecognized []byte `json:"-"`
758 XXX_sizecache int32 `json:"-"`
759}
760
761func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
762func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
763func (*LogReadRequest) ProtoMessage() {}
764func (*LogReadRequest) Descriptor() ([]byte, []int) {
765 return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
766}
767func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
768 return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
769}
770func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
771 return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
772}
773func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
774 xxx_messageInfo_LogReadRequest.Merge(dst, src)
775}
776func (m *LogReadRequest) XXX_Size() int {
777 return xxx_messageInfo_LogReadRequest.Size(m)
778}
779func (m *LogReadRequest) XXX_DiscardUnknown() {
780 xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
781}
782
783var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
784
785func (m *LogReadRequest) GetAppId() string {
786 if m != nil && m.AppId != nil {
787 return *m.AppId
788 }
789 return ""
790}
791
792func (m *LogReadRequest) GetVersionId() []string {
793 if m != nil {
794 return m.VersionId
795 }
796 return nil
797}
798
799func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
800 if m != nil {
801 return m.ModuleVersion
802 }
803 return nil
804}
805
806func (m *LogReadRequest) GetStartTime() int64 {
807 if m != nil && m.StartTime != nil {
808 return *m.StartTime
809 }
810 return 0
811}
812
813func (m *LogReadRequest) GetEndTime() int64 {
814 if m != nil && m.EndTime != nil {
815 return *m.EndTime
816 }
817 return 0
818}
819
820func (m *LogReadRequest) GetOffset() *LogOffset {
821 if m != nil {
822 return m.Offset
823 }
824 return nil
825}
826
827func (m *LogReadRequest) GetRequestId() [][]byte {
828 if m != nil {
829 return m.RequestId
830 }
831 return nil
832}
833
834func (m *LogReadRequest) GetMinimumLogLevel() int32 {
835 if m != nil && m.MinimumLogLevel != nil {
836 return *m.MinimumLogLevel
837 }
838 return 0
839}
840
841func (m *LogReadRequest) GetIncludeIncomplete() bool {
842 if m != nil && m.IncludeIncomplete != nil {
843 return *m.IncludeIncomplete
844 }
845 return false
846}
847
848func (m *LogReadRequest) GetCount() int64 {
849 if m != nil && m.Count != nil {
850 return *m.Count
851 }
852 return 0
853}
854
855func (m *LogReadRequest) GetCombinedLogRegex() string {
856 if m != nil && m.CombinedLogRegex != nil {
857 return *m.CombinedLogRegex
858 }
859 return ""
860}
861
862func (m *LogReadRequest) GetHostRegex() string {
863 if m != nil && m.HostRegex != nil {
864 return *m.HostRegex
865 }
866 return ""
867}
868
869func (m *LogReadRequest) GetReplicaIndex() int32 {
870 if m != nil && m.ReplicaIndex != nil {
871 return *m.ReplicaIndex
872 }
873 return 0
874}
875
876func (m *LogReadRequest) GetIncludeAppLogs() bool {
877 if m != nil && m.IncludeAppLogs != nil {
878 return *m.IncludeAppLogs
879 }
880 return false
881}
882
883func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
884 if m != nil && m.AppLogsPerRequest != nil {
885 return *m.AppLogsPerRequest
886 }
887 return 0
888}
889
890func (m *LogReadRequest) GetIncludeHost() bool {
891 if m != nil && m.IncludeHost != nil {
892 return *m.IncludeHost
893 }
894 return false
895}
896
897func (m *LogReadRequest) GetIncludeAll() bool {
898 if m != nil && m.IncludeAll != nil {
899 return *m.IncludeAll
900 }
901 return false
902}
903
904func (m *LogReadRequest) GetCacheIterator() bool {
905 if m != nil && m.CacheIterator != nil {
906 return *m.CacheIterator
907 }
908 return false
909}
910
911func (m *LogReadRequest) GetNumShards() int32 {
912 if m != nil && m.NumShards != nil {
913 return *m.NumShards
914 }
915 return 0
916}
917
918type LogReadResponse struct {
919 Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
920 Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
921 LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
922 XXX_NoUnkeyedLiteral struct{} `json:"-"`
923 XXX_unrecognized []byte `json:"-"`
924 XXX_sizecache int32 `json:"-"`
925}
926
927func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
928func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
929func (*LogReadResponse) ProtoMessage() {}
930func (*LogReadResponse) Descriptor() ([]byte, []int) {
931 return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
932}
933func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
934 return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
935}
936func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
937 return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
938}
939func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
940 xxx_messageInfo_LogReadResponse.Merge(dst, src)
941}
942func (m *LogReadResponse) XXX_Size() int {
943 return xxx_messageInfo_LogReadResponse.Size(m)
944}
945func (m *LogReadResponse) XXX_DiscardUnknown() {
946 xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
947}
948
949var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
950
951func (m *LogReadResponse) GetLog() []*RequestLog {
952 if m != nil {
953 return m.Log
954 }
955 return nil
956}
957
958func (m *LogReadResponse) GetOffset() *LogOffset {
959 if m != nil {
960 return m.Offset
961 }
962 return nil
963}
964
965func (m *LogReadResponse) GetLastEndTime() int64 {
966 if m != nil && m.LastEndTime != nil {
967 return *m.LastEndTime
968 }
969 return 0
970}
971
972type LogUsageRecord struct {
973 VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
974 StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
975 EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
976 Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
977 TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
978 Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
979 XXX_NoUnkeyedLiteral struct{} `json:"-"`
980 XXX_unrecognized []byte `json:"-"`
981 XXX_sizecache int32 `json:"-"`
982}
983
984func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
985func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
986func (*LogUsageRecord) ProtoMessage() {}
987func (*LogUsageRecord) Descriptor() ([]byte, []int) {
988 return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
989}
990func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
991 return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
992}
993func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
994 return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
995}
996func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
997 xxx_messageInfo_LogUsageRecord.Merge(dst, src)
998}
999func (m *LogUsageRecord) XXX_Size() int {
1000 return xxx_messageInfo_LogUsageRecord.Size(m)
1001}
1002func (m *LogUsageRecord) XXX_DiscardUnknown() {
1003 xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
1004}
1005
1006var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
1007
1008func (m *LogUsageRecord) GetVersionId() string {
1009 if m != nil && m.VersionId != nil {
1010 return *m.VersionId
1011 }
1012 return ""
1013}
1014
1015func (m *LogUsageRecord) GetStartTime() int32 {
1016 if m != nil && m.StartTime != nil {
1017 return *m.StartTime
1018 }
1019 return 0
1020}
1021
1022func (m *LogUsageRecord) GetEndTime() int32 {
1023 if m != nil && m.EndTime != nil {
1024 return *m.EndTime
1025 }
1026 return 0
1027}
1028
1029func (m *LogUsageRecord) GetCount() int64 {
1030 if m != nil && m.Count != nil {
1031 return *m.Count
1032 }
1033 return 0
1034}
1035
1036func (m *LogUsageRecord) GetTotalSize() int64 {
1037 if m != nil && m.TotalSize != nil {
1038 return *m.TotalSize
1039 }
1040 return 0
1041}
1042
1043func (m *LogUsageRecord) GetRecords() int32 {
1044 if m != nil && m.Records != nil {
1045 return *m.Records
1046 }
1047 return 0
1048}
1049
1050type LogUsageRequest struct {
1051 AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
1052 VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
1053 StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
1054 EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
1055 ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
1056 CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
1057 UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
1058 VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
1059 XXX_NoUnkeyedLiteral struct{} `json:"-"`
1060 XXX_unrecognized []byte `json:"-"`
1061 XXX_sizecache int32 `json:"-"`
1062}
1063
1064func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
1065func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
1066func (*LogUsageRequest) ProtoMessage() {}
1067func (*LogUsageRequest) Descriptor() ([]byte, []int) {
1068 return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
1069}
1070func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
1071 return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
1072}
1073func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1074 return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
1075}
1076func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
1077 xxx_messageInfo_LogUsageRequest.Merge(dst, src)
1078}
1079func (m *LogUsageRequest) XXX_Size() int {
1080 return xxx_messageInfo_LogUsageRequest.Size(m)
1081}
1082func (m *LogUsageRequest) XXX_DiscardUnknown() {
1083 xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
1084}
1085
1086var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
1087
1088const Default_LogUsageRequest_ResolutionHours uint32 = 1
1089
1090func (m *LogUsageRequest) GetAppId() string {
1091 if m != nil && m.AppId != nil {
1092 return *m.AppId
1093 }
1094 return ""
1095}
1096
1097func (m *LogUsageRequest) GetVersionId() []string {
1098 if m != nil {
1099 return m.VersionId
1100 }
1101 return nil
1102}
1103
1104func (m *LogUsageRequest) GetStartTime() int32 {
1105 if m != nil && m.StartTime != nil {
1106 return *m.StartTime
1107 }
1108 return 0
1109}
1110
1111func (m *LogUsageRequest) GetEndTime() int32 {
1112 if m != nil && m.EndTime != nil {
1113 return *m.EndTime
1114 }
1115 return 0
1116}
1117
1118func (m *LogUsageRequest) GetResolutionHours() uint32 {
1119 if m != nil && m.ResolutionHours != nil {
1120 return *m.ResolutionHours
1121 }
1122 return Default_LogUsageRequest_ResolutionHours
1123}
1124
1125func (m *LogUsageRequest) GetCombineVersions() bool {
1126 if m != nil && m.CombineVersions != nil {
1127 return *m.CombineVersions
1128 }
1129 return false
1130}
1131
1132func (m *LogUsageRequest) GetUsageVersion() int32 {
1133 if m != nil && m.UsageVersion != nil {
1134 return *m.UsageVersion
1135 }
1136 return 0
1137}
1138
1139func (m *LogUsageRequest) GetVersionsOnly() bool {
1140 if m != nil && m.VersionsOnly != nil {
1141 return *m.VersionsOnly
1142 }
1143 return false
1144}
1145
1146type LogUsageResponse struct {
1147 Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
1148 Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
1149 XXX_NoUnkeyedLiteral struct{} `json:"-"`
1150 XXX_unrecognized []byte `json:"-"`
1151 XXX_sizecache int32 `json:"-"`
1152}
1153
1154func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
1155func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
1156func (*LogUsageResponse) ProtoMessage() {}
1157func (*LogUsageResponse) Descriptor() ([]byte, []int) {
1158 return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
1159}
1160func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
1161 return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
1162}
1163func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
1164 return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
1165}
1166func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
1167 xxx_messageInfo_LogUsageResponse.Merge(dst, src)
1168}
1169func (m *LogUsageResponse) XXX_Size() int {
1170 return xxx_messageInfo_LogUsageResponse.Size(m)
1171}
1172func (m *LogUsageResponse) XXX_DiscardUnknown() {
1173 xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
1174}
1175
1176var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
1177
1178func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
1179 if m != nil {
1180 return m.Usage
1181 }
1182 return nil
1183}
1184
1185func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
1186 if m != nil {
1187 return m.Summary
1188 }
1189 return nil
1190}
1191
1192func init() {
1193 proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError")
1194 proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine")
1195 proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup")
1196 proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest")
1197 proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest")
1198 proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset")
1199 proto.RegisterType((*LogLine)(nil), "appengine.LogLine")
1200 proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog")
1201 proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion")
1202 proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest")
1203 proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse")
1204 proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord")
1205 proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest")
1206 proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse")
1207}
1208
1209func init() {
1210 proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
1211}
1212
1213var fileDescriptor_log_service_f054fd4b5012319d = []byte{
1214 // 1553 bytes of a gzipped FileDescriptorProto
1215 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
1216 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
1217 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40,
1218 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72,
1219 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d,
1220 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9,
1221 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32,
1222 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5,
1223 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3,
1224 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17,
1225 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f,
1226 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3,
1227 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46,
1228 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26,
1229 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31,
1230 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5,
1231 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd,
1232 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46,
1233 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe,
1234 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc,
1235 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1,
1236 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f,
1237 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39,
1238 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35,
1239 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd,
1240 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b,
1241 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c,
1242 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0,
1243 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36,
1244 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f,
1245 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a,
1246 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed,
1247 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95,
1248 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91,
1249 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87,
1250 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1,
1251 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5,
1252 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf,
1253 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce,
1254 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66,
1255 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0,
1256 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62,
1257 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c,
1258 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba,
1259 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a,
1260 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa,
1261 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4,
1262 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1,
1263 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62,
1264 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b,
1265 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4,
1266 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90,
1267 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78,
1268 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff,
1269 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56,
1270 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5,
1271 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b,
1272 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21,
1273 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69,
1274 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98,
1275 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51,
1276 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17,
1277 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c,
1278 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f,
1279 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f,
1280 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b,
1281 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10,
1282 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54,
1283 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d,
1284 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98,
1285 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0,
1286 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0,
1287 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61,
1288 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83,
1289 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3,
1290 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75,
1291 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38,
1292 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37,
1293 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e,
1294 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d,
1295 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65,
1296 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43,
1297 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06,
1298 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2,
1299 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea,
1300 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64,
1301 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1,
1302 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf,
1303 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36,
1304 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53,
1305 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7,
1306 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56,
1307 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b,
1308 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77,
1309 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05,
1310 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd,
1311 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00,
1312 0x00,
1313}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 0000000..8981dc4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
1syntax = "proto2";
2option go_package = "log";
3
4package appengine;
5
6message LogServiceError {
7 enum ErrorCode {
8 OK = 0;
9 INVALID_REQUEST = 1;
10 STORAGE_ERROR = 2;
11 }
12}
13
14message UserAppLogLine {
15 required int64 timestamp_usec = 1;
16 required int64 level = 2;
17 required string message = 3;
18}
19
20message UserAppLogGroup {
21 repeated UserAppLogLine log_line = 2;
22}
23
24message FlushRequest {
25 optional bytes logs = 1;
26}
27
28message SetStatusRequest {
29 required string status = 1;
30}
31
32
33message LogOffset {
34 optional bytes request_id = 1;
35}
36
37message LogLine {
38 required int64 time = 1;
39 required int32 level = 2;
40 required string log_message = 3;
41}
42
43message RequestLog {
44 required string app_id = 1;
45 optional string module_id = 37 [default="default"];
46 required string version_id = 2;
47 required bytes request_id = 3;
48 optional LogOffset offset = 35;
49 required string ip = 4;
50 optional string nickname = 5;
51 required int64 start_time = 6;
52 required int64 end_time = 7;
53 required int64 latency = 8;
54 required int64 mcycles = 9;
55 required string method = 10;
56 required string resource = 11;
57 required string http_version = 12;
58 required int32 status = 13;
59 required int64 response_size = 14;
60 optional string referrer = 15;
61 optional string user_agent = 16;
62 required string url_map_entry = 17;
63 required string combined = 18;
64 optional int64 api_mcycles = 19;
65 optional string host = 20;
66 optional double cost = 21;
67
68 optional string task_queue_name = 22;
69 optional string task_name = 23;
70
71 optional bool was_loading_request = 24;
72 optional int64 pending_time = 25;
73 optional int32 replica_index = 26 [default = -1];
74 optional bool finished = 27 [default = true];
75 optional bytes clone_key = 28;
76
77 repeated LogLine line = 29;
78
79 optional bool lines_incomplete = 36;
80 optional bytes app_engine_release = 38;
81
82 optional int32 exit_reason = 30;
83 optional bool was_throttled_for_time = 31;
84 optional bool was_throttled_for_requests = 32;
85 optional int64 throttled_time = 33;
86
87 optional bytes server_name = 34;
88}
89
90message LogModuleVersion {
91 optional string module_id = 1 [default="default"];
92 optional string version_id = 2;
93}
94
95message LogReadRequest {
96 required string app_id = 1;
97 repeated string version_id = 2;
98 repeated LogModuleVersion module_version = 19;
99
100 optional int64 start_time = 3;
101 optional int64 end_time = 4;
102 optional LogOffset offset = 5;
103 repeated bytes request_id = 6;
104
105 optional int32 minimum_log_level = 7;
106 optional bool include_incomplete = 8;
107 optional int64 count = 9;
108
109 optional string combined_log_regex = 14;
110 optional string host_regex = 15;
111 optional int32 replica_index = 16;
112
113 optional bool include_app_logs = 10;
114 optional int32 app_logs_per_request = 17;
115 optional bool include_host = 11;
116 optional bool include_all = 12;
117 optional bool cache_iterator = 13;
118 optional int32 num_shards = 18;
119}
120
121message LogReadResponse {
122 repeated RequestLog log = 1;
123 optional LogOffset offset = 2;
124 optional int64 last_end_time = 3;
125}
126
127message LogUsageRecord {
128 optional string version_id = 1;
129 optional int32 start_time = 2;
130 optional int32 end_time = 3;
131 optional int64 count = 4;
132 optional int64 total_size = 5;
133 optional int32 records = 6;
134}
135
136message LogUsageRequest {
137 required string app_id = 1;
138 repeated string version_id = 2;
139 optional int32 start_time = 3;
140 optional int32 end_time = 4;
141 optional uint32 resolution_hours = 5 [default = 1];
142 optional bool combine_versions = 6;
143 optional int32 usage_version = 7;
144 optional bool versions_only = 8;
145}
146
147message LogUsageResponse {
148 repeated LogUsageRecord usage = 1;
149 optional LogUsageRecord summary = 2;
150}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 0000000..1e76531
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,16 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// +build appengine
6
7package internal
8
9import (
10 "appengine_internal"
11)
12
13func Main() {
14 MainPath = ""
15 appengine_internal.Main()
16}
diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go
new file mode 100644
index 0000000..357dce4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_common.go
@@ -0,0 +1,7 @@
1package internal
2
3// MainPath stores the file path of the main package. On App Engine Standard
4// using Go version 1.9 and below, this will be unset. On App Engine Flex and
5// App Engine Standard second-gen (Go 1.11 and above), this will be the
6// filepath to package main.
7var MainPath string
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 0000000..ddb79a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,69 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// +build !appengine
6
7package internal
8
9import (
10 "io"
11 "log"
12 "net/http"
13 "net/url"
14 "os"
15 "path/filepath"
16 "runtime"
17)
18
19func Main() {
20 MainPath = filepath.Dir(findMainPath())
21 installHealthChecker(http.DefaultServeMux)
22
23 port := "8080"
24 if s := os.Getenv("PORT"); s != "" {
25 port = s
26 }
27
28 host := ""
29 if IsDevAppServer() {
30 host = "127.0.0.1"
31 }
32 if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
33 log.Fatalf("http.ListenAndServe: %v", err)
34 }
35}
36
37// Find the path to package main by looking at the root Caller.
38func findMainPath() string {
39 pc := make([]uintptr, 100)
40 n := runtime.Callers(2, pc)
41 frames := runtime.CallersFrames(pc[:n])
42 for {
43 frame, more := frames.Next()
44 // Tests won't have package main, instead they have testing.tRunner
45 if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
46 return frame.File
47 }
48 if !more {
49 break
50 }
51 }
52 return ""
53}
54
55func installHealthChecker(mux *http.ServeMux) {
56 // If no health check handler has been installed by this point, add a trivial one.
57 const healthPath = "/_ah/health"
58 hreq := &http.Request{
59 Method: "GET",
60 URL: &url.URL{
61 Path: healthPath,
62 },
63 }
64 if _, pat := mux.Handler(hreq); pat != healthPath {
65 mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
66 io.WriteString(w, "ok")
67 })
68 }
69}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 0000000..c4ba63b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,60 @@
1// Copyright 2014 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package internal
6
7// This file has code for accessing metadata.
8//
9// References:
10// https://cloud.google.com/compute/docs/metadata
11
12import (
13 "fmt"
14 "io/ioutil"
15 "net/http"
16 "net/url"
17)
18
19const (
20 metadataHost = "metadata"
21 metadataPath = "/computeMetadata/v1/"
22)
23
24var (
25 metadataRequestHeaders = http.Header{
26 "Metadata-Flavor": []string{"Google"},
27 }
28)
29
30// TODO(dsymonds): Do we need to support default values, like Python?
31func mustGetMetadata(key string) []byte {
32 b, err := getMetadata(key)
33 if err != nil {
34 panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
35 }
36 return b
37}
38
39func getMetadata(key string) ([]byte, error) {
40 // TODO(dsymonds): May need to use url.Parse to support keys with query args.
41 req := &http.Request{
42 Method: "GET",
43 URL: &url.URL{
44 Scheme: "http",
45 Host: metadataHost,
46 Path: metadataPath + key,
47 },
48 Header: metadataRequestHeaders,
49 Host: metadataHost,
50 }
51 resp, err := http.DefaultClient.Do(req)
52 if err != nil {
53 return nil, err
54 }
55 defer resp.Body.Close()
56 if resp.StatusCode != 200 {
57 return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
58 }
59 return ioutil.ReadAll(resp.Body)
60}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 0000000..ddfc0c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,786 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google.golang.org/appengine/internal/modules/modules_service.proto
3
4package modules
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21type ModulesServiceError_ErrorCode int32
22
23const (
24 ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
25 ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
26 ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
27 ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
28 ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
29 ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
30)
31
32var ModulesServiceError_ErrorCode_name = map[int32]string{
33 0: "OK",
34 1: "INVALID_MODULE",
35 2: "INVALID_VERSION",
36 3: "INVALID_INSTANCES",
37 4: "TRANSIENT_ERROR",
38 5: "UNEXPECTED_STATE",
39}
40var ModulesServiceError_ErrorCode_value = map[string]int32{
41 "OK": 0,
42 "INVALID_MODULE": 1,
43 "INVALID_VERSION": 2,
44 "INVALID_INSTANCES": 3,
45 "TRANSIENT_ERROR": 4,
46 "UNEXPECTED_STATE": 5,
47}
48
49func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
50 p := new(ModulesServiceError_ErrorCode)
51 *p = x
52 return p
53}
54func (x ModulesServiceError_ErrorCode) String() string {
55 return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
56}
57func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
58 value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
59 if err != nil {
60 return err
61 }
62 *x = ModulesServiceError_ErrorCode(value)
63 return nil
64}
65func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
66 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0}
67}
68
69type ModulesServiceError struct {
70 XXX_NoUnkeyedLiteral struct{} `json:"-"`
71 XXX_unrecognized []byte `json:"-"`
72 XXX_sizecache int32 `json:"-"`
73}
74
75func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
76func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
77func (*ModulesServiceError) ProtoMessage() {}
78func (*ModulesServiceError) Descriptor() ([]byte, []int) {
79 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0}
80}
81func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error {
82 return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b)
83}
84func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
85 return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic)
86}
87func (dst *ModulesServiceError) XXX_Merge(src proto.Message) {
88 xxx_messageInfo_ModulesServiceError.Merge(dst, src)
89}
90func (m *ModulesServiceError) XXX_Size() int {
91 return xxx_messageInfo_ModulesServiceError.Size(m)
92}
93func (m *ModulesServiceError) XXX_DiscardUnknown() {
94 xxx_messageInfo_ModulesServiceError.DiscardUnknown(m)
95}
96
97var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo
98
99type GetModulesRequest struct {
100 XXX_NoUnkeyedLiteral struct{} `json:"-"`
101 XXX_unrecognized []byte `json:"-"`
102 XXX_sizecache int32 `json:"-"`
103}
104
105func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
106func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
107func (*GetModulesRequest) ProtoMessage() {}
108func (*GetModulesRequest) Descriptor() ([]byte, []int) {
109 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1}
110}
111func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error {
112 return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b)
113}
114func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
115 return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic)
116}
117func (dst *GetModulesRequest) XXX_Merge(src proto.Message) {
118 xxx_messageInfo_GetModulesRequest.Merge(dst, src)
119}
120func (m *GetModulesRequest) XXX_Size() int {
121 return xxx_messageInfo_GetModulesRequest.Size(m)
122}
123func (m *GetModulesRequest) XXX_DiscardUnknown() {
124 xxx_messageInfo_GetModulesRequest.DiscardUnknown(m)
125}
126
127var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo
128
129type GetModulesResponse struct {
130 Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
131 XXX_NoUnkeyedLiteral struct{} `json:"-"`
132 XXX_unrecognized []byte `json:"-"`
133 XXX_sizecache int32 `json:"-"`
134}
135
136func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
137func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
138func (*GetModulesResponse) ProtoMessage() {}
139func (*GetModulesResponse) Descriptor() ([]byte, []int) {
140 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2}
141}
142func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error {
143 return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b)
144}
145func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
146 return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic)
147}
148func (dst *GetModulesResponse) XXX_Merge(src proto.Message) {
149 xxx_messageInfo_GetModulesResponse.Merge(dst, src)
150}
151func (m *GetModulesResponse) XXX_Size() int {
152 return xxx_messageInfo_GetModulesResponse.Size(m)
153}
154func (m *GetModulesResponse) XXX_DiscardUnknown() {
155 xxx_messageInfo_GetModulesResponse.DiscardUnknown(m)
156}
157
158var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo
159
160func (m *GetModulesResponse) GetModule() []string {
161 if m != nil {
162 return m.Module
163 }
164 return nil
165}
166
167type GetVersionsRequest struct {
168 Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
169 XXX_NoUnkeyedLiteral struct{} `json:"-"`
170 XXX_unrecognized []byte `json:"-"`
171 XXX_sizecache int32 `json:"-"`
172}
173
174func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
175func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
176func (*GetVersionsRequest) ProtoMessage() {}
177func (*GetVersionsRequest) Descriptor() ([]byte, []int) {
178 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3}
179}
180func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error {
181 return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b)
182}
183func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
184 return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic)
185}
186func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) {
187 xxx_messageInfo_GetVersionsRequest.Merge(dst, src)
188}
189func (m *GetVersionsRequest) XXX_Size() int {
190 return xxx_messageInfo_GetVersionsRequest.Size(m)
191}
192func (m *GetVersionsRequest) XXX_DiscardUnknown() {
193 xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m)
194}
195
196var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo
197
198func (m *GetVersionsRequest) GetModule() string {
199 if m != nil && m.Module != nil {
200 return *m.Module
201 }
202 return ""
203}
204
205type GetVersionsResponse struct {
206 Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
207 XXX_NoUnkeyedLiteral struct{} `json:"-"`
208 XXX_unrecognized []byte `json:"-"`
209 XXX_sizecache int32 `json:"-"`
210}
211
212func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
213func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
214func (*GetVersionsResponse) ProtoMessage() {}
215func (*GetVersionsResponse) Descriptor() ([]byte, []int) {
216 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4}
217}
218func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error {
219 return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b)
220}
221func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
222 return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic)
223}
224func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) {
225 xxx_messageInfo_GetVersionsResponse.Merge(dst, src)
226}
227func (m *GetVersionsResponse) XXX_Size() int {
228 return xxx_messageInfo_GetVersionsResponse.Size(m)
229}
230func (m *GetVersionsResponse) XXX_DiscardUnknown() {
231 xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m)
232}
233
234var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo
235
236func (m *GetVersionsResponse) GetVersion() []string {
237 if m != nil {
238 return m.Version
239 }
240 return nil
241}
242
243type GetDefaultVersionRequest struct {
244 Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
245 XXX_NoUnkeyedLiteral struct{} `json:"-"`
246 XXX_unrecognized []byte `json:"-"`
247 XXX_sizecache int32 `json:"-"`
248}
249
250func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
251func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
252func (*GetDefaultVersionRequest) ProtoMessage() {}
253func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) {
254 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5}
255}
256func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error {
257 return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b)
258}
259func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
260 return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic)
261}
262func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) {
263 xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src)
264}
265func (m *GetDefaultVersionRequest) XXX_Size() int {
266 return xxx_messageInfo_GetDefaultVersionRequest.Size(m)
267}
268func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() {
269 xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m)
270}
271
272var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo
273
274func (m *GetDefaultVersionRequest) GetModule() string {
275 if m != nil && m.Module != nil {
276 return *m.Module
277 }
278 return ""
279}
280
281type GetDefaultVersionResponse struct {
282 Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
283 XXX_NoUnkeyedLiteral struct{} `json:"-"`
284 XXX_unrecognized []byte `json:"-"`
285 XXX_sizecache int32 `json:"-"`
286}
287
288func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
289func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
290func (*GetDefaultVersionResponse) ProtoMessage() {}
291func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) {
292 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6}
293}
294func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error {
295 return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b)
296}
297func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
298 return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic)
299}
300func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) {
301 xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src)
302}
303func (m *GetDefaultVersionResponse) XXX_Size() int {
304 return xxx_messageInfo_GetDefaultVersionResponse.Size(m)
305}
306func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() {
307 xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m)
308}
309
310var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo
311
312func (m *GetDefaultVersionResponse) GetVersion() string {
313 if m != nil && m.Version != nil {
314 return *m.Version
315 }
316 return ""
317}
318
319type GetNumInstancesRequest struct {
320 Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
321 Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
322 XXX_NoUnkeyedLiteral struct{} `json:"-"`
323 XXX_unrecognized []byte `json:"-"`
324 XXX_sizecache int32 `json:"-"`
325}
326
327func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
328func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
329func (*GetNumInstancesRequest) ProtoMessage() {}
330func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) {
331 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7}
332}
333func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error {
334 return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b)
335}
336func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
337 return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic)
338}
339func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) {
340 xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src)
341}
342func (m *GetNumInstancesRequest) XXX_Size() int {
343 return xxx_messageInfo_GetNumInstancesRequest.Size(m)
344}
345func (m *GetNumInstancesRequest) XXX_DiscardUnknown() {
346 xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m)
347}
348
349var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo
350
351func (m *GetNumInstancesRequest) GetModule() string {
352 if m != nil && m.Module != nil {
353 return *m.Module
354 }
355 return ""
356}
357
358func (m *GetNumInstancesRequest) GetVersion() string {
359 if m != nil && m.Version != nil {
360 return *m.Version
361 }
362 return ""
363}
364
365type GetNumInstancesResponse struct {
366 Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
367 XXX_NoUnkeyedLiteral struct{} `json:"-"`
368 XXX_unrecognized []byte `json:"-"`
369 XXX_sizecache int32 `json:"-"`
370}
371
372func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
373func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
374func (*GetNumInstancesResponse) ProtoMessage() {}
375func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) {
376 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8}
377}
378func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error {
379 return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b)
380}
381func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
382 return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic)
383}
384func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) {
385 xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src)
386}
387func (m *GetNumInstancesResponse) XXX_Size() int {
388 return xxx_messageInfo_GetNumInstancesResponse.Size(m)
389}
390func (m *GetNumInstancesResponse) XXX_DiscardUnknown() {
391 xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m)
392}
393
394var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo
395
396func (m *GetNumInstancesResponse) GetInstances() int64 {
397 if m != nil && m.Instances != nil {
398 return *m.Instances
399 }
400 return 0
401}
402
403type SetNumInstancesRequest struct {
404 Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
405 Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
406 Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
407 XXX_NoUnkeyedLiteral struct{} `json:"-"`
408 XXX_unrecognized []byte `json:"-"`
409 XXX_sizecache int32 `json:"-"`
410}
411
412func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
413func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
414func (*SetNumInstancesRequest) ProtoMessage() {}
415func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) {
416 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9}
417}
418func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error {
419 return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b)
420}
421func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
422 return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic)
423}
424func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) {
425 xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src)
426}
427func (m *SetNumInstancesRequest) XXX_Size() int {
428 return xxx_messageInfo_SetNumInstancesRequest.Size(m)
429}
430func (m *SetNumInstancesRequest) XXX_DiscardUnknown() {
431 xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m)
432}
433
434var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo
435
436func (m *SetNumInstancesRequest) GetModule() string {
437 if m != nil && m.Module != nil {
438 return *m.Module
439 }
440 return ""
441}
442
443func (m *SetNumInstancesRequest) GetVersion() string {
444 if m != nil && m.Version != nil {
445 return *m.Version
446 }
447 return ""
448}
449
450func (m *SetNumInstancesRequest) GetInstances() int64 {
451 if m != nil && m.Instances != nil {
452 return *m.Instances
453 }
454 return 0
455}
456
457type SetNumInstancesResponse struct {
458 XXX_NoUnkeyedLiteral struct{} `json:"-"`
459 XXX_unrecognized []byte `json:"-"`
460 XXX_sizecache int32 `json:"-"`
461}
462
463func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
464func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
465func (*SetNumInstancesResponse) ProtoMessage() {}
466func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) {
467 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10}
468}
469func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error {
470 return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b)
471}
472func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
473 return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic)
474}
475func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) {
476 xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src)
477}
478func (m *SetNumInstancesResponse) XXX_Size() int {
479 return xxx_messageInfo_SetNumInstancesResponse.Size(m)
480}
481func (m *SetNumInstancesResponse) XXX_DiscardUnknown() {
482 xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m)
483}
484
485var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo
486
487type StartModuleRequest struct {
488 Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
489 Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
490 XXX_NoUnkeyedLiteral struct{} `json:"-"`
491 XXX_unrecognized []byte `json:"-"`
492 XXX_sizecache int32 `json:"-"`
493}
494
495func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
496func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
497func (*StartModuleRequest) ProtoMessage() {}
498func (*StartModuleRequest) Descriptor() ([]byte, []int) {
499 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11}
500}
501func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error {
502 return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b)
503}
504func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
505 return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic)
506}
507func (dst *StartModuleRequest) XXX_Merge(src proto.Message) {
508 xxx_messageInfo_StartModuleRequest.Merge(dst, src)
509}
510func (m *StartModuleRequest) XXX_Size() int {
511 return xxx_messageInfo_StartModuleRequest.Size(m)
512}
513func (m *StartModuleRequest) XXX_DiscardUnknown() {
514 xxx_messageInfo_StartModuleRequest.DiscardUnknown(m)
515}
516
517var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo
518
519func (m *StartModuleRequest) GetModule() string {
520 if m != nil && m.Module != nil {
521 return *m.Module
522 }
523 return ""
524}
525
526func (m *StartModuleRequest) GetVersion() string {
527 if m != nil && m.Version != nil {
528 return *m.Version
529 }
530 return ""
531}
532
533type StartModuleResponse struct {
534 XXX_NoUnkeyedLiteral struct{} `json:"-"`
535 XXX_unrecognized []byte `json:"-"`
536 XXX_sizecache int32 `json:"-"`
537}
538
539func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
540func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
541func (*StartModuleResponse) ProtoMessage() {}
542func (*StartModuleResponse) Descriptor() ([]byte, []int) {
543 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12}
544}
545func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error {
546 return xxx_messageInfo_StartModuleResponse.Unmarshal(m, b)
547}
548func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
549 return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic)
550}
551func (dst *StartModuleResponse) XXX_Merge(src proto.Message) {
552 xxx_messageInfo_StartModuleResponse.Merge(dst, src)
553}
554func (m *StartModuleResponse) XXX_Size() int {
555 return xxx_messageInfo_StartModuleResponse.Size(m)
556}
557func (m *StartModuleResponse) XXX_DiscardUnknown() {
558 xxx_messageInfo_StartModuleResponse.DiscardUnknown(m)
559}
560
561var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo
562
563type StopModuleRequest struct {
564 Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
565 Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
566 XXX_NoUnkeyedLiteral struct{} `json:"-"`
567 XXX_unrecognized []byte `json:"-"`
568 XXX_sizecache int32 `json:"-"`
569}
570
571func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
572func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
573func (*StopModuleRequest) ProtoMessage() {}
574func (*StopModuleRequest) Descriptor() ([]byte, []int) {
575 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13}
576}
577func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error {
578 return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b)
579}
580func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
581 return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic)
582}
583func (dst *StopModuleRequest) XXX_Merge(src proto.Message) {
584 xxx_messageInfo_StopModuleRequest.Merge(dst, src)
585}
586func (m *StopModuleRequest) XXX_Size() int {
587 return xxx_messageInfo_StopModuleRequest.Size(m)
588}
589func (m *StopModuleRequest) XXX_DiscardUnknown() {
590 xxx_messageInfo_StopModuleRequest.DiscardUnknown(m)
591}
592
593var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo
594
595func (m *StopModuleRequest) GetModule() string {
596 if m != nil && m.Module != nil {
597 return *m.Module
598 }
599 return ""
600}
601
602func (m *StopModuleRequest) GetVersion() string {
603 if m != nil && m.Version != nil {
604 return *m.Version
605 }
606 return ""
607}
608
609type StopModuleResponse struct {
610 XXX_NoUnkeyedLiteral struct{} `json:"-"`
611 XXX_unrecognized []byte `json:"-"`
612 XXX_sizecache int32 `json:"-"`
613}
614
615func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
616func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
617func (*StopModuleResponse) ProtoMessage() {}
618func (*StopModuleResponse) Descriptor() ([]byte, []int) {
619 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14}
620}
621func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error {
622 return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b)
623}
624func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
625 return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic)
626}
627func (dst *StopModuleResponse) XXX_Merge(src proto.Message) {
628 xxx_messageInfo_StopModuleResponse.Merge(dst, src)
629}
630func (m *StopModuleResponse) XXX_Size() int {
631 return xxx_messageInfo_StopModuleResponse.Size(m)
632}
633func (m *StopModuleResponse) XXX_DiscardUnknown() {
634 xxx_messageInfo_StopModuleResponse.DiscardUnknown(m)
635}
636
637var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo
638
639type GetHostnameRequest struct {
640 Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
641 Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
642 Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
643 XXX_NoUnkeyedLiteral struct{} `json:"-"`
644 XXX_unrecognized []byte `json:"-"`
645 XXX_sizecache int32 `json:"-"`
646}
647
648func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
649func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
650func (*GetHostnameRequest) ProtoMessage() {}
651func (*GetHostnameRequest) Descriptor() ([]byte, []int) {
652 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15}
653}
654func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error {
655 return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b)
656}
657func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
658 return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic)
659}
660func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) {
661 xxx_messageInfo_GetHostnameRequest.Merge(dst, src)
662}
663func (m *GetHostnameRequest) XXX_Size() int {
664 return xxx_messageInfo_GetHostnameRequest.Size(m)
665}
666func (m *GetHostnameRequest) XXX_DiscardUnknown() {
667 xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m)
668}
669
670var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo
671
672func (m *GetHostnameRequest) GetModule() string {
673 if m != nil && m.Module != nil {
674 return *m.Module
675 }
676 return ""
677}
678
679func (m *GetHostnameRequest) GetVersion() string {
680 if m != nil && m.Version != nil {
681 return *m.Version
682 }
683 return ""
684}
685
686func (m *GetHostnameRequest) GetInstance() string {
687 if m != nil && m.Instance != nil {
688 return *m.Instance
689 }
690 return ""
691}
692
693type GetHostnameResponse struct {
694 Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
695 XXX_NoUnkeyedLiteral struct{} `json:"-"`
696 XXX_unrecognized []byte `json:"-"`
697 XXX_sizecache int32 `json:"-"`
698}
699
700func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
701func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
702func (*GetHostnameResponse) ProtoMessage() {}
703func (*GetHostnameResponse) Descriptor() ([]byte, []int) {
704 return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16}
705}
706func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error {
707 return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b)
708}
709func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
710 return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic)
711}
712func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) {
713 xxx_messageInfo_GetHostnameResponse.Merge(dst, src)
714}
715func (m *GetHostnameResponse) XXX_Size() int {
716 return xxx_messageInfo_GetHostnameResponse.Size(m)
717}
718func (m *GetHostnameResponse) XXX_DiscardUnknown() {
719 xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m)
720}
721
722var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo
723
724func (m *GetHostnameResponse) GetHostname() string {
725 if m != nil && m.Hostname != nil {
726 return *m.Hostname
727 }
728 return ""
729}
730
731func init() {
732 proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError")
733 proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest")
734 proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse")
735 proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest")
736 proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse")
737 proto.RegisterType((*GetDefaultVersionRequest)(nil), "appengine.GetDefaultVersionRequest")
738 proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse")
739 proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest")
740 proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse")
741 proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest")
742 proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse")
743 proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest")
744 proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse")
745 proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest")
746 proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse")
747 proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest")
748 proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse")
749}
750
751func init() {
752 proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a)
753}
754
755var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{
756 // 457 bytes of a gzipped FileDescriptorProto
757 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30,
758 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c,
759 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a,
760 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6,
761 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e,
762 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79,
763 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c,
764 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05,
765 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8,
766 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34,
767 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16,
768 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd,
769 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72,
770 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f,
771 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36,
772 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b,
773 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41,
774 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8,
775 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad,
776 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8,
777 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39,
778 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec,
779 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc,
780 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda,
781 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea,
782 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd,
783 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18,
784 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff,
785 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00,
786}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 0000000..d29f006
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
1syntax = "proto2";
2option go_package = "modules";
3
4package appengine;
5
6message ModulesServiceError {
7 enum ErrorCode {
8 OK = 0;
9 INVALID_MODULE = 1;
10 INVALID_VERSION = 2;
11 INVALID_INSTANCES = 3;
12 TRANSIENT_ERROR = 4;
13 UNEXPECTED_STATE = 5;
14 }
15}
16
17message GetModulesRequest {
18}
19
20message GetModulesResponse {
21 repeated string module = 1;
22}
23
24message GetVersionsRequest {
25 optional string module = 1;
26}
27
28message GetVersionsResponse {
29 repeated string version = 1;
30}
31
32message GetDefaultVersionRequest {
33 optional string module = 1;
34}
35
36message GetDefaultVersionResponse {
37 required string version = 1;
38}
39
40message GetNumInstancesRequest {
41 optional string module = 1;
42 optional string version = 2;
43}
44
45message GetNumInstancesResponse {
46 required int64 instances = 1;
47}
48
49message SetNumInstancesRequest {
50 optional string module = 1;
51 optional string version = 2;
52 required int64 instances = 3;
53}
54
55message SetNumInstancesResponse {}
56
57message StartModuleRequest {
58 required string module = 1;
59 required string version = 2;
60}
61
62message StartModuleResponse {}
63
64message StopModuleRequest {
65 optional string module = 1;
66 optional string version = 2;
67}
68
69message StopModuleResponse {}
70
71message GetHostnameRequest {
72 optional string module = 1;
73 optional string version = 2;
74 optional string instance = 3;
75}
76
77message GetHostnameResponse {
78 required string hostname = 1;
79}
80
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 0000000..3b94cf0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
1// Copyright 2014 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package internal
6
7// This file implements a network dialer that limits the number of concurrent connections.
8// It is only used for API calls.
9
10import (
11 "log"
12 "net"
13 "runtime"
14 "sync"
15 "time"
16)
17
18var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
19
20func limitRelease() {
21 // non-blocking
22 select {
23 case <-limitSem:
24 default:
25 // This should not normally happen.
26 log.Print("appengine: unbalanced limitSem release!")
27 }
28}
29
30func limitDial(network, addr string) (net.Conn, error) {
31 limitSem <- 1
32
33 // Dial with a timeout in case the API host is MIA.
34 // The connection should normally be very fast.
35 conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
36 if err != nil {
37 limitRelease()
38 return nil, err
39 }
40 lc := &limitConn{Conn: conn}
41 runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
42 return lc, nil
43}
44
45type limitConn struct {
46 close sync.Once
47 net.Conn
48}
49
50func (lc *limitConn) Close() error {
51 defer lc.close.Do(func() {
52 limitRelease()
53 runtime.SetFinalizer(lc, nil)
54 })
55 return lc.Conn.Close()
56}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100644
index 0000000..2fdb546
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
1#!/bin/bash -e
2#
3# This script rebuilds the generated code for the protocol buffers.
4# To run this you will need protoc and goprotobuf installed;
5# see https://github.com/golang/protobuf for instructions.
6
7PKG=google.golang.org/appengine
8
9function die() {
10 echo 1>&2 $*
11 exit 1
12}
13
14# Sanity check that the right tools are accessible.
15for tool in go protoc protoc-gen-go; do
16 q=$(which $tool) || die "didn't find $tool"
17 echo 1>&2 "$tool: $q"
18done
19
20echo -n 1>&2 "finding package dir... "
21pkgdir=$(go list -f '{{.Dir}}' $PKG)
22echo 1>&2 $pkgdir
23base=$(echo $pkgdir | sed "s,/$PKG\$,,")
24echo 1>&2 "base: $base"
25cd $base
26
27# Run protoc once per package.
28for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
29 echo 1>&2 "* $dir"
30 protoc --go_out=. $dir/*.proto
31done
32
33for f in $(find $PKG/internal -name '*.pb.go'); do
34 # Remove proto.RegisterEnum calls.
35 # These cause duplicate registration panics when these packages
36 # are used on classic App Engine. proto.RegisterEnum only affects
37 # parsing the text format; we don't care about that.
38 # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
39 sed -i '/proto.RegisterEnum/d' $f
40done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 0000000..8d782a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,361 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
3
4package remote_api
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21type RpcError_ErrorCode int32
22
23const (
24 RpcError_UNKNOWN RpcError_ErrorCode = 0
25 RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
26 RpcError_PARSE_ERROR RpcError_ErrorCode = 2
27 RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
28 RpcError_OVER_QUOTA RpcError_ErrorCode = 4
29 RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
30 RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
31 RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
32 RpcError_BAD_REQUEST RpcError_ErrorCode = 8
33 RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
34 RpcError_CANCELLED RpcError_ErrorCode = 10
35 RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
36 RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
37)
38
39var RpcError_ErrorCode_name = map[int32]string{
40 0: "UNKNOWN",
41 1: "CALL_NOT_FOUND",
42 2: "PARSE_ERROR",
43 3: "SECURITY_VIOLATION",
44 4: "OVER_QUOTA",
45 5: "REQUEST_TOO_LARGE",
46 6: "CAPABILITY_DISABLED",
47 7: "FEATURE_DISABLED",
48 8: "BAD_REQUEST",
49 9: "RESPONSE_TOO_LARGE",
50 10: "CANCELLED",
51 11: "REPLAY_ERROR",
52 12: "DEADLINE_EXCEEDED",
53}
54var RpcError_ErrorCode_value = map[string]int32{
55 "UNKNOWN": 0,
56 "CALL_NOT_FOUND": 1,
57 "PARSE_ERROR": 2,
58 "SECURITY_VIOLATION": 3,
59 "OVER_QUOTA": 4,
60 "REQUEST_TOO_LARGE": 5,
61 "CAPABILITY_DISABLED": 6,
62 "FEATURE_DISABLED": 7,
63 "BAD_REQUEST": 8,
64 "RESPONSE_TOO_LARGE": 9,
65 "CANCELLED": 10,
66 "REPLAY_ERROR": 11,
67 "DEADLINE_EXCEEDED": 12,
68}
69
70func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
71 p := new(RpcError_ErrorCode)
72 *p = x
73 return p
74}
75func (x RpcError_ErrorCode) String() string {
76 return proto.EnumName(RpcError_ErrorCode_name, int32(x))
77}
78func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
79 value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
80 if err != nil {
81 return err
82 }
83 *x = RpcError_ErrorCode(value)
84 return nil
85}
86func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
87 return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
88}
89
90type Request struct {
91 ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
92 Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
93 Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
94 RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
95 XXX_NoUnkeyedLiteral struct{} `json:"-"`
96 XXX_unrecognized []byte `json:"-"`
97 XXX_sizecache int32 `json:"-"`
98}
99
100func (m *Request) Reset() { *m = Request{} }
101func (m *Request) String() string { return proto.CompactTextString(m) }
102func (*Request) ProtoMessage() {}
103func (*Request) Descriptor() ([]byte, []int) {
104 return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
105}
106func (m *Request) XXX_Unmarshal(b []byte) error {
107 return xxx_messageInfo_Request.Unmarshal(m, b)
108}
109func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
110 return xxx_messageInfo_Request.Marshal(b, m, deterministic)
111}
112func (dst *Request) XXX_Merge(src proto.Message) {
113 xxx_messageInfo_Request.Merge(dst, src)
114}
115func (m *Request) XXX_Size() int {
116 return xxx_messageInfo_Request.Size(m)
117}
118func (m *Request) XXX_DiscardUnknown() {
119 xxx_messageInfo_Request.DiscardUnknown(m)
120}
121
122var xxx_messageInfo_Request proto.InternalMessageInfo
123
124func (m *Request) GetServiceName() string {
125 if m != nil && m.ServiceName != nil {
126 return *m.ServiceName
127 }
128 return ""
129}
130
131func (m *Request) GetMethod() string {
132 if m != nil && m.Method != nil {
133 return *m.Method
134 }
135 return ""
136}
137
138func (m *Request) GetRequest() []byte {
139 if m != nil {
140 return m.Request
141 }
142 return nil
143}
144
145func (m *Request) GetRequestId() string {
146 if m != nil && m.RequestId != nil {
147 return *m.RequestId
148 }
149 return ""
150}
151
152type ApplicationError struct {
153 Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
154 Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
155 XXX_NoUnkeyedLiteral struct{} `json:"-"`
156 XXX_unrecognized []byte `json:"-"`
157 XXX_sizecache int32 `json:"-"`
158}
159
160func (m *ApplicationError) Reset() { *m = ApplicationError{} }
161func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
162func (*ApplicationError) ProtoMessage() {}
163func (*ApplicationError) Descriptor() ([]byte, []int) {
164 return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
165}
166func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
167 return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
168}
169func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
170 return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
171}
172func (dst *ApplicationError) XXX_Merge(src proto.Message) {
173 xxx_messageInfo_ApplicationError.Merge(dst, src)
174}
175func (m *ApplicationError) XXX_Size() int {
176 return xxx_messageInfo_ApplicationError.Size(m)
177}
178func (m *ApplicationError) XXX_DiscardUnknown() {
179 xxx_messageInfo_ApplicationError.DiscardUnknown(m)
180}
181
182var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
183
184func (m *ApplicationError) GetCode() int32 {
185 if m != nil && m.Code != nil {
186 return *m.Code
187 }
188 return 0
189}
190
191func (m *ApplicationError) GetDetail() string {
192 if m != nil && m.Detail != nil {
193 return *m.Detail
194 }
195 return ""
196}
197
198type RpcError struct {
199 Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
200 Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
201 XXX_NoUnkeyedLiteral struct{} `json:"-"`
202 XXX_unrecognized []byte `json:"-"`
203 XXX_sizecache int32 `json:"-"`
204}
205
206func (m *RpcError) Reset() { *m = RpcError{} }
207func (m *RpcError) String() string { return proto.CompactTextString(m) }
208func (*RpcError) ProtoMessage() {}
209func (*RpcError) Descriptor() ([]byte, []int) {
210 return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
211}
212func (m *RpcError) XXX_Unmarshal(b []byte) error {
213 return xxx_messageInfo_RpcError.Unmarshal(m, b)
214}
215func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
216 return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
217}
218func (dst *RpcError) XXX_Merge(src proto.Message) {
219 xxx_messageInfo_RpcError.Merge(dst, src)
220}
221func (m *RpcError) XXX_Size() int {
222 return xxx_messageInfo_RpcError.Size(m)
223}
224func (m *RpcError) XXX_DiscardUnknown() {
225 xxx_messageInfo_RpcError.DiscardUnknown(m)
226}
227
228var xxx_messageInfo_RpcError proto.InternalMessageInfo
229
230func (m *RpcError) GetCode() int32 {
231 if m != nil && m.Code != nil {
232 return *m.Code
233 }
234 return 0
235}
236
237func (m *RpcError) GetDetail() string {
238 if m != nil && m.Detail != nil {
239 return *m.Detail
240 }
241 return ""
242}
243
244type Response struct {
245 Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
246 Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
247 ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
248 JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
249 RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
250 XXX_NoUnkeyedLiteral struct{} `json:"-"`
251 XXX_unrecognized []byte `json:"-"`
252 XXX_sizecache int32 `json:"-"`
253}
254
255func (m *Response) Reset() { *m = Response{} }
256func (m *Response) String() string { return proto.CompactTextString(m) }
257func (*Response) ProtoMessage() {}
258func (*Response) Descriptor() ([]byte, []int) {
259 return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
260}
261func (m *Response) XXX_Unmarshal(b []byte) error {
262 return xxx_messageInfo_Response.Unmarshal(m, b)
263}
264func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
265 return xxx_messageInfo_Response.Marshal(b, m, deterministic)
266}
267func (dst *Response) XXX_Merge(src proto.Message) {
268 xxx_messageInfo_Response.Merge(dst, src)
269}
270func (m *Response) XXX_Size() int {
271 return xxx_messageInfo_Response.Size(m)
272}
273func (m *Response) XXX_DiscardUnknown() {
274 xxx_messageInfo_Response.DiscardUnknown(m)
275}
276
277var xxx_messageInfo_Response proto.InternalMessageInfo
278
279func (m *Response) GetResponse() []byte {
280 if m != nil {
281 return m.Response
282 }
283 return nil
284}
285
286func (m *Response) GetException() []byte {
287 if m != nil {
288 return m.Exception
289 }
290 return nil
291}
292
293func (m *Response) GetApplicationError() *ApplicationError {
294 if m != nil {
295 return m.ApplicationError
296 }
297 return nil
298}
299
300func (m *Response) GetJavaException() []byte {
301 if m != nil {
302 return m.JavaException
303 }
304 return nil
305}
306
307func (m *Response) GetRpcError() *RpcError {
308 if m != nil {
309 return m.RpcError
310 }
311 return nil
312}
313
314func init() {
315 proto.RegisterType((*Request)(nil), "remote_api.Request")
316 proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
317 proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
318 proto.RegisterType((*Response)(nil), "remote_api.Response")
319}
320
321func init() {
322 proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
323}
324
325var fileDescriptor_remote_api_1978114ec33a273d = []byte{
326 // 531 bytes of a gzipped FileDescriptorProto
327 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
328 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
329 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
330 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
331 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
332 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
333 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
334 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
335 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
336 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
337 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
338 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
339 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
340 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
341 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
342 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
343 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
344 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
345 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
346 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
347 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
348 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
349 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
350 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
351 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
352 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
353 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
354 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
355 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
356 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
357 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
358 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
359 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
360 0x03, 0x00, 0x00,
361}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 0000000..f21763a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
1syntax = "proto2";
2option go_package = "remote_api";
3
4package remote_api;
5
6message Request {
7 required string service_name = 2;
8 required string method = 3;
9 required bytes request = 4;
10 optional string request_id = 5;
11}
12
13message ApplicationError {
14 required int32 code = 1;
15 required string detail = 2;
16}
17
18message RpcError {
19 enum ErrorCode {
20 UNKNOWN = 0;
21 CALL_NOT_FOUND = 1;
22 PARSE_ERROR = 2;
23 SECURITY_VIOLATION = 3;
24 OVER_QUOTA = 4;
25 REQUEST_TOO_LARGE = 5;
26 CAPABILITY_DISABLED = 6;
27 FEATURE_DISABLED = 7;
28 BAD_REQUEST = 8;
29 RESPONSE_TOO_LARGE = 9;
30 CANCELLED = 10;
31 REPLAY_ERROR = 11;
32 DEADLINE_EXCEEDED = 12;
33 }
34 required int32 code = 1;
35 optional string detail = 2;
36}
37
38message Response {
39 optional bytes response = 1;
40 optional bytes exception = 2;
41 optional ApplicationError application_error = 3;
42 optional bytes java_exception = 4;
43 optional RpcError rpc_error = 5;
44}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 0000000..9006ae6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,115 @@
1// Copyright 2014 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package internal
6
7// This file implements hooks for applying datastore transactions.
8
9import (
10 "errors"
11 "reflect"
12
13 "github.com/golang/protobuf/proto"
14 netcontext "golang.org/x/net/context"
15
16 basepb "google.golang.org/appengine/internal/base"
17 pb "google.golang.org/appengine/internal/datastore"
18)
19
20var transactionSetters = make(map[reflect.Type]reflect.Value)
21
22// RegisterTransactionSetter registers a function that sets transaction information
23// in a protocol buffer message. f should be a function with two arguments,
24// the first being a protocol buffer type, and the second being *datastore.Transaction.
25func RegisterTransactionSetter(f interface{}) {
26 v := reflect.ValueOf(f)
27 transactionSetters[v.Type().In(0)] = v
28}
29
30// applyTransaction applies the transaction t to message pb
31// by using the relevant setter passed to RegisterTransactionSetter.
32func applyTransaction(pb proto.Message, t *pb.Transaction) {
33 v := reflect.ValueOf(pb)
34 if f, ok := transactionSetters[v.Type()]; ok {
35 f.Call([]reflect.Value{v, reflect.ValueOf(t)})
36 }
37}
38
39var transactionKey = "used for *Transaction"
40
41func transactionFromContext(ctx netcontext.Context) *transaction {
42 t, _ := ctx.Value(&transactionKey).(*transaction)
43 return t
44}
45
46func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
47 return netcontext.WithValue(ctx, &transactionKey, t)
48}
49
50type transaction struct {
51 transaction pb.Transaction
52 finished bool
53}
54
55var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
56
57func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
58 if transactionFromContext(c) != nil {
59 return nil, errors.New("nested transactions are not supported")
60 }
61
62 // Begin the transaction.
63 t := &transaction{}
64 req := &pb.BeginTransactionRequest{
65 App: proto.String(FullyQualifiedAppID(c)),
66 }
67 if xg {
68 req.AllowMultipleEg = proto.Bool(true)
69 }
70 if previousTransaction != nil {
71 req.PreviousTransaction = previousTransaction
72 }
73 if readOnly {
74 req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
75 } else {
76 req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
77 }
78 if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
79 return nil, err
80 }
81
82 // Call f, rolling back the transaction if f returns a non-nil error, or panics.
83 // The panic is not recovered.
84 defer func() {
85 if t.finished {
86 return
87 }
88 t.finished = true
89 // Ignore the error return value, since we are already returning a non-nil
90 // error (or we're panicking).
91 Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
92 }()
93 if err := f(withTransaction(c, t)); err != nil {
94 return &t.transaction, err
95 }
96 t.finished = true
97
98 // Commit the transaction.
99 res := &pb.CommitResponse{}
100 err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
101 if ae, ok := err.(*APIError); ok {
102 /* TODO: restore this conditional
103 if appengine.IsDevAppServer() {
104 */
105 // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
106 // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
107 if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
108 return &t.transaction, ErrConcurrentTransaction
109 }
110 if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
111 return &t.transaction, ErrConcurrentTransaction
112 }
113 }
114 return &t.transaction, err
115}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 0000000..5f72775
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,527 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
3
4package urlfetch
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21type URLFetchServiceError_ErrorCode int32
22
23const (
24 URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
25 URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
26 URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
27 URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
28 URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
29 URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
30 URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
31 URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
32 URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
33 URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
34 URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
35 URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
36 URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
37)
38
39var URLFetchServiceError_ErrorCode_name = map[int32]string{
40 0: "OK",
41 1: "INVALID_URL",
42 2: "FETCH_ERROR",
43 3: "UNSPECIFIED_ERROR",
44 4: "RESPONSE_TOO_LARGE",
45 5: "DEADLINE_EXCEEDED",
46 6: "SSL_CERTIFICATE_ERROR",
47 7: "DNS_ERROR",
48 8: "CLOSED",
49 9: "INTERNAL_TRANSIENT_ERROR",
50 10: "TOO_MANY_REDIRECTS",
51 11: "MALFORMED_REPLY",
52 12: "CONNECTION_ERROR",
53}
54var URLFetchServiceError_ErrorCode_value = map[string]int32{
55 "OK": 0,
56 "INVALID_URL": 1,
57 "FETCH_ERROR": 2,
58 "UNSPECIFIED_ERROR": 3,
59 "RESPONSE_TOO_LARGE": 4,
60 "DEADLINE_EXCEEDED": 5,
61 "SSL_CERTIFICATE_ERROR": 6,
62 "DNS_ERROR": 7,
63 "CLOSED": 8,
64 "INTERNAL_TRANSIENT_ERROR": 9,
65 "TOO_MANY_REDIRECTS": 10,
66 "MALFORMED_REPLY": 11,
67 "CONNECTION_ERROR": 12,
68}
69
70func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
71 p := new(URLFetchServiceError_ErrorCode)
72 *p = x
73 return p
74}
75func (x URLFetchServiceError_ErrorCode) String() string {
76 return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
77}
78func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
79 value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
80 if err != nil {
81 return err
82 }
83 *x = URLFetchServiceError_ErrorCode(value)
84 return nil
85}
86func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
87 return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
88}
89
90type URLFetchRequest_RequestMethod int32
91
92const (
93 URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
94 URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
95 URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
96 URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
97 URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
98 URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
99)
100
101var URLFetchRequest_RequestMethod_name = map[int32]string{
102 1: "GET",
103 2: "POST",
104 3: "HEAD",
105 4: "PUT",
106 5: "DELETE",
107 6: "PATCH",
108}
109var URLFetchRequest_RequestMethod_value = map[string]int32{
110 "GET": 1,
111 "POST": 2,
112 "HEAD": 3,
113 "PUT": 4,
114 "DELETE": 5,
115 "PATCH": 6,
116}
117
118func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
119 p := new(URLFetchRequest_RequestMethod)
120 *p = x
121 return p
122}
123func (x URLFetchRequest_RequestMethod) String() string {
124 return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
125}
126func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
127 value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
128 if err != nil {
129 return err
130 }
131 *x = URLFetchRequest_RequestMethod(value)
132 return nil
133}
134func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
135 return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
136}
137
138type URLFetchServiceError struct {
139 XXX_NoUnkeyedLiteral struct{} `json:"-"`
140 XXX_unrecognized []byte `json:"-"`
141 XXX_sizecache int32 `json:"-"`
142}
143
144func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
145func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
146func (*URLFetchServiceError) ProtoMessage() {}
147func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
148 return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
149}
150func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
151 return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
152}
153func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
154 return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
155}
156func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
157 xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
158}
159func (m *URLFetchServiceError) XXX_Size() int {
160 return xxx_messageInfo_URLFetchServiceError.Size(m)
161}
162func (m *URLFetchServiceError) XXX_DiscardUnknown() {
163 xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
164}
165
166var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
167
// URLFetchRequest is the proto2 request message (appengine.URLFetchRequest)
// sent to the App Engine urlfetch service's Fetch call.
type URLFetchRequest struct {
	// Method is the HTTP request method (required).
	Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
	// Url is the URL to fetch (required).
	Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
	// Header holds the request headers, encoded as a repeated proto2 group.
	Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
	// Payload is the optional request body.
	Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
	// FollowRedirects defaults to true when unset (see the getter).
	FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
	// Deadline is the fetch deadline, in seconds.
	Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
	// MustValidateServerCertificate defaults to true when unset (see the getter).
	MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
	// Internal protobuf bookkeeping fields; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to its zero value.
func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }

// String renders m in the proto compact text format.
func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks URLFetchRequest as a protobuf message.
func (*URLFetchRequest) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*URLFetchRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
}

// XXX_Unmarshal decodes b into m. Internal protoc-gen-go API.
func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b. Internal protoc-gen-go API.
func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into dst. Internal protoc-gen-go API.
func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_URLFetchRequest.Merge(dst, src)
}

// XXX_Size returns the encoded size of m in bytes. Internal protoc-gen-go API.
func (m *URLFetchRequest) XXX_Size() int {
	return xxx_messageInfo_URLFetchRequest.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields stored on m.
func (m *URLFetchRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
}

// xxx_messageInfo_URLFetchRequest caches marshaling metadata for the type.
var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo

// Field defaults declared in the .proto file ([default=true]).
const Default_URLFetchRequest_FollowRedirects bool = true
const Default_URLFetchRequest_MustValidateServerCertificate bool = true

// GetMethod returns Method, or URLFetchRequest_GET if the field or receiver is nil.
func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
	if m != nil && m.Method != nil {
		return *m.Method
	}
	return URLFetchRequest_GET
}

// GetUrl returns Url, or "" if the field or receiver is nil.
func (m *URLFetchRequest) GetUrl() string {
	if m != nil && m.Url != nil {
		return *m.Url
	}
	return ""
}

// GetHeader returns Header, or nil if the receiver is nil.
func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
	if m != nil {
		return m.Header
	}
	return nil
}

// GetPayload returns Payload, or nil if the receiver is nil.
func (m *URLFetchRequest) GetPayload() []byte {
	if m != nil {
		return m.Payload
	}
	return nil
}

// GetFollowRedirects returns FollowRedirects, or its proto default (true) when unset.
func (m *URLFetchRequest) GetFollowRedirects() bool {
	if m != nil && m.FollowRedirects != nil {
		return *m.FollowRedirects
	}
	return Default_URLFetchRequest_FollowRedirects
}

// GetDeadline returns Deadline in seconds, or 0 when unset.
func (m *URLFetchRequest) GetDeadline() float64 {
	if m != nil && m.Deadline != nil {
		return *m.Deadline
	}
	return 0
}

// GetMustValidateServerCertificate returns the field, or its proto default (true) when unset.
func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
	if m != nil && m.MustValidateServerCertificate != nil {
		return *m.MustValidateServerCertificate
	}
	return Default_URLFetchRequest_MustValidateServerCertificate
}
256
// URLFetchRequest_Header is one request header key/value pair, encoded as a
// proto2 group nested inside URLFetchRequest.
type URLFetchRequest_Header struct {
	// Key is the header name (required).
	Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
	// Value is the header value (required).
	Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
	// Internal protobuf bookkeeping fields; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to its zero value.
func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }

// String renders m in the proto compact text format.
func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks URLFetchRequest_Header as a protobuf message.
func (*URLFetchRequest_Header) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
}

// XXX_Unmarshal decodes b into m. Internal protoc-gen-go API.
func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b. Internal protoc-gen-go API.
func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into dst. Internal protoc-gen-go API.
func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
	xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
}

// XXX_Size returns the encoded size of m in bytes. Internal protoc-gen-go API.
func (m *URLFetchRequest_Header) XXX_Size() int {
	return xxx_messageInfo_URLFetchRequest_Header.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields stored on m.
func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
	xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
}

// xxx_messageInfo_URLFetchRequest_Header caches marshaling metadata for the type.
var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo

// GetKey returns Key, or "" if the field or receiver is nil.
func (m *URLFetchRequest_Header) GetKey() string {
	if m != nil && m.Key != nil {
		return *m.Key
	}
	return ""
}

// GetValue returns Value, or "" if the field or receiver is nil.
func (m *URLFetchRequest_Header) GetValue() string {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return ""
}
302
// URLFetchResponse is the proto2 response message (appengine.URLFetchResponse)
// returned by the App Engine urlfetch service's Fetch call.
type URLFetchResponse struct {
	// Content is the response body.
	Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
	// StatusCode is the HTTP status code (required).
	StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
	// Header holds the response headers, encoded as a repeated proto2 group.
	Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
	// ContentWasTruncated defaults to false when unset (see the getter).
	ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
	ExternalBytesSent   *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
	ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
	// FinalUrl is the URL the fetch ended at.
	FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
	// The Api* counters default to 0 when unset (see the getters).
	ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
	ApiBytesSent       *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
	ApiBytesReceived   *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
	// Internal protobuf bookkeeping fields; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to its zero value.
func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }

// String renders m in the proto compact text format.
func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks URLFetchResponse as a protobuf message.
func (*URLFetchResponse) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*URLFetchResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
}

// XXX_Unmarshal decodes b into m. Internal protoc-gen-go API.
func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b. Internal protoc-gen-go API.
func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into dst. Internal protoc-gen-go API.
func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_URLFetchResponse.Merge(dst, src)
}

// XXX_Size returns the encoded size of m in bytes. Internal protoc-gen-go API.
func (m *URLFetchResponse) XXX_Size() int {
	return xxx_messageInfo_URLFetchResponse.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields stored on m.
func (m *URLFetchResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
}

// xxx_messageInfo_URLFetchResponse caches marshaling metadata for the type.
var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo

// Field defaults declared in the .proto file.
const Default_URLFetchResponse_ContentWasTruncated bool = false
const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
const Default_URLFetchResponse_ApiBytesSent int64 = 0
const Default_URLFetchResponse_ApiBytesReceived int64 = 0

// GetContent returns Content, or nil if the receiver is nil.
func (m *URLFetchResponse) GetContent() []byte {
	if m != nil {
		return m.Content
	}
	return nil
}

// GetStatusCode returns StatusCode, or 0 if the field or receiver is nil.
func (m *URLFetchResponse) GetStatusCode() int32 {
	if m != nil && m.StatusCode != nil {
		return *m.StatusCode
	}
	return 0
}

// GetHeader returns Header, or nil if the receiver is nil.
func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
	if m != nil {
		return m.Header
	}
	return nil
}

// GetContentWasTruncated returns the field, or its proto default (false) when unset.
func (m *URLFetchResponse) GetContentWasTruncated() bool {
	if m != nil && m.ContentWasTruncated != nil {
		return *m.ContentWasTruncated
	}
	return Default_URLFetchResponse_ContentWasTruncated
}

// GetExternalBytesSent returns ExternalBytesSent, or 0 when unset.
func (m *URLFetchResponse) GetExternalBytesSent() int64 {
	if m != nil && m.ExternalBytesSent != nil {
		return *m.ExternalBytesSent
	}
	return 0
}

// GetExternalBytesReceived returns ExternalBytesReceived, or 0 when unset.
func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
	if m != nil && m.ExternalBytesReceived != nil {
		return *m.ExternalBytesReceived
	}
	return 0
}

// GetFinalUrl returns FinalUrl, or "" when unset.
func (m *URLFetchResponse) GetFinalUrl() string {
	if m != nil && m.FinalUrl != nil {
		return *m.FinalUrl
	}
	return ""
}

// GetApiCpuMilliseconds returns the field, or its proto default (0) when unset.
func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
	if m != nil && m.ApiCpuMilliseconds != nil {
		return *m.ApiCpuMilliseconds
	}
	return Default_URLFetchResponse_ApiCpuMilliseconds
}

// GetApiBytesSent returns the field, or its proto default (0) when unset.
func (m *URLFetchResponse) GetApiBytesSent() int64 {
	if m != nil && m.ApiBytesSent != nil {
		return *m.ApiBytesSent
	}
	return Default_URLFetchResponse_ApiBytesSent
}

// GetApiBytesReceived returns the field, or its proto default (0) when unset.
func (m *URLFetchResponse) GetApiBytesReceived() int64 {
	if m != nil && m.ApiBytesReceived != nil {
		return *m.ApiBytesReceived
	}
	return Default_URLFetchResponse_ApiBytesReceived
}
417
// URLFetchResponse_Header is one response header key/value pair, encoded as a
// proto2 group nested inside URLFetchResponse.
type URLFetchResponse_Header struct {
	// Key is the header name (required).
	Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
	// Value is the header value (required).
	Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
	// Internal protobuf bookkeeping fields; do not use.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores m to its zero value.
func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }

// String renders m in the proto compact text format.
func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks URLFetchResponse_Header as a protobuf message.
func (*URLFetchResponse_Header) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index path.
func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
	return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
}

// XXX_Unmarshal decodes b into m. Internal protoc-gen-go API.
func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b. Internal protoc-gen-go API.
func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into dst. Internal protoc-gen-go API.
func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
	xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
}

// XXX_Size returns the encoded size of m in bytes. Internal protoc-gen-go API.
func (m *URLFetchResponse_Header) XXX_Size() int {
	return xxx_messageInfo_URLFetchResponse_Header.Size(m)
}

// XXX_DiscardUnknown discards any unrecognized fields stored on m.
func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
	xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
}

// xxx_messageInfo_URLFetchResponse_Header caches marshaling metadata for the type.
var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo

// GetKey returns Key, or "" if the field or receiver is nil.
func (m *URLFetchResponse_Header) GetKey() string {
	if m != nil && m.Key != nil {
		return *m.Key
	}
	return ""
}

// GetValue returns Value, or "" if the field or receiver is nil.
func (m *URLFetchResponse_Header) GetValue() string {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return ""
}
463
func init() {
	// Register each generated message type under its fully-qualified proto name
	// so the proto runtime can resolve it by name.
	proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
	proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
	proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
	proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
	proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
}

func init() {
	// Register the gzip-compressed file descriptor under the source .proto path.
	proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
}
475
476var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
477 // 770 bytes of a gzipped FileDescriptorProto
478 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
479 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
480 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
481 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
482 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
483 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
484 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
485 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
486 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
487 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
488 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
489 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
490 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
491 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
492 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
493 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
494 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
495 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
496 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
497 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
498 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
499 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
500 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
501 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
502 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
503 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
504 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
505 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
506 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
507 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
508 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
509 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
510 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
511 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
512 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
513 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
514 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
515 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
516 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
517 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
518 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
519 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
520 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
521 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
522 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
523 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
524 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
525 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
526 0x00, 0x00,
527}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 0000000..f695edf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
syntax = "proto2";
option go_package = "urlfetch";

package appengine;

// URLFetchServiceError carries the error codes the urlfetch service can return.
message URLFetchServiceError {
  enum ErrorCode {
    OK = 0;
    INVALID_URL = 1;
    FETCH_ERROR = 2;
    UNSPECIFIED_ERROR = 3;
    RESPONSE_TOO_LARGE = 4;
    DEADLINE_EXCEEDED = 5;
    SSL_CERTIFICATE_ERROR = 6;
    DNS_ERROR = 7;
    CLOSED = 8;
    INTERNAL_TRANSIENT_ERROR = 9;
    TOO_MANY_REDIRECTS = 10;
    MALFORMED_REPLY = 11;
    CONNECTION_ERROR = 12;
  }
}

// URLFetchRequest is the request message for the urlfetch Fetch call.
message URLFetchRequest {
  // RequestMethod enumerates the HTTP methods the service accepts.
  enum RequestMethod {
    GET = 1;
    POST = 2;
    HEAD = 3;
    PUT = 4;
    DELETE = 5;
    PATCH = 6;
  }
  required RequestMethod Method = 1;
  required string Url = 2;
  // Request headers, encoded as a proto2 group.
  repeated group Header = 3 {
    required string Key = 4;
    required string Value = 5;
  }
  // Optional request body.
  optional bytes Payload = 6 [ctype=CORD];

  optional bool FollowRedirects = 7 [default=true];

  // Fetch deadline, in seconds.
  optional double Deadline = 8;

  optional bool MustValidateServerCertificate = 9 [default=true];
}

// URLFetchResponse is the response message for the urlfetch Fetch call.
message URLFetchResponse {
  optional bytes Content = 1;
  required int32 StatusCode = 2;
  // Response headers, encoded as a proto2 group.
  repeated group Header = 3 {
    required string Key = 4;
    required string Value = 5;
  }
  // True when the proxy truncated Content.
  optional bool ContentWasTruncated = 6 [default=false];
  optional int64 ExternalBytesSent = 7;
  optional int64 ExternalBytesReceived = 8;

  optional string FinalUrl = 9;

  optional int64 ApiCpuMilliseconds = 10 [default=0];
  optional int64 ApiBytesSent = 11 [default=0];
  optional int64 ApiBytesReceived = 12 [default=0];
}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 0000000..21860ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,25 @@
1// Copyright 2012 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package appengine
6
7import (
8 "fmt"
9 "regexp"
10
11 "golang.org/x/net/context"
12
13 "google.golang.org/appengine/internal"
14)
15
// Namespace returns a replacement context that operates within the given
// namespace. It rejects any namespace that does not match validNamespace
// (up to 100 characters drawn from [0-9A-Za-z._-]; the empty string is valid).
func Namespace(c context.Context, namespace string) (context.Context, error) {
	if !validNamespace.MatchString(namespace) {
		return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
	}
	return internal.NamespacedContext(c, namespace), nil
}

// validNamespace matches valid namespace names.
var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
new file mode 100644
index 0000000..05642a9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -0,0 +1,20 @@
1// Copyright 2013 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5package appengine
6
7import "golang.org/x/net/context"
8
// IsTimeoutError reports whether err is a timeout error: either
// context.DeadlineExceeded itself, or any error that exposes an
// IsTimeout() bool method returning true.
func IsTimeoutError(err error) bool {
	if err == context.DeadlineExceeded {
		return true
	}
	// Anonymous interface check avoids importing the concrete error type.
	if t, ok := err.(interface {
		IsTimeout() bool
	}); ok {
		return t.IsTimeout()
	}
	return false
}
diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh
new file mode 100644
index 0000000..785b62f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/travis_install.sh
@@ -0,0 +1,18 @@
#!/bin/bash
set -e

# Install dependencies for CI.
if [[ $GO111MODULE == "on" ]]; then
	# Module mode: fetch dependencies declared in go.mod.
	go get .
else
	# GOPATH mode: fetch every import and test import of the repo,
	# excluding the appengine packages themselves.
	go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine)
fi

if [[ $GOAPP == "true" ]]; then
	# Download and unpack the classic App Engine Go SDK for goapp-based tests.
	mkdir /tmp/sdk
	curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip"
	unzip -q /tmp/sdk.zip -d /tmp/sdk
	# NOTE: Set the following env vars in the test script:
	# export PATH="$PATH:/tmp/sdk/go_appengine"
	# export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
fi
18
diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh
new file mode 100644
index 0000000..d4390f0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/travis_test.sh
@@ -0,0 +1,12 @@
#!/bin/bash
set -e

# Run the test suite with the standard toolchain, then again with the race detector.
go version
go test -v google.golang.org/appengine/...
go test -v -race google.golang.org/appengine/...
if [[ $GOAPP == "true" ]]; then
	# Also run the suite under the classic App Engine SDK's goapp tool
	# (SDK installed by travis_install.sh into /tmp/sdk).
	export PATH="$PATH:/tmp/sdk/go_appengine"
	export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
	goapp version
	goapp test -v google.golang.org/appengine/...
fi
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 0000000..6ffe1e6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
1// Copyright 2011 Google Inc. All rights reserved.
2// Use of this source code is governed by the Apache 2.0
3// license that can be found in the LICENSE file.
4
5// Package urlfetch provides an http.RoundTripper implementation
6// for fetching URLs via App Engine's urlfetch service.
7package urlfetch // import "google.golang.org/appengine/urlfetch"
8
9import (
10 "errors"
11 "fmt"
12 "io"
13 "io/ioutil"
14 "net/http"
15 "net/url"
16 "strconv"
17 "strings"
18 "time"
19
20 "github.com/golang/protobuf/proto"
21 "golang.org/x/net/context"
22
23 "google.golang.org/appengine/internal"
24 pb "google.golang.org/appengine/internal/urlfetch"
25)
26
// Transport is an implementation of http.RoundTripper for
// App Engine. Users should generally create an http.Client using
// this transport and use the Client rather than using this transport
// directly.
type Transport struct {
	// Context carries the App Engine request context (and any deadline)
	// used when issuing urlfetch API calls.
	Context context.Context

	// AllowInvalidServerCertificate controls whether the application checks
	// the validity of SSL certificates over HTTPS connections. A value of
	// false (the default) instructs the application to send a request to
	// the server only if the certificate is valid and signed by a trusted
	// certificate authority (CA), and also includes a hostname that matches
	// the certificate. A value of true instructs the application to perform
	// no certificate validation.
	AllowInvalidServerCertificate bool
}

// Verify statically that *Transport implements http.RoundTripper.
var _ http.RoundTripper = (*Transport)(nil)
45
// Client returns an *http.Client using a default urlfetch Transport. This
// client will have the default deadline of 5 seconds, and will check the
// validity of SSL certificates.
//
// Any deadline of the provided context will be used for requests through this client;
// if the client does not have a deadline then a 5 second default is used.
func Client(ctx context.Context) *http.Client {
	// NOTE(review): the 5-second default is presumably applied by the urlfetch
	// service itself; RoundTrip only forwards a deadline when ctx carries one.
	return &http.Client{
		Transport: &Transport{
			Context: ctx,
		},
	}
}
59
// bodyReader serves a response body from an in-memory byte slice. When the
// content was truncated by the proxy, the final read reports ErrTruncatedBody
// instead of io.EOF.
type bodyReader struct {
	content   []byte
	truncated bool
	closed    bool
}

// ErrTruncatedBody is the error returned after the final Read() from a
// response's Body if the body has been truncated by App Engine's proxy.
var ErrTruncatedBody = errors.New("urlfetch: truncated body")

// statusCodeToText maps an HTTP status code to its standard reason phrase,
// falling back to the decimal code itself when no phrase is registered.
func statusCodeToText(code int) string {
	text := http.StatusText(code)
	if text == "" {
		text = strconv.Itoa(code)
	}
	return text
}

// Read copies buffered content into p. Once the content is exhausted it
// reports io.EOF, or ErrTruncatedBody when the body was truncated.
func (br *bodyReader) Read(p []byte) (int, error) {
	if br.closed {
		if br.truncated {
			return 0, ErrTruncatedBody
		}
		return 0, io.EOF
	}
	copied := copy(p, br.content)
	if copied == 0 {
		if !br.truncated {
			return 0, io.EOF
		}
		// Final read of a truncated body: report the truncation and
		// refuse further reads.
		br.closed = true
		return 0, ErrTruncatedBody
	}
	br.content = br.content[copied:]
	return copied, nil
}

// Close drops the buffered content and marks the reader closed.
func (br *bodyReader) Close() error {
	br.content, br.closed = nil, true
	return nil
}
101
// methodAcceptsRequestBody is the set of URL Fetch-accepted methods that take
// a request body; RoundTrip only forwards req.Body for these methods.
var methodAcceptsRequestBody = map[string]bool{
	"POST":  true,
	"PUT":   true,
	"PATCH": true,
}
108
109// urlString returns a valid string given a URL. This function is necessary because
110// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
111// See http://code.google.com/p/go/issues/detail?id=4860.
112func urlString(u *url.URL) string {
113 if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
114 return u.String()
115 }
116 aux := *u
117 aux.Opaque = "//" + aux.Host + aux.Opaque
118 return aux.String()
119}
120
// RoundTrip issues a single HTTP request and returns its response. Per the
// http.RoundTripper interface, RoundTrip only returns an error if there
// was an unsupported request or the URL Fetch proxy fails.
// Note that HTTP response codes such as 5xx, 403, 404, etc are not
// errors as far as the transport is concerned and will be returned
// with err set to nil.
func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
	// Reject HTTP methods the urlfetch service does not support.
	methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
	if !ok {
		return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
	}

	method := pb.URLFetchRequest_RequestMethod(methNum)

	freq := &pb.URLFetchRequest{
		Method: &method,
		Url: proto.String(urlString(req.URL)),
		FollowRedirects: proto.Bool(false), // http.Client's responsibility
		MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
	}
	// Forward any context deadline to the service as a relative number of seconds.
	if deadline, ok := t.Context.Deadline(); ok {
		freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
	}

	// Copy all request headers into the proto request.
	for k, vals := range req.Header {
		for _, val := range vals {
			freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
				Key: proto.String(k),
				Value: proto.String(val),
			})
		}
	}
	// Only forward a body for methods the service accepts bodies on.
	if methodAcceptsRequestBody[req.Method] && req.Body != nil {
		// Avoid a []byte copy if req.Body has a Bytes method.
		switch b := req.Body.(type) {
		case interface {
			Bytes() []byte
		}:
			freq.Payload = b.Bytes()
		default:
			freq.Payload, err = ioutil.ReadAll(req.Body)
			if err != nil {
				return nil, err
			}
		}
	}

	// Issue the Fetch RPC against the urlfetch service.
	fres := &pb.URLFetchResponse{}
	if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
		return nil, err
	}

	// Translate the proto response into an *http.Response.
	res = &http.Response{}
	res.StatusCode = int(*fres.StatusCode)
	res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
	res.Header = make(http.Header)
	res.Request = req

	// Faked: the urlfetch proto response carries no protocol/connection
	// details, so present fixed HTTP/1.1 values and a closed connection.
	res.ProtoMajor = 1
	res.ProtoMinor = 1
	res.Proto = "HTTP/1.1"
	res.Close = true

	for _, h := range fres.Header {
		hkey := http.CanonicalHeaderKey(*h.Key)
		hval := *h.Value
		if hkey == "Content-Length" {
			// Will get filled in below for all but HEAD requests.
			if req.Method == "HEAD" {
				res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
			}
			continue
		}
		res.Header.Add(hkey, hval)
	}

	// For non-HEAD requests the body is fully buffered, so its length is known.
	if req.Method != "HEAD" {
		res.ContentLength = int64(len(fres.Content))
	}

	truncated := fres.GetContentWasTruncated()
	res.Body = &bodyReader{content: fres.Content, truncated: truncated}
	return
}
206
func init() {
	// Register the urlfetch error-code name map and the error code to be
	// treated as a timeout with the internal API machinery.
	internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
	internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
new file mode 100644
index 0000000..9521b50
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
@@ -0,0 +1,54 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/api/annotations.proto
3
4package annotations // import "google.golang.org/genproto/googleapis/api/annotations"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
10
11// Reference imports to suppress errors if they are not otherwise used.
12var _ = proto.Marshal
13var _ = fmt.Errorf
14var _ = math.Inf
15
16// This is a compile-time assertion to ensure that this generated file
17// is compatible with the proto package it is being compiled against.
18// A compilation error at this line likely means your copy of the
19// proto package needs to be updated.
20const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
21
// E_Http is the extension descriptor for the google.api.http method option
// (field number 72295728 on google.protobuf.MethodOptions), whose value is
// an HttpRule.
var E_Http = &proto.ExtensionDesc{
	ExtendedType: (*descriptor.MethodOptions)(nil),
	ExtensionType: (*HttpRule)(nil),
	Field: 72295728,
	Name: "google.api.http",
	Tag: "bytes,72295728,opt,name=http",
	Filename: "google/api/annotations.proto",
}

func init() {
	// Register the extension with the proto runtime so it can be set and
	// read on MethodOptions messages.
	proto.RegisterExtension(E_Http)
}

func init() {
	// Register the gzip-compressed file descriptor under the source .proto path.
	proto.RegisterFile("google/api/annotations.proto", fileDescriptor_annotations_55609bb51d80951d)
}
38
39var fileDescriptor_annotations_55609bb51d80951d = []byte{
40 // 208 bytes of a gzipped FileDescriptorProto
41 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
42 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
43 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64,
44 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79,
45 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15,
46 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53,
47 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51,
48 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a,
49 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08,
50 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5,
51 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64,
52 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d,
53 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00,
54}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
new file mode 100644
index 0000000..1a8a27b
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
@@ -0,0 +1,688 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/api/http.proto
3
4package annotations // import "google.golang.org/genproto/googleapis/api/annotations"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21// Defines the HTTP configuration for an API service. It contains a list of
22// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
23// to one or more HTTP REST API methods.
24type Http struct {
25 // A list of HTTP configuration rules that apply to individual API methods.
26 //
27 // **NOTE:** All service configuration rules follow "last one wins" order.
28 Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
29 // When set to true, URL path parmeters will be fully URI-decoded except in
30 // cases of single segment matches in reserved expansion, where "%2F" will be
31 // left encoded.
32 //
33 // The default behavior is to not decode RFC 6570 reserved characters in multi
34 // segment matches.
35 FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
36 XXX_NoUnkeyedLiteral struct{} `json:"-"`
37 XXX_unrecognized []byte `json:"-"`
38 XXX_sizecache int32 `json:"-"`
39}
40
41func (m *Http) Reset() { *m = Http{} }
42func (m *Http) String() string { return proto.CompactTextString(m) }
43func (*Http) ProtoMessage() {}
44func (*Http) Descriptor() ([]byte, []int) {
45 return fileDescriptor_http_e457621dddd7365b, []int{0}
46}
47func (m *Http) XXX_Unmarshal(b []byte) error {
48 return xxx_messageInfo_Http.Unmarshal(m, b)
49}
50func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
51 return xxx_messageInfo_Http.Marshal(b, m, deterministic)
52}
53func (dst *Http) XXX_Merge(src proto.Message) {
54 xxx_messageInfo_Http.Merge(dst, src)
55}
56func (m *Http) XXX_Size() int {
57 return xxx_messageInfo_Http.Size(m)
58}
59func (m *Http) XXX_DiscardUnknown() {
60 xxx_messageInfo_Http.DiscardUnknown(m)
61}
62
63var xxx_messageInfo_Http proto.InternalMessageInfo
64
65func (m *Http) GetRules() []*HttpRule {
66 if m != nil {
67 return m.Rules
68 }
69 return nil
70}
71
72func (m *Http) GetFullyDecodeReservedExpansion() bool {
73 if m != nil {
74 return m.FullyDecodeReservedExpansion
75 }
76 return false
77}
78
79// `HttpRule` defines the mapping of an RPC method to one or more HTTP
80// REST API methods. The mapping specifies how different portions of the RPC
81// request message are mapped to URL path, URL query parameters, and
82// HTTP request body. The mapping is typically specified as an
83// `google.api.http` annotation on the RPC method,
84// see "google/api/annotations.proto" for details.
85//
86// The mapping consists of a field specifying the path template and
87// method kind. The path template can refer to fields in the request
88// message, as in the example below which describes a REST GET
89// operation on a resource collection of messages:
90//
91//
92// service Messaging {
93// rpc GetMessage(GetMessageRequest) returns (Message) {
94// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
95// }
96// }
97// message GetMessageRequest {
98// message SubMessage {
99// string subfield = 1;
100// }
101// string message_id = 1; // mapped to the URL
102// SubMessage sub = 2; // `sub.subfield` is url-mapped
103// }
104// message Message {
105// string text = 1; // content of the resource
106// }
107//
108// The same http annotation can alternatively be expressed inside the
109// `GRPC API Configuration` YAML file.
110//
111// http:
112// rules:
113// - selector: <proto_package_name>.Messaging.GetMessage
114// get: /v1/messages/{message_id}/{sub.subfield}
115//
116// This definition enables an automatic, bidrectional mapping of HTTP
117// JSON to RPC. Example:
118//
119// HTTP | RPC
120// -----|-----
121// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
122//
123// In general, not only fields but also field paths can be referenced
124// from a path pattern. Fields mapped to the path pattern cannot be
125// repeated and must have a primitive (non-message) type.
126//
127// Any fields in the request message which are not bound by the path
128// pattern automatically become (optional) HTTP query
129// parameters. Assume the following definition of the request message:
130//
131//
132// service Messaging {
133// rpc GetMessage(GetMessageRequest) returns (Message) {
134// option (google.api.http).get = "/v1/messages/{message_id}";
135// }
136// }
137// message GetMessageRequest {
138// message SubMessage {
139// string subfield = 1;
140// }
141// string message_id = 1; // mapped to the URL
142// int64 revision = 2; // becomes a parameter
143// SubMessage sub = 3; // `sub.subfield` becomes a parameter
144// }
145//
146//
147// This enables a HTTP JSON to RPC mapping as below:
148//
149// HTTP | RPC
150// -----|-----
151// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
152//
153// Note that fields which are mapped to HTTP parameters must have a
154// primitive type or a repeated primitive type. Message types are not
155// allowed. In the case of a repeated type, the parameter can be
156// repeated in the URL, as in `...?param=A&param=B`.
157//
158// For HTTP method kinds which allow a request body, the `body` field
159// specifies the mapping. Consider a REST update method on the
160// message resource collection:
161//
162//
163// service Messaging {
164// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
165// option (google.api.http) = {
166// put: "/v1/messages/{message_id}"
167// body: "message"
168// };
169// }
170// }
171// message UpdateMessageRequest {
172// string message_id = 1; // mapped to the URL
173// Message message = 2; // mapped to the body
174// }
175//
176//
177// The following HTTP JSON to RPC mapping is enabled, where the
178// representation of the JSON in the request body is determined by
179// protos JSON encoding:
180//
181// HTTP | RPC
182// -----|-----
183// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
184//
185// The special name `*` can be used in the body mapping to define that
186// every field not bound by the path template should be mapped to the
187// request body. This enables the following alternative definition of
188// the update method:
189//
190// service Messaging {
191// rpc UpdateMessage(Message) returns (Message) {
192// option (google.api.http) = {
193// put: "/v1/messages/{message_id}"
194// body: "*"
195// };
196// }
197// }
198// message Message {
199// string message_id = 1;
200// string text = 2;
201// }
202//
203//
204// The following HTTP JSON to RPC mapping is enabled:
205//
206// HTTP | RPC
207// -----|-----
208// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
209//
210// Note that when using `*` in the body mapping, it is not possible to
211// have HTTP parameters, as all fields not bound by the path end in
212// the body. This makes this option more rarely used in practice of
213// defining REST APIs. The common usage of `*` is in custom methods
214// which don't use the URL at all for transferring data.
215//
216// It is possible to define multiple HTTP methods for one RPC by using
217// the `additional_bindings` option. Example:
218//
219// service Messaging {
220// rpc GetMessage(GetMessageRequest) returns (Message) {
221// option (google.api.http) = {
222// get: "/v1/messages/{message_id}"
223// additional_bindings {
224// get: "/v1/users/{user_id}/messages/{message_id}"
225// }
226// };
227// }
228// }
229// message GetMessageRequest {
230// string message_id = 1;
231// string user_id = 2;
232// }
233//
234//
235// This enables the following two alternative HTTP JSON to RPC
236// mappings:
237//
238// HTTP | RPC
239// -----|-----
240// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
241// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
242//
243// # Rules for HTTP mapping
244//
245// The rules for mapping HTTP path, query parameters, and body fields
246// to the request message are as follows:
247//
248// 1. The `body` field specifies either `*` or a field path, or is
249// omitted. If omitted, it indicates there is no HTTP request body.
250// 2. Leaf fields (recursive expansion of nested messages in the
251// request) can be classified into three types:
252// (a) Matched in the URL template.
253// (b) Covered by body (if body is `*`, everything except (a) fields;
254// else everything under the body field)
255// (c) All other fields.
256// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
257// 4. Any body sent with an HTTP request can contain only (b) fields.
258//
259// The syntax of the path template is as follows:
260//
261// Template = "/" Segments [ Verb ] ;
262// Segments = Segment { "/" Segment } ;
263// Segment = "*" | "**" | LITERAL | Variable ;
264// Variable = "{" FieldPath [ "=" Segments ] "}" ;
265// FieldPath = IDENT { "." IDENT } ;
266// Verb = ":" LITERAL ;
267//
268// The syntax `*` matches a single path segment. The syntax `**` matches zero
269// or more path segments, which must be the last part of the path except the
270// `Verb`. The syntax `LITERAL` matches literal text in the path.
271//
272// The syntax `Variable` matches part of the URL path as specified by its
273// template. A variable template must not contain other variables. If a variable
274// matches a single path segment, its template may be omitted, e.g. `{var}`
275// is equivalent to `{var=*}`.
276//
277// If a variable contains exactly one path segment, such as `"{var}"` or
278// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
279// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
280// Discovery Document as `{var}`.
281//
282// If a variable contains one or more path segments, such as `"{var=foo/*}"`
283// or `"{var=**}"`, when such a variable is expanded into a URL path, all
284// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
285// show up in the Discovery Document as `{+var}`.
286//
287// NOTE: While the single segment variable matches the semantics of
288// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
289// Simple String Expansion, the multi segment variable **does not** match
290// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
291// does not expand special characters like `?` and `#`, which would lead
292// to invalid URLs.
293//
294// NOTE: the field paths in variables and in the `body` must not refer to
295// repeated fields or map fields.
296type HttpRule struct {
297 // Selects methods to which this rule applies.
298 //
299 // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
300 Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
301 // Determines the URL pattern is matched by this rules. This pattern can be
302 // used with any of the {get|put|post|delete|patch} methods. A custom method
303 // can be defined using the 'custom' field.
304 //
305 // Types that are valid to be assigned to Pattern:
306 // *HttpRule_Get
307 // *HttpRule_Put
308 // *HttpRule_Post
309 // *HttpRule_Delete
310 // *HttpRule_Patch
311 // *HttpRule_Custom
312 Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
313 // The name of the request field whose value is mapped to the HTTP body, or
314 // `*` for mapping all fields not captured by the path pattern to the HTTP
315 // body. NOTE: the referred field must not be a repeated field and must be
316 // present at the top-level of request message type.
317 Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
318 // Optional. The name of the response field whose value is mapped to the HTTP
319 // body of response. Other response fields are ignored. When
320 // not set, the response message will be used as HTTP body of response.
321 ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
322 // Additional HTTP bindings for the selector. Nested bindings must
323 // not contain an `additional_bindings` field themselves (that is,
324 // the nesting may only be one level deep).
325 AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
326 XXX_NoUnkeyedLiteral struct{} `json:"-"`
327 XXX_unrecognized []byte `json:"-"`
328 XXX_sizecache int32 `json:"-"`
329}
330
331func (m *HttpRule) Reset() { *m = HttpRule{} }
332func (m *HttpRule) String() string { return proto.CompactTextString(m) }
333func (*HttpRule) ProtoMessage() {}
334func (*HttpRule) Descriptor() ([]byte, []int) {
335 return fileDescriptor_http_e457621dddd7365b, []int{1}
336}
337func (m *HttpRule) XXX_Unmarshal(b []byte) error {
338 return xxx_messageInfo_HttpRule.Unmarshal(m, b)
339}
340func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
341 return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic)
342}
343func (dst *HttpRule) XXX_Merge(src proto.Message) {
344 xxx_messageInfo_HttpRule.Merge(dst, src)
345}
346func (m *HttpRule) XXX_Size() int {
347 return xxx_messageInfo_HttpRule.Size(m)
348}
349func (m *HttpRule) XXX_DiscardUnknown() {
350 xxx_messageInfo_HttpRule.DiscardUnknown(m)
351}
352
353var xxx_messageInfo_HttpRule proto.InternalMessageInfo
354
355func (m *HttpRule) GetSelector() string {
356 if m != nil {
357 return m.Selector
358 }
359 return ""
360}
361
362type isHttpRule_Pattern interface {
363 isHttpRule_Pattern()
364}
365
366type HttpRule_Get struct {
367 Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"`
368}
369
370type HttpRule_Put struct {
371 Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"`
372}
373
374type HttpRule_Post struct {
375 Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"`
376}
377
378type HttpRule_Delete struct {
379 Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
380}
381
382type HttpRule_Patch struct {
383 Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"`
384}
385
386type HttpRule_Custom struct {
387 Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
388}
389
390func (*HttpRule_Get) isHttpRule_Pattern() {}
391
392func (*HttpRule_Put) isHttpRule_Pattern() {}
393
394func (*HttpRule_Post) isHttpRule_Pattern() {}
395
396func (*HttpRule_Delete) isHttpRule_Pattern() {}
397
398func (*HttpRule_Patch) isHttpRule_Pattern() {}
399
400func (*HttpRule_Custom) isHttpRule_Pattern() {}
401
402func (m *HttpRule) GetPattern() isHttpRule_Pattern {
403 if m != nil {
404 return m.Pattern
405 }
406 return nil
407}
408
409func (m *HttpRule) GetGet() string {
410 if x, ok := m.GetPattern().(*HttpRule_Get); ok {
411 return x.Get
412 }
413 return ""
414}
415
416func (m *HttpRule) GetPut() string {
417 if x, ok := m.GetPattern().(*HttpRule_Put); ok {
418 return x.Put
419 }
420 return ""
421}
422
423func (m *HttpRule) GetPost() string {
424 if x, ok := m.GetPattern().(*HttpRule_Post); ok {
425 return x.Post
426 }
427 return ""
428}
429
430func (m *HttpRule) GetDelete() string {
431 if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
432 return x.Delete
433 }
434 return ""
435}
436
437func (m *HttpRule) GetPatch() string {
438 if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
439 return x.Patch
440 }
441 return ""
442}
443
444func (m *HttpRule) GetCustom() *CustomHttpPattern {
445 if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
446 return x.Custom
447 }
448 return nil
449}
450
451func (m *HttpRule) GetBody() string {
452 if m != nil {
453 return m.Body
454 }
455 return ""
456}
457
458func (m *HttpRule) GetResponseBody() string {
459 if m != nil {
460 return m.ResponseBody
461 }
462 return ""
463}
464
465func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
466 if m != nil {
467 return m.AdditionalBindings
468 }
469 return nil
470}
471
472// XXX_OneofFuncs is for the internal use of the proto package.
473func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
474 return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{
475 (*HttpRule_Get)(nil),
476 (*HttpRule_Put)(nil),
477 (*HttpRule_Post)(nil),
478 (*HttpRule_Delete)(nil),
479 (*HttpRule_Patch)(nil),
480 (*HttpRule_Custom)(nil),
481 }
482}
483
484func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
485 m := msg.(*HttpRule)
486 // pattern
487 switch x := m.Pattern.(type) {
488 case *HttpRule_Get:
489 b.EncodeVarint(2<<3 | proto.WireBytes)
490 b.EncodeStringBytes(x.Get)
491 case *HttpRule_Put:
492 b.EncodeVarint(3<<3 | proto.WireBytes)
493 b.EncodeStringBytes(x.Put)
494 case *HttpRule_Post:
495 b.EncodeVarint(4<<3 | proto.WireBytes)
496 b.EncodeStringBytes(x.Post)
497 case *HttpRule_Delete:
498 b.EncodeVarint(5<<3 | proto.WireBytes)
499 b.EncodeStringBytes(x.Delete)
500 case *HttpRule_Patch:
501 b.EncodeVarint(6<<3 | proto.WireBytes)
502 b.EncodeStringBytes(x.Patch)
503 case *HttpRule_Custom:
504 b.EncodeVarint(8<<3 | proto.WireBytes)
505 if err := b.EncodeMessage(x.Custom); err != nil {
506 return err
507 }
508 case nil:
509 default:
510 return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x)
511 }
512 return nil
513}
514
515func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
516 m := msg.(*HttpRule)
517 switch tag {
518 case 2: // pattern.get
519 if wire != proto.WireBytes {
520 return true, proto.ErrInternalBadWireType
521 }
522 x, err := b.DecodeStringBytes()
523 m.Pattern = &HttpRule_Get{x}
524 return true, err
525 case 3: // pattern.put
526 if wire != proto.WireBytes {
527 return true, proto.ErrInternalBadWireType
528 }
529 x, err := b.DecodeStringBytes()
530 m.Pattern = &HttpRule_Put{x}
531 return true, err
532 case 4: // pattern.post
533 if wire != proto.WireBytes {
534 return true, proto.ErrInternalBadWireType
535 }
536 x, err := b.DecodeStringBytes()
537 m.Pattern = &HttpRule_Post{x}
538 return true, err
539 case 5: // pattern.delete
540 if wire != proto.WireBytes {
541 return true, proto.ErrInternalBadWireType
542 }
543 x, err := b.DecodeStringBytes()
544 m.Pattern = &HttpRule_Delete{x}
545 return true, err
546 case 6: // pattern.patch
547 if wire != proto.WireBytes {
548 return true, proto.ErrInternalBadWireType
549 }
550 x, err := b.DecodeStringBytes()
551 m.Pattern = &HttpRule_Patch{x}
552 return true, err
553 case 8: // pattern.custom
554 if wire != proto.WireBytes {
555 return true, proto.ErrInternalBadWireType
556 }
557 msg := new(CustomHttpPattern)
558 err := b.DecodeMessage(msg)
559 m.Pattern = &HttpRule_Custom{msg}
560 return true, err
561 default:
562 return false, nil
563 }
564}
565
566func _HttpRule_OneofSizer(msg proto.Message) (n int) {
567 m := msg.(*HttpRule)
568 // pattern
569 switch x := m.Pattern.(type) {
570 case *HttpRule_Get:
571 n += 1 // tag and wire
572 n += proto.SizeVarint(uint64(len(x.Get)))
573 n += len(x.Get)
574 case *HttpRule_Put:
575 n += 1 // tag and wire
576 n += proto.SizeVarint(uint64(len(x.Put)))
577 n += len(x.Put)
578 case *HttpRule_Post:
579 n += 1 // tag and wire
580 n += proto.SizeVarint(uint64(len(x.Post)))
581 n += len(x.Post)
582 case *HttpRule_Delete:
583 n += 1 // tag and wire
584 n += proto.SizeVarint(uint64(len(x.Delete)))
585 n += len(x.Delete)
586 case *HttpRule_Patch:
587 n += 1 // tag and wire
588 n += proto.SizeVarint(uint64(len(x.Patch)))
589 n += len(x.Patch)
590 case *HttpRule_Custom:
591 s := proto.Size(x.Custom)
592 n += 1 // tag and wire
593 n += proto.SizeVarint(uint64(s))
594 n += s
595 case nil:
596 default:
597 panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
598 }
599 return n
600}
601
602// A custom pattern is used for defining custom HTTP verb.
603type CustomHttpPattern struct {
604 // The name of this custom HTTP verb.
605 Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
606 // The path matched by this custom verb.
607 Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
608 XXX_NoUnkeyedLiteral struct{} `json:"-"`
609 XXX_unrecognized []byte `json:"-"`
610 XXX_sizecache int32 `json:"-"`
611}
612
613func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
614func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
615func (*CustomHttpPattern) ProtoMessage() {}
616func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
617 return fileDescriptor_http_e457621dddd7365b, []int{2}
618}
619func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error {
620 return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b)
621}
622func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
623 return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic)
624}
625func (dst *CustomHttpPattern) XXX_Merge(src proto.Message) {
626 xxx_messageInfo_CustomHttpPattern.Merge(dst, src)
627}
628func (m *CustomHttpPattern) XXX_Size() int {
629 return xxx_messageInfo_CustomHttpPattern.Size(m)
630}
631func (m *CustomHttpPattern) XXX_DiscardUnknown() {
632 xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m)
633}
634
635var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo
636
637func (m *CustomHttpPattern) GetKind() string {
638 if m != nil {
639 return m.Kind
640 }
641 return ""
642}
643
644func (m *CustomHttpPattern) GetPath() string {
645 if m != nil {
646 return m.Path
647 }
648 return ""
649}
650
651func init() {
652 proto.RegisterType((*Http)(nil), "google.api.Http")
653 proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
654 proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
655}
656
657func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_http_e457621dddd7365b) }
658
659var fileDescriptor_http_e457621dddd7365b = []byte{
660 // 419 bytes of a gzipped FileDescriptorProto
661 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30,
662 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52,
663 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37,
664 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d,
665 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b,
666 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e,
667 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e,
668 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea,
669 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc,
670 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55,
671 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1,
672 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6,
673 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52,
674 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef,
675 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55,
676 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42,
677 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22,
678 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a,
679 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65,
680 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b,
681 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63,
682 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec,
683 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea,
684 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18,
685 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd,
686 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac,
687 0x02, 0x00, 0x00,
688}
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
new file mode 100644
index 0000000..dfc8796
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
@@ -0,0 +1,411 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/iam/v1/iam_policy.proto
3
4package iam // import "google.golang.org/genproto/googleapis/iam/v1"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9import _ "google.golang.org/genproto/googleapis/api/annotations"
10
11import (
12 context "golang.org/x/net/context"
13 grpc "google.golang.org/grpc"
14)
15
16// Reference imports to suppress errors if they are not otherwise used.
17var _ = proto.Marshal
18var _ = fmt.Errorf
19var _ = math.Inf
20
21// This is a compile-time assertion to ensure that this generated file
22// is compatible with the proto package it is being compiled against.
23// A compilation error at this line likely means your copy of the
24// proto package needs to be updated.
25const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
26
27// Request message for `SetIamPolicy` method.
28type SetIamPolicyRequest struct {
29 // REQUIRED: The resource for which the policy is being specified.
30 // `resource` is usually specified as a path. For example, a Project
31 // resource is specified as `projects/{project}`.
32 Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
33 // REQUIRED: The complete policy to be applied to the `resource`. The size of
34 // the policy is limited to a few 10s of KB. An empty policy is a
35 // valid policy but certain Cloud Platform services (such as Projects)
36 // might reject them.
37 Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
38 XXX_NoUnkeyedLiteral struct{} `json:"-"`
39 XXX_unrecognized []byte `json:"-"`
40 XXX_sizecache int32 `json:"-"`
41}
42
43func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} }
44func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
45func (*SetIamPolicyRequest) ProtoMessage() {}
46func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) {
47 return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{0}
48}
49func (m *SetIamPolicyRequest) XXX_Unmarshal(b []byte) error {
50 return xxx_messageInfo_SetIamPolicyRequest.Unmarshal(m, b)
51}
52func (m *SetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
53 return xxx_messageInfo_SetIamPolicyRequest.Marshal(b, m, deterministic)
54}
55func (dst *SetIamPolicyRequest) XXX_Merge(src proto.Message) {
56 xxx_messageInfo_SetIamPolicyRequest.Merge(dst, src)
57}
58func (m *SetIamPolicyRequest) XXX_Size() int {
59 return xxx_messageInfo_SetIamPolicyRequest.Size(m)
60}
61func (m *SetIamPolicyRequest) XXX_DiscardUnknown() {
62 xxx_messageInfo_SetIamPolicyRequest.DiscardUnknown(m)
63}
64
65var xxx_messageInfo_SetIamPolicyRequest proto.InternalMessageInfo
66
67func (m *SetIamPolicyRequest) GetResource() string {
68 if m != nil {
69 return m.Resource
70 }
71 return ""
72}
73
74func (m *SetIamPolicyRequest) GetPolicy() *Policy {
75 if m != nil {
76 return m.Policy
77 }
78 return nil
79}
80
81// Request message for `GetIamPolicy` method.
82type GetIamPolicyRequest struct {
83 // REQUIRED: The resource for which the policy is being requested.
84 // `resource` is usually specified as a path. For example, a Project
85 // resource is specified as `projects/{project}`.
86 Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
87 XXX_NoUnkeyedLiteral struct{} `json:"-"`
88 XXX_unrecognized []byte `json:"-"`
89 XXX_sizecache int32 `json:"-"`
90}
91
92func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} }
93func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) }
94func (*GetIamPolicyRequest) ProtoMessage() {}
95func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) {
96 return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{1}
97}
98func (m *GetIamPolicyRequest) XXX_Unmarshal(b []byte) error {
99 return xxx_messageInfo_GetIamPolicyRequest.Unmarshal(m, b)
100}
101func (m *GetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
102 return xxx_messageInfo_GetIamPolicyRequest.Marshal(b, m, deterministic)
103}
104func (dst *GetIamPolicyRequest) XXX_Merge(src proto.Message) {
105 xxx_messageInfo_GetIamPolicyRequest.Merge(dst, src)
106}
107func (m *GetIamPolicyRequest) XXX_Size() int {
108 return xxx_messageInfo_GetIamPolicyRequest.Size(m)
109}
110func (m *GetIamPolicyRequest) XXX_DiscardUnknown() {
111 xxx_messageInfo_GetIamPolicyRequest.DiscardUnknown(m)
112}
113
114var xxx_messageInfo_GetIamPolicyRequest proto.InternalMessageInfo
115
116func (m *GetIamPolicyRequest) GetResource() string {
117 if m != nil {
118 return m.Resource
119 }
120 return ""
121}
122
123// Request message for `TestIamPermissions` method.
124type TestIamPermissionsRequest struct {
125 // REQUIRED: The resource for which the policy detail is being requested.
126 // `resource` is usually specified as a path. For example, a Project
127 // resource is specified as `projects/{project}`.
128 Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
129 // The set of permissions to check for the `resource`. Permissions with
130 // wildcards (such as '*' or 'storage.*') are not allowed. For more
131 // information see
132 // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
133 Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"`
134 XXX_NoUnkeyedLiteral struct{} `json:"-"`
135 XXX_unrecognized []byte `json:"-"`
136 XXX_sizecache int32 `json:"-"`
137}
138
139func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} }
140func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) }
141func (*TestIamPermissionsRequest) ProtoMessage() {}
142func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) {
143 return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{2}
144}
145func (m *TestIamPermissionsRequest) XXX_Unmarshal(b []byte) error {
146 return xxx_messageInfo_TestIamPermissionsRequest.Unmarshal(m, b)
147}
148func (m *TestIamPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
149 return xxx_messageInfo_TestIamPermissionsRequest.Marshal(b, m, deterministic)
150}
151func (dst *TestIamPermissionsRequest) XXX_Merge(src proto.Message) {
152 xxx_messageInfo_TestIamPermissionsRequest.Merge(dst, src)
153}
154func (m *TestIamPermissionsRequest) XXX_Size() int {
155 return xxx_messageInfo_TestIamPermissionsRequest.Size(m)
156}
157func (m *TestIamPermissionsRequest) XXX_DiscardUnknown() {
158 xxx_messageInfo_TestIamPermissionsRequest.DiscardUnknown(m)
159}
160
161var xxx_messageInfo_TestIamPermissionsRequest proto.InternalMessageInfo
162
163func (m *TestIamPermissionsRequest) GetResource() string {
164 if m != nil {
165 return m.Resource
166 }
167 return ""
168}
169
170func (m *TestIamPermissionsRequest) GetPermissions() []string {
171 if m != nil {
172 return m.Permissions
173 }
174 return nil
175}
176
177// Response message for `TestIamPermissions` method.
178type TestIamPermissionsResponse struct {
179 // A subset of `TestPermissionsRequest.permissions` that the caller is
180 // allowed.
181 Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"`
182 XXX_NoUnkeyedLiteral struct{} `json:"-"`
183 XXX_unrecognized []byte `json:"-"`
184 XXX_sizecache int32 `json:"-"`
185}
186
187func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} }
188func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) }
189func (*TestIamPermissionsResponse) ProtoMessage() {}
190func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) {
191 return fileDescriptor_iam_policy_58547b5cf2e9d67a, []int{3}
192}
193func (m *TestIamPermissionsResponse) XXX_Unmarshal(b []byte) error {
194 return xxx_messageInfo_TestIamPermissionsResponse.Unmarshal(m, b)
195}
196func (m *TestIamPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
197 return xxx_messageInfo_TestIamPermissionsResponse.Marshal(b, m, deterministic)
198}
199func (dst *TestIamPermissionsResponse) XXX_Merge(src proto.Message) {
200 xxx_messageInfo_TestIamPermissionsResponse.Merge(dst, src)
201}
202func (m *TestIamPermissionsResponse) XXX_Size() int {
203 return xxx_messageInfo_TestIamPermissionsResponse.Size(m)
204}
205func (m *TestIamPermissionsResponse) XXX_DiscardUnknown() {
206 xxx_messageInfo_TestIamPermissionsResponse.DiscardUnknown(m)
207}
208
209var xxx_messageInfo_TestIamPermissionsResponse proto.InternalMessageInfo
210
211func (m *TestIamPermissionsResponse) GetPermissions() []string {
212 if m != nil {
213 return m.Permissions
214 }
215 return nil
216}
217
218func init() {
219 proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest")
220 proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest")
221 proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest")
222 proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse")
223}
224
225// Reference imports to suppress errors if they are not otherwise used.
226var _ context.Context
227var _ grpc.ClientConn
228
229// This is a compile-time assertion to ensure that this generated file
230// is compatible with the grpc package it is being compiled against.
231const _ = grpc.SupportPackageIsVersion4
232
233// IAMPolicyClient is the client API for IAMPolicy service.
234//
235// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
236type IAMPolicyClient interface {
237 // Sets the access control policy on the specified resource. Replaces any
238 // existing policy.
239 SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
240 // Gets the access control policy for a resource.
241 // Returns an empty policy if the resource exists and does not have a policy
242 // set.
243 GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
244 // Returns permissions that a caller has on the specified resource.
245 // If the resource does not exist, this will return an empty set of
246 // permissions, not a NOT_FOUND error.
247 TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error)
248}
249
250type iAMPolicyClient struct {
251 cc *grpc.ClientConn
252}
253
254func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient {
255 return &iAMPolicyClient{cc}
256}
257
258func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
259 out := new(Policy)
260 err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...)
261 if err != nil {
262 return nil, err
263 }
264 return out, nil
265}
266
267func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
268 out := new(Policy)
269 err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...)
270 if err != nil {
271 return nil, err
272 }
273 return out, nil
274}
275
276func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) {
277 out := new(TestIamPermissionsResponse)
278 err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...)
279 if err != nil {
280 return nil, err
281 }
282 return out, nil
283}
284
285// IAMPolicyServer is the server API for IAMPolicy service.
286type IAMPolicyServer interface {
287 // Sets the access control policy on the specified resource. Replaces any
288 // existing policy.
289 SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error)
290 // Gets the access control policy for a resource.
291 // Returns an empty policy if the resource exists and does not have a policy
292 // set.
293 GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error)
294 // Returns permissions that a caller has on the specified resource.
295 // If the resource does not exist, this will return an empty set of
296 // permissions, not a NOT_FOUND error.
297 TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error)
298}
299
300func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) {
301 s.RegisterService(&_IAMPolicy_serviceDesc, srv)
302}
303
304func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
305 in := new(SetIamPolicyRequest)
306 if err := dec(in); err != nil {
307 return nil, err
308 }
309 if interceptor == nil {
310 return srv.(IAMPolicyServer).SetIamPolicy(ctx, in)
311 }
312 info := &grpc.UnaryServerInfo{
313 Server: srv,
314 FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy",
315 }
316 handler := func(ctx context.Context, req interface{}) (interface{}, error) {
317 return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest))
318 }
319 return interceptor(ctx, in, info, handler)
320}
321
322func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
323 in := new(GetIamPolicyRequest)
324 if err := dec(in); err != nil {
325 return nil, err
326 }
327 if interceptor == nil {
328 return srv.(IAMPolicyServer).GetIamPolicy(ctx, in)
329 }
330 info := &grpc.UnaryServerInfo{
331 Server: srv,
332 FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy",
333 }
334 handler := func(ctx context.Context, req interface{}) (interface{}, error) {
335 return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest))
336 }
337 return interceptor(ctx, in, info, handler)
338}
339
340func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
341 in := new(TestIamPermissionsRequest)
342 if err := dec(in); err != nil {
343 return nil, err
344 }
345 if interceptor == nil {
346 return srv.(IAMPolicyServer).TestIamPermissions(ctx, in)
347 }
348 info := &grpc.UnaryServerInfo{
349 Server: srv,
350 FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions",
351 }
352 handler := func(ctx context.Context, req interface{}) (interface{}, error) {
353 return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest))
354 }
355 return interceptor(ctx, in, info, handler)
356}
357
358var _IAMPolicy_serviceDesc = grpc.ServiceDesc{
359 ServiceName: "google.iam.v1.IAMPolicy",
360 HandlerType: (*IAMPolicyServer)(nil),
361 Methods: []grpc.MethodDesc{
362 {
363 MethodName: "SetIamPolicy",
364 Handler: _IAMPolicy_SetIamPolicy_Handler,
365 },
366 {
367 MethodName: "GetIamPolicy",
368 Handler: _IAMPolicy_GetIamPolicy_Handler,
369 },
370 {
371 MethodName: "TestIamPermissions",
372 Handler: _IAMPolicy_TestIamPermissions_Handler,
373 },
374 },
375 Streams: []grpc.StreamDesc{},
376 Metadata: "google/iam/v1/iam_policy.proto",
377}
378
379func init() {
380 proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor_iam_policy_58547b5cf2e9d67a)
381}
382
383var fileDescriptor_iam_policy_58547b5cf2e9d67a = []byte{
384 // 411 bytes of a gzipped FileDescriptorProto
385 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
386 0xcf, 0x49, 0xd5, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0x04, 0x51, 0xf1, 0x05, 0xf9, 0x39, 0x99,
387 0xc9, 0x95, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xbc, 0x10, 0x79, 0xbd, 0xcc, 0xc4, 0x5c,
388 0xbd, 0x32, 0x43, 0x29, 0x19, 0xa8, 0xf2, 0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92,
389 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88, 0x62, 0x29, 0x29, 0x54, 0xc3, 0x90, 0x0d, 0x52, 0x4a,
390 0xe0, 0x12, 0x0e, 0x4e, 0x2d, 0xf1, 0x4c, 0xcc, 0x0d, 0x00, 0x8b, 0x06, 0xa5, 0x16, 0x96, 0xa6,
391 0x16, 0x97, 0x08, 0x49, 0x71, 0x71, 0x14, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30,
392 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xf9, 0x42, 0xba, 0x5c, 0x6c, 0x10, 0x23, 0x24, 0x98, 0x14,
393 0x18, 0x35, 0xb8, 0x8d, 0x44, 0xf5, 0x50, 0x1c, 0xa3, 0x07, 0x35, 0x09, 0xaa, 0x48, 0xc9, 0x90,
394 0x4b, 0xd8, 0x9d, 0x34, 0x1b, 0x94, 0x22, 0xb9, 0x24, 0x43, 0x52, 0x8b, 0xc1, 0x7a, 0x52, 0x8b,
395 0x72, 0x33, 0x8b, 0x8b, 0x41, 0x9e, 0x21, 0xc6, 0x69, 0x0a, 0x5c, 0xdc, 0x05, 0x08, 0x1d, 0x12,
396 0x4c, 0x0a, 0xcc, 0x1a, 0x9c, 0x41, 0xc8, 0x42, 0x4a, 0x76, 0x5c, 0x52, 0xd8, 0x8c, 0x2e, 0x2e,
397 0xc8, 0xcf, 0x2b, 0xc6, 0xd0, 0xcf, 0x88, 0xa1, 0xdf, 0x68, 0x0a, 0x33, 0x17, 0xa7, 0xa7, 0xa3,
398 0x2f, 0xc4, 0x2f, 0x42, 0x25, 0x5c, 0x3c, 0xc8, 0xa1, 0x27, 0xa4, 0x84, 0x16, 0x14, 0x58, 0x82,
399 0x56, 0x0a, 0x7b, 0x70, 0x29, 0x69, 0x36, 0x5d, 0x7e, 0x32, 0x99, 0x49, 0x59, 0x49, 0x0e, 0x14,
400 0x45, 0xd5, 0x30, 0x1f, 0xd9, 0x6a, 0x69, 0xd5, 0x5a, 0x15, 0x23, 0x99, 0x62, 0xc5, 0xa8, 0x05,
401 0xb2, 0xd5, 0x1d, 0x9f, 0xad, 0xee, 0x54, 0xb1, 0x35, 0x1d, 0xcd, 0xd6, 0x59, 0x8c, 0x5c, 0x42,
402 0x98, 0x41, 0x27, 0xa4, 0x81, 0x66, 0x30, 0xce, 0x88, 0x93, 0xd2, 0x24, 0x42, 0x25, 0x24, 0x1e,
403 0x94, 0xf4, 0xc1, 0xce, 0xd2, 0x54, 0x52, 0xc1, 0x74, 0x56, 0x09, 0x86, 0x2e, 0x2b, 0x46, 0x2d,
404 0xa7, 0x36, 0x46, 0x2e, 0xc1, 0xe4, 0xfc, 0x5c, 0x54, 0x1b, 0x9c, 0xf8, 0xe0, 0x1e, 0x08, 0x00,
405 0x25, 0xf6, 0x00, 0xc6, 0x28, 0x03, 0xa8, 0x82, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc,
406 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0x70, 0x56, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x43,
407 0x73, 0x8a, 0x75, 0x66, 0x62, 0xee, 0x0f, 0x46, 0xc6, 0x55, 0x4c, 0xc2, 0xee, 0x10, 0x5d, 0xce,
408 0x39, 0xf9, 0xa5, 0x29, 0x7a, 0x9e, 0x89, 0xb9, 0x7a, 0x61, 0x86, 0xa7, 0x60, 0xa2, 0x31, 0x60,
409 0xd1, 0x18, 0xcf, 0xc4, 0xdc, 0x98, 0x30, 0xc3, 0x24, 0x36, 0xb0, 0x59, 0xc6, 0x80, 0x00, 0x00,
410 0x00, 0xff, 0xff, 0xea, 0x62, 0x8f, 0x22, 0xc1, 0x03, 0x00, 0x00,
411}
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
new file mode 100644
index 0000000..99dd75f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
@@ -0,0 +1,366 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/iam/v1/policy.proto
3
4package iam // import "google.golang.org/genproto/googleapis/iam/v1"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9import _ "google.golang.org/genproto/googleapis/api/annotations"
10
11// Reference imports to suppress errors if they are not otherwise used.
12var _ = proto.Marshal
13var _ = fmt.Errorf
14var _ = math.Inf
15
16// This is a compile-time assertion to ensure that this generated file
17// is compatible with the proto package it is being compiled against.
18// A compilation error at this line likely means your copy of the
19// proto package needs to be updated.
20const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
21
22// The type of action performed on a Binding in a policy.
23type BindingDelta_Action int32
24
25const (
26 // Unspecified.
27 BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0
28 // Addition of a Binding.
29 BindingDelta_ADD BindingDelta_Action = 1
30 // Removal of a Binding.
31 BindingDelta_REMOVE BindingDelta_Action = 2
32)
33
34var BindingDelta_Action_name = map[int32]string{
35 0: "ACTION_UNSPECIFIED",
36 1: "ADD",
37 2: "REMOVE",
38}
39var BindingDelta_Action_value = map[string]int32{
40 "ACTION_UNSPECIFIED": 0,
41 "ADD": 1,
42 "REMOVE": 2,
43}
44
45func (x BindingDelta_Action) String() string {
46 return proto.EnumName(BindingDelta_Action_name, int32(x))
47}
48func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) {
49 return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{3, 0}
50}
51
52// Defines an Identity and Access Management (IAM) policy. It is used to
53// specify access control policies for Cloud Platform resources.
54//
55//
56// A `Policy` consists of a list of `bindings`. A `Binding` binds a list of
57// `members` to a `role`, where the members can be user accounts, Google groups,
58// Google domains, and service accounts. A `role` is a named list of permissions
59// defined by IAM.
60//
61// **Example**
62//
63// {
64// "bindings": [
65// {
66// "role": "roles/owner",
67// "members": [
68// "user:mike@example.com",
69// "group:admins@example.com",
70// "domain:google.com",
71// "serviceAccount:my-other-app@appspot.gserviceaccount.com",
72// ]
73// },
74// {
75// "role": "roles/viewer",
76// "members": ["user:sean@example.com"]
77// }
78// ]
79// }
80//
81// For a description of IAM and its features, see the
82// [IAM developer's guide](https://cloud.google.com/iam).
83type Policy struct {
84 // Version of the `Policy`. The default version is 0.
85 Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
86 // Associates a list of `members` to a `role`.
87 // Multiple `bindings` must not be specified for the same `role`.
88 // `bindings` with no members will result in an error.
89 Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"`
90 // `etag` is used for optimistic concurrency control as a way to help
91 // prevent simultaneous updates of a policy from overwriting each other.
92 // It is strongly suggested that systems make use of the `etag` in the
93 // read-modify-write cycle to perform policy updates in order to avoid race
94 // conditions: An `etag` is returned in the response to `getIamPolicy`, and
95 // systems are expected to put that etag in the request to `setIamPolicy` to
96 // ensure that their change will be applied to the same version of the policy.
97 //
98 // If no `etag` is provided in the call to `setIamPolicy`, then the existing
99 // policy is overwritten blindly.
100 Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"`
101 XXX_NoUnkeyedLiteral struct{} `json:"-"`
102 XXX_unrecognized []byte `json:"-"`
103 XXX_sizecache int32 `json:"-"`
104}
105
106func (m *Policy) Reset() { *m = Policy{} }
107func (m *Policy) String() string { return proto.CompactTextString(m) }
108func (*Policy) ProtoMessage() {}
109func (*Policy) Descriptor() ([]byte, []int) {
110 return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{0}
111}
112func (m *Policy) XXX_Unmarshal(b []byte) error {
113 return xxx_messageInfo_Policy.Unmarshal(m, b)
114}
115func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
116 return xxx_messageInfo_Policy.Marshal(b, m, deterministic)
117}
118func (dst *Policy) XXX_Merge(src proto.Message) {
119 xxx_messageInfo_Policy.Merge(dst, src)
120}
121func (m *Policy) XXX_Size() int {
122 return xxx_messageInfo_Policy.Size(m)
123}
124func (m *Policy) XXX_DiscardUnknown() {
125 xxx_messageInfo_Policy.DiscardUnknown(m)
126}
127
128var xxx_messageInfo_Policy proto.InternalMessageInfo
129
130func (m *Policy) GetVersion() int32 {
131 if m != nil {
132 return m.Version
133 }
134 return 0
135}
136
137func (m *Policy) GetBindings() []*Binding {
138 if m != nil {
139 return m.Bindings
140 }
141 return nil
142}
143
144func (m *Policy) GetEtag() []byte {
145 if m != nil {
146 return m.Etag
147 }
148 return nil
149}
150
151// Associates `members` with a `role`.
152type Binding struct {
153 // Role that is assigned to `members`.
154 // For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
155 // Required
156 Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
157 // Specifies the identities requesting access for a Cloud Platform resource.
158 // `members` can have the following values:
159 //
160 // * `allUsers`: A special identifier that represents anyone who is
161 // on the internet; with or without a Google account.
162 //
163 // * `allAuthenticatedUsers`: A special identifier that represents anyone
164 // who is authenticated with a Google account or a service account.
165 //
166 // * `user:{emailid}`: An email address that represents a specific Google
167 // account. For example, `alice@gmail.com` or `joe@example.com`.
168 //
169 //
170 // * `serviceAccount:{emailid}`: An email address that represents a service
171 // account. For example, `my-other-app@appspot.gserviceaccount.com`.
172 //
173 // * `group:{emailid}`: An email address that represents a Google group.
174 // For example, `admins@example.com`.
175 //
176 // * `domain:{domain}`: A Google Apps domain name that represents all the
177 // users of that domain. For example, `google.com` or `example.com`.
178 //
179 //
180 Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
181 XXX_NoUnkeyedLiteral struct{} `json:"-"`
182 XXX_unrecognized []byte `json:"-"`
183 XXX_sizecache int32 `json:"-"`
184}
185
186func (m *Binding) Reset() { *m = Binding{} }
187func (m *Binding) String() string { return proto.CompactTextString(m) }
188func (*Binding) ProtoMessage() {}
189func (*Binding) Descriptor() ([]byte, []int) {
190 return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{1}
191}
192func (m *Binding) XXX_Unmarshal(b []byte) error {
193 return xxx_messageInfo_Binding.Unmarshal(m, b)
194}
195func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
196 return xxx_messageInfo_Binding.Marshal(b, m, deterministic)
197}
198func (dst *Binding) XXX_Merge(src proto.Message) {
199 xxx_messageInfo_Binding.Merge(dst, src)
200}
201func (m *Binding) XXX_Size() int {
202 return xxx_messageInfo_Binding.Size(m)
203}
204func (m *Binding) XXX_DiscardUnknown() {
205 xxx_messageInfo_Binding.DiscardUnknown(m)
206}
207
208var xxx_messageInfo_Binding proto.InternalMessageInfo
209
210func (m *Binding) GetRole() string {
211 if m != nil {
212 return m.Role
213 }
214 return ""
215}
216
217func (m *Binding) GetMembers() []string {
218 if m != nil {
219 return m.Members
220 }
221 return nil
222}
223
224// The difference delta between two policies.
225type PolicyDelta struct {
226 // The delta for Bindings between two policies.
227 BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"`
228 XXX_NoUnkeyedLiteral struct{} `json:"-"`
229 XXX_unrecognized []byte `json:"-"`
230 XXX_sizecache int32 `json:"-"`
231}
232
233func (m *PolicyDelta) Reset() { *m = PolicyDelta{} }
234func (m *PolicyDelta) String() string { return proto.CompactTextString(m) }
235func (*PolicyDelta) ProtoMessage() {}
236func (*PolicyDelta) Descriptor() ([]byte, []int) {
237 return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{2}
238}
239func (m *PolicyDelta) XXX_Unmarshal(b []byte) error {
240 return xxx_messageInfo_PolicyDelta.Unmarshal(m, b)
241}
242func (m *PolicyDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
243 return xxx_messageInfo_PolicyDelta.Marshal(b, m, deterministic)
244}
245func (dst *PolicyDelta) XXX_Merge(src proto.Message) {
246 xxx_messageInfo_PolicyDelta.Merge(dst, src)
247}
248func (m *PolicyDelta) XXX_Size() int {
249 return xxx_messageInfo_PolicyDelta.Size(m)
250}
251func (m *PolicyDelta) XXX_DiscardUnknown() {
252 xxx_messageInfo_PolicyDelta.DiscardUnknown(m)
253}
254
255var xxx_messageInfo_PolicyDelta proto.InternalMessageInfo
256
257func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta {
258 if m != nil {
259 return m.BindingDeltas
260 }
261 return nil
262}
263
264// One delta entry for Binding. Each individual change (only one member in each
265// entry) to a binding will be a separate entry.
266type BindingDelta struct {
267 // The action that was performed on a Binding.
268 // Required
269 Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"`
270 // Role that is assigned to `members`.
271 // For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
272 // Required
273 Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
274 // A single identity requesting access for a Cloud Platform resource.
275 // Follows the same format of Binding.members.
276 // Required
277 Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"`
278 XXX_NoUnkeyedLiteral struct{} `json:"-"`
279 XXX_unrecognized []byte `json:"-"`
280 XXX_sizecache int32 `json:"-"`
281}
282
283func (m *BindingDelta) Reset() { *m = BindingDelta{} }
284func (m *BindingDelta) String() string { return proto.CompactTextString(m) }
285func (*BindingDelta) ProtoMessage() {}
286func (*BindingDelta) Descriptor() ([]byte, []int) {
287 return fileDescriptor_policy_6ba2a3dcbcdd909c, []int{3}
288}
289func (m *BindingDelta) XXX_Unmarshal(b []byte) error {
290 return xxx_messageInfo_BindingDelta.Unmarshal(m, b)
291}
292func (m *BindingDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
293 return xxx_messageInfo_BindingDelta.Marshal(b, m, deterministic)
294}
295func (dst *BindingDelta) XXX_Merge(src proto.Message) {
296 xxx_messageInfo_BindingDelta.Merge(dst, src)
297}
298func (m *BindingDelta) XXX_Size() int {
299 return xxx_messageInfo_BindingDelta.Size(m)
300}
301func (m *BindingDelta) XXX_DiscardUnknown() {
302 xxx_messageInfo_BindingDelta.DiscardUnknown(m)
303}
304
305var xxx_messageInfo_BindingDelta proto.InternalMessageInfo
306
307func (m *BindingDelta) GetAction() BindingDelta_Action {
308 if m != nil {
309 return m.Action
310 }
311 return BindingDelta_ACTION_UNSPECIFIED
312}
313
314func (m *BindingDelta) GetRole() string {
315 if m != nil {
316 return m.Role
317 }
318 return ""
319}
320
321func (m *BindingDelta) GetMember() string {
322 if m != nil {
323 return m.Member
324 }
325 return ""
326}
327
328func init() {
329 proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy")
330 proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding")
331 proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta")
332 proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta")
333 proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value)
334}
335
336func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor_policy_6ba2a3dcbcdd909c) }
337
338var fileDescriptor_policy_6ba2a3dcbcdd909c = []byte{
339 // 403 bytes of a gzipped FileDescriptorProto
340 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4d, 0xab, 0x13, 0x31,
341 0x14, 0x35, 0xed, 0x73, 0x6a, 0xef, 0xfb, 0xa0, 0x46, 0x28, 0xc3, 0xd3, 0x45, 0x99, 0x55, 0x57,
342 0x19, 0x5b, 0x11, 0x41, 0x57, 0xfd, 0x18, 0x65, 0x16, 0xbe, 0x37, 0x46, 0xed, 0x42, 0x0a, 0x8f,
343 0x4c, 0x1b, 0x42, 0x64, 0x92, 0x0c, 0x33, 0x63, 0xc1, 0xb5, 0xff, 0x46, 0xf0, 0x8f, 0xf8, 0x8b,
344 0x5c, 0xca, 0x24, 0x99, 0x47, 0x0b, 0xe2, 0x2e, 0xe7, 0x9e, 0x73, 0x72, 0xcf, 0xcd, 0x0d, 0x5c,
345 0x0b, 0x63, 0x44, 0xc1, 0x63, 0xc9, 0x54, 0x7c, 0x98, 0xc5, 0xa5, 0x29, 0xe4, 0xee, 0x3b, 0x29,
346 0x2b, 0xd3, 0x18, 0x7c, 0xe9, 0x38, 0x22, 0x99, 0x22, 0x87, 0xd9, 0xf5, 0x33, 0x2f, 0x65, 0xa5,
347 0x8c, 0x99, 0xd6, 0xa6, 0x61, 0x8d, 0x34, 0xba, 0x76, 0xe2, 0xe8, 0x2b, 0x04, 0x99, 0x35, 0xe3,
348 0x10, 0x06, 0x07, 0x5e, 0xd5, 0xd2, 0xe8, 0x10, 0x4d, 0xd0, 0xf4, 0x21, 0xed, 0x20, 0x9e, 0xc3,
349 0xa3, 0x5c, 0xea, 0xbd, 0xd4, 0xa2, 0x0e, 0xcf, 0x26, 0xfd, 0xe9, 0xf9, 0x7c, 0x4c, 0x4e, 0x7a,
350 0x90, 0xa5, 0xa3, 0xe9, 0xbd, 0x0e, 0x63, 0x38, 0xe3, 0x0d, 0x13, 0x61, 0x7f, 0x82, 0xa6, 0x17,
351 0xd4, 0x9e, 0xa3, 0x57, 0x30, 0xf0, 0xc2, 0x96, 0xae, 0x4c, 0xc1, 0x6d, 0xa7, 0x21, 0xb5, 0xe7,
352 0x36, 0x80, 0xe2, 0x2a, 0xe7, 0x55, 0x1d, 0xf6, 0x26, 0xfd, 0xe9, 0x90, 0x76, 0x30, 0xfa, 0x00,
353 0xe7, 0x2e, 0xe4, 0x9a, 0x17, 0x0d, 0xc3, 0x4b, 0xb8, 0xf2, 0x7d, 0xee, 0xf6, 0x6d, 0xa1, 0x0e,
354 0x91, 0x4d, 0xf5, 0xf4, 0xdf, 0xa9, 0xac, 0x89, 0x5e, 0xe6, 0x47, 0xa8, 0x8e, 0x7e, 0x21, 0xb8,
355 0x38, 0xe6, 0xf1, 0x6b, 0x08, 0xd8, 0xae, 0xe9, 0xa6, 0xbf, 0x9a, 0x47, 0xff, 0xb9, 0x8c, 0x2c,
356 0xac, 0x92, 0x7a, 0xc7, 0xfd, 0x34, 0xbd, 0xa3, 0x69, 0xc6, 0x10, 0xb8, 0xf8, 0xf6, 0x09, 0x86,
357 0xd4, 0xa3, 0xe8, 0x25, 0x04, 0xce, 0x8d, 0xc7, 0x80, 0x17, 0xab, 0x4f, 0xe9, 0xed, 0xcd, 0xdd,
358 0xe7, 0x9b, 0x8f, 0x59, 0xb2, 0x4a, 0xdf, 0xa6, 0xc9, 0x7a, 0xf4, 0x00, 0x0f, 0xa0, 0xbf, 0x58,
359 0xaf, 0x47, 0x08, 0x03, 0x04, 0x34, 0x79, 0x7f, 0xbb, 0x49, 0x46, 0xbd, 0xe5, 0x0f, 0x04, 0x8f,
360 0x77, 0x46, 0x9d, 0x86, 0x5a, 0xfa, 0x67, 0xc9, 0xda, 0x55, 0x66, 0xe8, 0xcb, 0x73, 0xcf, 0x0a,
361 0x53, 0x30, 0x2d, 0x88, 0xa9, 0x44, 0x2c, 0xb8, 0xb6, 0x8b, 0x8e, 0x1d, 0xc5, 0x4a, 0x59, 0xfb,
362 0x4f, 0xf3, 0x46, 0x32, 0xf5, 0x07, 0xa1, 0x9f, 0xbd, 0x27, 0xef, 0x9c, 0x6b, 0x55, 0x98, 0x6f,
363 0x7b, 0x92, 0x32, 0x45, 0x36, 0xb3, 0xdf, 0x5d, 0x75, 0x6b, 0xab, 0xdb, 0x94, 0xa9, 0xed, 0x66,
364 0x96, 0x07, 0xf6, 0xae, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x18, 0xca, 0xaa, 0x7f,
365 0x02, 0x00, 0x00,
366}
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
new file mode 100644
index 0000000..410e374
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
@@ -0,0 +1,246 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/rpc/code.proto
3
4package code // import "google.golang.org/genproto/googleapis/rpc/code"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10// Reference imports to suppress errors if they are not otherwise used.
11var _ = proto.Marshal
12var _ = fmt.Errorf
13var _ = math.Inf
14
15// This is a compile-time assertion to ensure that this generated file
16// is compatible with the proto package it is being compiled against.
17// A compilation error at this line likely means your copy of the
18// proto package needs to be updated.
19const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
20
21// The canonical error codes for Google APIs.
22//
23//
24// Sometimes multiple error codes may apply. Services should return
25// the most specific error code that applies. For example, prefer
26// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
27// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
28type Code int32
29
30const (
31 // Not an error; returned on success
32 //
33 // HTTP Mapping: 200 OK
34 Code_OK Code = 0
35 // The operation was cancelled, typically by the caller.
36 //
37 // HTTP Mapping: 499 Client Closed Request
38 Code_CANCELLED Code = 1
39 // Unknown error. For example, this error may be returned when
40 // a `Status` value received from another address space belongs to
41 // an error space that is not known in this address space. Also
42 // errors raised by APIs that do not return enough error information
43 // may be converted to this error.
44 //
45 // HTTP Mapping: 500 Internal Server Error
46 Code_UNKNOWN Code = 2
47 // The client specified an invalid argument. Note that this differs
48 // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
49 // that are problematic regardless of the state of the system
50 // (e.g., a malformed file name).
51 //
52 // HTTP Mapping: 400 Bad Request
53 Code_INVALID_ARGUMENT Code = 3
54 // The deadline expired before the operation could complete. For operations
55 // that change the state of the system, this error may be returned
56 // even if the operation has completed successfully. For example, a
57 // successful response from a server could have been delayed long
58 // enough for the deadline to expire.
59 //
60 // HTTP Mapping: 504 Gateway Timeout
61 Code_DEADLINE_EXCEEDED Code = 4
62 // Some requested entity (e.g., file or directory) was not found.
63 //
64 // Note to server developers: if a request is denied for an entire class
65 // of users, such as gradual feature rollout or undocumented whitelist,
66 // `NOT_FOUND` may be used. If a request is denied for some users within
67 // a class of users, such as user-based access control, `PERMISSION_DENIED`
68 // must be used.
69 //
70 // HTTP Mapping: 404 Not Found
71 Code_NOT_FOUND Code = 5
72 // The entity that a client attempted to create (e.g., file or directory)
73 // already exists.
74 //
75 // HTTP Mapping: 409 Conflict
76 Code_ALREADY_EXISTS Code = 6
77 // The caller does not have permission to execute the specified
78 // operation. `PERMISSION_DENIED` must not be used for rejections
79 // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
80 // instead for those errors). `PERMISSION_DENIED` must not be
81 // used if the caller can not be identified (use `UNAUTHENTICATED`
82 // instead for those errors). This error code does not imply the
83 // request is valid or the requested entity exists or satisfies
84 // other pre-conditions.
85 //
86 // HTTP Mapping: 403 Forbidden
87 Code_PERMISSION_DENIED Code = 7
88 // The request does not have valid authentication credentials for the
89 // operation.
90 //
91 // HTTP Mapping: 401 Unauthorized
92 Code_UNAUTHENTICATED Code = 16
93 // Some resource has been exhausted, perhaps a per-user quota, or
94 // perhaps the entire file system is out of space.
95 //
96 // HTTP Mapping: 429 Too Many Requests
97 Code_RESOURCE_EXHAUSTED Code = 8
98 // The operation was rejected because the system is not in a state
99 // required for the operation's execution. For example, the directory
100 // to be deleted is non-empty, an rmdir operation is applied to
101 // a non-directory, etc.
102 //
103 // Service implementors can use the following guidelines to decide
104 // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
105 // (a) Use `UNAVAILABLE` if the client can retry just the failing call.
106 // (b) Use `ABORTED` if the client should retry at a higher level
107 // (e.g., when a client-specified test-and-set fails, indicating the
108 // client should restart a read-modify-write sequence).
109 // (c) Use `FAILED_PRECONDITION` if the client should not retry until
110 // the system state has been explicitly fixed. E.g., if an "rmdir"
111 // fails because the directory is non-empty, `FAILED_PRECONDITION`
112 // should be returned since the client should not retry unless
113 // the files are deleted from the directory.
114 //
115 // HTTP Mapping: 400 Bad Request
116 Code_FAILED_PRECONDITION Code = 9
117 // The operation was aborted, typically due to a concurrency issue such as
118 // a sequencer check failure or transaction abort.
119 //
120 // See the guidelines above for deciding between `FAILED_PRECONDITION`,
121 // `ABORTED`, and `UNAVAILABLE`.
122 //
123 // HTTP Mapping: 409 Conflict
124 Code_ABORTED Code = 10
125 // The operation was attempted past the valid range. E.g., seeking or
126 // reading past end-of-file.
127 //
128 // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
129 // be fixed if the system state changes. For example, a 32-bit file
130 // system will generate `INVALID_ARGUMENT` if asked to read at an
131 // offset that is not in the range [0,2^32-1], but it will generate
132 // `OUT_OF_RANGE` if asked to read from an offset past the current
133 // file size.
134 //
135 // There is a fair bit of overlap between `FAILED_PRECONDITION` and
136 // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
137 // error) when it applies so that callers who are iterating through
138 // a space can easily look for an `OUT_OF_RANGE` error to detect when
139 // they are done.
140 //
141 // HTTP Mapping: 400 Bad Request
142 Code_OUT_OF_RANGE Code = 11
143 // The operation is not implemented or is not supported/enabled in this
144 // service.
145 //
146 // HTTP Mapping: 501 Not Implemented
147 Code_UNIMPLEMENTED Code = 12
148 // Internal errors. This means that some invariants expected by the
149 // underlying system have been broken. This error code is reserved
150 // for serious errors.
151 //
152 // HTTP Mapping: 500 Internal Server Error
153 Code_INTERNAL Code = 13
154 // The service is currently unavailable. This is most likely a
155 // transient condition, which can be corrected by retrying with
156 // a backoff.
157 //
158 // See the guidelines above for deciding between `FAILED_PRECONDITION`,
159 // `ABORTED`, and `UNAVAILABLE`.
160 //
161 // HTTP Mapping: 503 Service Unavailable
162 Code_UNAVAILABLE Code = 14
163 // Unrecoverable data loss or corruption.
164 //
165 // HTTP Mapping: 500 Internal Server Error
166 Code_DATA_LOSS Code = 15
167)
168
169var Code_name = map[int32]string{
170 0: "OK",
171 1: "CANCELLED",
172 2: "UNKNOWN",
173 3: "INVALID_ARGUMENT",
174 4: "DEADLINE_EXCEEDED",
175 5: "NOT_FOUND",
176 6: "ALREADY_EXISTS",
177 7: "PERMISSION_DENIED",
178 16: "UNAUTHENTICATED",
179 8: "RESOURCE_EXHAUSTED",
180 9: "FAILED_PRECONDITION",
181 10: "ABORTED",
182 11: "OUT_OF_RANGE",
183 12: "UNIMPLEMENTED",
184 13: "INTERNAL",
185 14: "UNAVAILABLE",
186 15: "DATA_LOSS",
187}
188var Code_value = map[string]int32{
189 "OK": 0,
190 "CANCELLED": 1,
191 "UNKNOWN": 2,
192 "INVALID_ARGUMENT": 3,
193 "DEADLINE_EXCEEDED": 4,
194 "NOT_FOUND": 5,
195 "ALREADY_EXISTS": 6,
196 "PERMISSION_DENIED": 7,
197 "UNAUTHENTICATED": 16,
198 "RESOURCE_EXHAUSTED": 8,
199 "FAILED_PRECONDITION": 9,
200 "ABORTED": 10,
201 "OUT_OF_RANGE": 11,
202 "UNIMPLEMENTED": 12,
203 "INTERNAL": 13,
204 "UNAVAILABLE": 14,
205 "DATA_LOSS": 15,
206}
207
208func (x Code) String() string {
209 return proto.EnumName(Code_name, int32(x))
210}
211func (Code) EnumDescriptor() ([]byte, []int) {
212 return fileDescriptor_code_932ba152e0df0902, []int{0}
213}
214
215func init() {
216 proto.RegisterEnum("google.rpc.Code", Code_name, Code_value)
217}
218
219func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_code_932ba152e0df0902) }
220
221var fileDescriptor_code_932ba152e0df0902 = []byte{
222 // 362 bytes of a gzipped FileDescriptorProto
223 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x51, 0xcd, 0x6e, 0x93, 0x31,
224 0x10, 0xa4, 0x69, 0x49, 0x9b, 0xcd, 0xdf, 0xd6, 0xa5, 0xf0, 0x0e, 0x1c, 0x92, 0x43, 0x8f, 0x9c,
225 0x36, 0x9f, 0x37, 0xad, 0x55, 0x67, 0xfd, 0xc9, 0x3f, 0x25, 0x70, 0xb1, 0x4a, 0x1a, 0x7d, 0x42,
226 0x2a, 0x75, 0xf4, 0xc1, 0x13, 0xf1, 0x12, 0xbc, 0x1e, 0x72, 0x8b, 0xe8, 0xc5, 0x87, 0x99, 0xf1,
227 0xee, 0xce, 0x0c, 0x5c, 0x76, 0xa5, 0x74, 0x8f, 0xfb, 0x65, 0x7f, 0xd8, 0x2d, 0x77, 0xe5, 0x61,
228 0xbf, 0x38, 0xf4, 0xe5, 0x57, 0x51, 0xf0, 0x02, 0x2f, 0xfa, 0xc3, 0xee, 0xe3, 0x9f, 0x01, 0x9c,
229 0x34, 0xe5, 0x61, 0xaf, 0x86, 0x30, 0x70, 0xb7, 0xf8, 0x46, 0x4d, 0x61, 0xd4, 0x90, 0x34, 0x6c,
230 0x2d, 0x6b, 0x3c, 0x52, 0x63, 0x38, 0x4d, 0x72, 0x2b, 0xee, 0xb3, 0xe0, 0x40, 0xbd, 0x03, 0x34,
231 0x72, 0x47, 0xd6, 0xe8, 0x4c, 0xfe, 0x3a, 0x6d, 0x58, 0x22, 0x1e, 0xab, 0x4b, 0x38, 0xd7, 0x4c,
232 0xda, 0x1a, 0xe1, 0xcc, 0xdb, 0x86, 0x59, 0xb3, 0xc6, 0x93, 0x3a, 0x48, 0x5c, 0xcc, 0x6b, 0x97,
233 0x44, 0xe3, 0x5b, 0xa5, 0x60, 0x46, 0xd6, 0x33, 0xe9, 0x2f, 0x99, 0xb7, 0x26, 0xc4, 0x80, 0xc3,
234 0xfa, 0xb3, 0x65, 0xbf, 0x31, 0x21, 0x18, 0x27, 0x59, 0xb3, 0x18, 0xd6, 0x78, 0xaa, 0x2e, 0x60,
235 0x9e, 0x84, 0x52, 0xbc, 0x61, 0x89, 0xa6, 0xa1, 0xc8, 0x1a, 0x51, 0xbd, 0x07, 0xe5, 0x39, 0xb8,
236 0xe4, 0x9b, 0xba, 0xe5, 0x86, 0x52, 0xa8, 0xf8, 0x99, 0xfa, 0x00, 0x17, 0x6b, 0x32, 0x96, 0x75,
237 0x6e, 0x3d, 0x37, 0x4e, 0xb4, 0x89, 0xc6, 0x09, 0x8e, 0xea, 0xe5, 0xb4, 0x72, 0xbe, 0xaa, 0x40,
238 0x21, 0x4c, 0x5c, 0x8a, 0xd9, 0xad, 0xb3, 0x27, 0xb9, 0x66, 0x1c, 0xab, 0x73, 0x98, 0x26, 0x31,
239 0x9b, 0xd6, 0x72, 0xb5, 0xc1, 0x1a, 0x27, 0x6a, 0x02, 0x67, 0x46, 0x22, 0x7b, 0x21, 0x8b, 0x53,
240 0x35, 0x87, 0x71, 0x12, 0xba, 0x23, 0x63, 0x69, 0x65, 0x19, 0x67, 0xd5, 0x90, 0xa6, 0x48, 0xd9,
241 0xba, 0x10, 0x70, 0xbe, 0xda, 0xc2, 0x6c, 0x57, 0x7e, 0x2c, 0x5e, 0xb3, 0x5c, 0x8d, 0x6a, 0x90,
242 0x6d, 0x8d, 0xb8, 0x3d, 0xfa, 0x7a, 0xf5, 0x8f, 0xe8, 0xca, 0xe3, 0xfd, 0x53, 0xb7, 0x28, 0x7d,
243 0xb7, 0xec, 0xf6, 0x4f, 0xcf, 0x05, 0x2c, 0x5f, 0xa8, 0xfb, 0xc3, 0xf7, 0x9f, 0xff, 0xab, 0xf9,
244 0x54, 0x9f, 0xdf, 0x83, 0x63, 0xdf, 0x36, 0xdf, 0x86, 0xcf, 0xaa, 0xab, 0xbf, 0x01, 0x00, 0x00,
245 0xff, 0xff, 0x8e, 0x97, 0x77, 0xc2, 0xbf, 0x01, 0x00, 0x00,
246}
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index 8867ae7..7bfe37a 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,21 +1,12 @@
1// Code generated by protoc-gen-go. DO NOT EDIT. 1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: google/rpc/status.proto 2// source: google/rpc/status.proto
3 3
4/* 4package status // import "google.golang.org/genproto/googleapis/rpc/status"
5Package status is a generated protocol buffer package.
6
7It is generated from these files:
8 google/rpc/status.proto
9
10It has these top-level messages:
11 Status
12*/
13package status
14 5
15import proto "github.com/golang/protobuf/proto" 6import proto "github.com/golang/protobuf/proto"
16import fmt "fmt" 7import fmt "fmt"
17import math "math" 8import math "math"
18import google_protobuf "github.com/golang/protobuf/ptypes/any" 9import any "github.com/golang/protobuf/ptypes/any"
19 10
20// Reference imports to suppress errors if they are not otherwise used. 11// Reference imports to suppress errors if they are not otherwise used.
21var _ = proto.Marshal 12var _ = proto.Marshal
@@ -82,20 +73,42 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
82// be used directly after any stripping needed for security/privacy reasons. 73// be used directly after any stripping needed for security/privacy reasons.
83type Status struct { 74type Status struct {
84 // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. 75 // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
85 Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` 76 Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
86 // A developer-facing error message, which should be in English. Any 77 // A developer-facing error message, which should be in English. Any
87 // user-facing error message should be localized and sent in the 78 // user-facing error message should be localized and sent in the
88 // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. 79 // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
89 Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` 80 Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
90 // A list of messages that carry the error details. There is a common set of 81 // A list of messages that carry the error details. There is a common set of
91 // message types for APIs to use. 82 // message types for APIs to use.
92 Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` 83 Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
84 XXX_NoUnkeyedLiteral struct{} `json:"-"`
85 XXX_unrecognized []byte `json:"-"`
86 XXX_sizecache int32 `json:"-"`
87}
88
89func (m *Status) Reset() { *m = Status{} }
90func (m *Status) String() string { return proto.CompactTextString(m) }
91func (*Status) ProtoMessage() {}
92func (*Status) Descriptor() ([]byte, []int) {
93 return fileDescriptor_status_c6e4de62dcdf2edf, []int{0}
94}
95func (m *Status) XXX_Unmarshal(b []byte) error {
96 return xxx_messageInfo_Status.Unmarshal(m, b)
97}
98func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
99 return xxx_messageInfo_Status.Marshal(b, m, deterministic)
100}
101func (dst *Status) XXX_Merge(src proto.Message) {
102 xxx_messageInfo_Status.Merge(dst, src)
103}
104func (m *Status) XXX_Size() int {
105 return xxx_messageInfo_Status.Size(m)
106}
107func (m *Status) XXX_DiscardUnknown() {
108 xxx_messageInfo_Status.DiscardUnknown(m)
93} 109}
94 110
95func (m *Status) Reset() { *m = Status{} } 111var xxx_messageInfo_Status proto.InternalMessageInfo
96func (m *Status) String() string { return proto.CompactTextString(m) }
97func (*Status) ProtoMessage() {}
98func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
99 112
100func (m *Status) GetCode() int32 { 113func (m *Status) GetCode() int32 {
101 if m != nil { 114 if m != nil {
@@ -111,7 +124,7 @@ func (m *Status) GetMessage() string {
111 return "" 124 return ""
112} 125}
113 126
114func (m *Status) GetDetails() []*google_protobuf.Any { 127func (m *Status) GetDetails() []*any.Any {
115 if m != nil { 128 if m != nil {
116 return m.Details 129 return m.Details
117 } 130 }
@@ -122,9 +135,9 @@ func init() {
122 proto.RegisterType((*Status)(nil), "google.rpc.Status") 135 proto.RegisterType((*Status)(nil), "google.rpc.Status")
123} 136}
124 137
125func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) } 138func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c6e4de62dcdf2edf) }
126 139
127var fileDescriptor0 = []byte{ 140var fileDescriptor_status_c6e4de62dcdf2edf = []byte{
128 // 209 bytes of a gzipped FileDescriptorProto 141 // 209 bytes of a gzipped FileDescriptorProto
129 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, 142 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
130 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, 143 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml
index 88a785d..f443eec 100644
--- a/vendor/google.golang.org/grpc/.travis.yml
+++ b/vendor/google.golang.org/grpc/.travis.yml
@@ -1,21 +1,37 @@
1language: go 1language: go
2 2
3go: 3matrix:
4 - 1.6.x 4 include:
5 - 1.7.x 5 - go: 1.11.x
6 - 1.8.x 6 env: VET=1 GO111MODULE=on
7 - go: 1.11.x
8 env: RACE=1 GO111MODULE=on
9 - go: 1.11.x
10 env: RUN386=1
11 - go: 1.11.x
12 env: GRPC_GO_RETRY=on
13 - go: 1.10.x
14 - go: 1.9.x
15 - go: 1.9.x
16 env: GAE=1
7 17
8go_import_path: google.golang.org/grpc 18go_import_path: google.golang.org/grpc
9 19
10before_install: 20before_install:
11 - if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then go get -u github.com/golang/lint/golint honnef.co/go/tools/cmd/staticcheck; fi 21 - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
12 - go get -u golang.org/x/tools/cmd/goimports github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover 22 - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
23 - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
24 - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi
25
26install:
27 - try3() { eval "$*" || eval "$*" || eval "$*"; }
28 - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
29 - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi
30 - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi
13 31
14script: 32script:
15 - 'set -o pipefail && git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read)' 33 - set -e
16 - 'set -o pipefail && gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)' 34 - if [[ "${VET}" = 1 ]]; then ./vet.sh; fi
17 - 'set -o pipefail && goimports -l . 2>&1 | tee /dev/stderr | (! read)' 35 - if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi
18 - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' 36 - if [[ "${RACE}" = 1 ]]; then make testrace; exit 0; fi
19 - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! go tool vet -all . 2>&1 | grep -vF .pb.go:; fi' # https://github.com/golang/protobuf/issues/214 37 - make test
20 - make test testrace
21 - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./...; fi' # TODO(menghanl): fix these
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index a5c6e06..0863eb2 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -7,7 +7,7 @@ If you are new to github, please start by reading [Pull Request howto](https://h
7## Legal requirements 7## Legal requirements
8 8
9In order to protect both you and ourselves, you will need to sign the 9In order to protect both you and ourselves, you will need to sign the
10[Contributor License Agreement](https://cla.developers.google.com/clas). 10[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
11 11
12## Guidelines for Pull Requests 12## Guidelines for Pull Requests
13How to get your contributions merged smoothly and quickly. 13How to get your contributions merged smoothly and quickly.
@@ -27,6 +27,10 @@ How to get your contributions merged smoothly and quickly.
27- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). 27- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).
28 28
29- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. 29- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
30 - `make all` to test everything, OR
31 - `make vet` to catch vet errors
32 - `make test` to run the tests
33 - `make testrace` to run tests in race mode
30 34
31- Exceptions to the rules can be made if there's a compelling reason for doing so. 35- Exceptions to the rules can be made if there's a compelling reason for doing so.
32 36
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index 03bb01f..41a754f 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -1,52 +1,60 @@
1all: test testrace 1all: vet test testrace testappengine
2
3deps:
4 go get -d -v google.golang.org/grpc/...
5
6updatedeps:
7 go get -d -v -u -f google.golang.org/grpc/...
8
9testdeps:
10 go get -d -v -t google.golang.org/grpc/...
11
12updatetestdeps:
13 go get -d -v -t -u -f google.golang.org/grpc/...
14 2
15build: deps 3build: deps
16 go build google.golang.org/grpc/... 4 go build google.golang.org/grpc/...
17 5
6clean:
7 go clean -i google.golang.org/grpc/...
8
9deps:
10 go get -d -v google.golang.org/grpc/...
11
18proto: 12proto:
19 @ if ! which protoc > /dev/null; then \ 13 @ if ! which protoc > /dev/null; then \
20 echo "error: protoc not installed" >&2; \ 14 echo "error: protoc not installed" >&2; \
21 exit 1; \ 15 exit 1; \
22 fi 16 fi
23 go get -u -v github.com/golang/protobuf/protoc-gen-go 17 go generate google.golang.org/grpc/...
24 # use $$dir as the root for all proto files in the same directory
25 for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \
26 protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \
27 done
28 18
29test: testdeps 19test: testdeps
30 go test -v -cpu 1,4 google.golang.org/grpc/... 20 go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
21
22testappengine: testappenginedeps
23 goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
24
25testappenginedeps:
26 goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
27
28testdeps:
29 go get -d -v -t google.golang.org/grpc/...
31 30
32testrace: testdeps 31testrace: testdeps
33 go test -v -race -cpu 1,4 google.golang.org/grpc/... 32 go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
34 33
35clean: 34updatedeps:
36 go clean -i google.golang.org/grpc/... 35 go get -d -v -u -f google.golang.org/grpc/...
36
37updatetestdeps:
38 go get -d -v -t -u -f google.golang.org/grpc/...
39
40vet: vetdeps
41 ./vet.sh
37 42
38coverage: testdeps 43vetdeps:
39 ./coverage.sh --coveralls 44 ./vet.sh -install
40 45
41.PHONY: \ 46.PHONY: \
42 all \ 47 all \
43 deps \
44 updatedeps \
45 testdeps \
46 updatetestdeps \
47 build \ 48 build \
49 clean \
50 deps \
48 proto \ 51 proto \
49 test \ 52 test \
53 testappengine \
54 testappenginedeps \
55 testdeps \
50 testrace \ 56 testrace \
51 clean \ 57 updatedeps \
52 coverage 58 updatetestdeps \
59 vet \
60 vetdeps
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 72c7325..e3fb3c7 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,6 +1,6 @@
1# gRPC-Go 1# gRPC-Go
2 2
3[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) 3[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
4 4
5The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. 5The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.
6 6
@@ -10,13 +10,13 @@ Installation
10To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: 10To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run:
11 11
12``` 12```
13$ go get google.golang.org/grpc 13$ go get -u google.golang.org/grpc
14``` 14```
15 15
16Prerequisites 16Prerequisites
17------------- 17-------------
18 18
19This requires Go 1.6 or later. 19gRPC-Go requires Go 1.9 or later.
20 20
21Constraints 21Constraints
22----------- 22-----------
@@ -43,3 +43,25 @@ Please update proto package, gRPC package and rebuild the proto files:
43 - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` 43 - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
44 - `go get -u google.golang.org/grpc` 44 - `go get -u google.golang.org/grpc`
45 - `protoc --go_out=plugins=grpc:. *.proto` 45 - `protoc --go_out=plugins=grpc:. *.proto`
46
47#### How to turn on logging
48
49The default logger is controlled by the environment variables. Turn everything
50on by setting:
51
52```
53GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
54```
55
56#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
57
58This error means the connection the RPC is using was closed, and there are many
59possible reasons, including:
60 1. mis-configured transport credentials, connection failed on handshaking
61 1. bytes disrupted, possibly by a proxy in between
62 1. server shutdown
63
64It can be tricky to debug this because the error happens on the client side but
65the root cause of the connection being closed is on the server side. Turn on
66logging on __both client and server__, and see if there are any transport
67errors.
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index 090fbe8..fa31565 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -16,83 +16,23 @@
16 * 16 *
17 */ 17 */
18 18
19// See internal/backoff package for the backoff implementation. This file is
20// kept for the exported types and API backward compatility.
21
19package grpc 22package grpc
20 23
21import ( 24import (
22 "math/rand"
23 "time" 25 "time"
24) 26)
25 27
26// DefaultBackoffConfig uses values specified for backoff in 28// DefaultBackoffConfig uses values specified for backoff in
27// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 29// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
28var ( 30var DefaultBackoffConfig = BackoffConfig{
29 DefaultBackoffConfig = BackoffConfig{ 31 MaxDelay: 120 * time.Second,
30 MaxDelay: 120 * time.Second,
31 baseDelay: 1.0 * time.Second,
32 factor: 1.6,
33 jitter: 0.2,
34 }
35)
36
37// backoffStrategy defines the methodology for backing off after a grpc
38// connection failure.
39//
40// This is unexported until the gRPC project decides whether or not to allow
41// alternative backoff strategies. Once a decision is made, this type and its
42// method may be exported.
43type backoffStrategy interface {
44 // backoff returns the amount of time to wait before the next retry given
45 // the number of consecutive failures.
46 backoff(retries int) time.Duration
47} 32}
48 33
49// BackoffConfig defines the parameters for the default gRPC backoff strategy. 34// BackoffConfig defines the parameters for the default gRPC backoff strategy.
50type BackoffConfig struct { 35type BackoffConfig struct {
51 // MaxDelay is the upper bound of backoff delay. 36 // MaxDelay is the upper bound of backoff delay.
52 MaxDelay time.Duration 37 MaxDelay time.Duration
53
54 // TODO(stevvooe): The following fields are not exported, as allowing
55 // changes would violate the current gRPC specification for backoff. If
56 // gRPC decides to allow more interesting backoff strategies, these fields
57 // may be opened up in the future.
58
59 // baseDelay is the amount of time to wait before retrying after the first
60 // failure.
61 baseDelay time.Duration
62
63 // factor is applied to the backoff after each retry.
64 factor float64
65
66 // jitter provides a range to randomize backoff delays.
67 jitter float64
68}
69
70func setDefaults(bc *BackoffConfig) {
71 md := bc.MaxDelay
72 *bc = DefaultBackoffConfig
73
74 if md > 0 {
75 bc.MaxDelay = md
76 }
77}
78
79func (bc BackoffConfig) backoff(retries int) time.Duration {
80 if retries == 0 {
81 return bc.baseDelay
82 }
83 backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
84 for backoff < max && retries > 0 {
85 backoff *= bc.factor
86 retries--
87 }
88 if backoff > max {
89 backoff = max
90 }
91 // Randomize backoff delays so that if a cluster of requests start at
92 // the same time, they won't operate in lockstep.
93 backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
94 if backoff < 0 {
95 return 0
96 }
97 return time.Duration(backoff)
98} 38}
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
index cde472c..a78e702 100644
--- a/vendor/google.golang.org/grpc/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -19,19 +19,20 @@
19package grpc 19package grpc
20 20
21import ( 21import (
22 "fmt" 22 "context"
23 "net" 23 "net"
24 "sync" 24 "sync"
25 25
26 "golang.org/x/net/context"
27 "google.golang.org/grpc/codes" 26 "google.golang.org/grpc/codes"
28 "google.golang.org/grpc/credentials" 27 "google.golang.org/grpc/credentials"
29 "google.golang.org/grpc/grpclog" 28 "google.golang.org/grpc/grpclog"
30 "google.golang.org/grpc/naming" 29 "google.golang.org/grpc/naming"
30 "google.golang.org/grpc/status"
31) 31)
32 32
33// Address represents a server the client connects to. 33// Address represents a server the client connects to.
34// This is the EXPERIMENTAL API and may be changed or extended in the future. 34//
35// Deprecated: please use package balancer.
35type Address struct { 36type Address struct {
36 // Addr is the server address on which a connection will be established. 37 // Addr is the server address on which a connection will be established.
37 Addr string 38 Addr string
@@ -41,6 +42,8 @@ type Address struct {
41} 42}
42 43
43// BalancerConfig specifies the configurations for Balancer. 44// BalancerConfig specifies the configurations for Balancer.
45//
46// Deprecated: please use package balancer.
44type BalancerConfig struct { 47type BalancerConfig struct {
45 // DialCreds is the transport credential the Balancer implementation can 48 // DialCreds is the transport credential the Balancer implementation can
46 // use to dial to a remote load balancer server. The Balancer implementations 49 // use to dial to a remote load balancer server. The Balancer implementations
@@ -53,7 +56,8 @@ type BalancerConfig struct {
53} 56}
54 57
55// BalancerGetOptions configures a Get call. 58// BalancerGetOptions configures a Get call.
56// This is the EXPERIMENTAL API and may be changed or extended in the future. 59//
60// Deprecated: please use package balancer.
57type BalancerGetOptions struct { 61type BalancerGetOptions struct {
58 // BlockingWait specifies whether Get should block when there is no 62 // BlockingWait specifies whether Get should block when there is no
59 // connected address. 63 // connected address.
@@ -61,7 +65,8 @@ type BalancerGetOptions struct {
61} 65}
62 66
63// Balancer chooses network addresses for RPCs. 67// Balancer chooses network addresses for RPCs.
64// This is the EXPERIMENTAL API and may be changed or extended in the future. 68//
69// Deprecated: please use package balancer.
65type Balancer interface { 70type Balancer interface {
66 // Start does the initialization work to bootstrap a Balancer. For example, 71 // Start does the initialization work to bootstrap a Balancer. For example,
67 // this function may start the name resolution and watch the updates. It will 72 // this function may start the name resolution and watch the updates. It will
@@ -112,28 +117,10 @@ type Balancer interface {
112 Close() error 117 Close() error
113} 118}
114 119
115// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
116// call of Balancer.
117type downErr struct {
118 timeout bool
119 temporary bool
120 desc string
121}
122
123func (e downErr) Error() string { return e.desc }
124func (e downErr) Timeout() bool { return e.timeout }
125func (e downErr) Temporary() bool { return e.temporary }
126
127func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
128 return downErr{
129 timeout: timeout,
130 temporary: temporary,
131 desc: fmt.Sprintf(format, a...),
132 }
133}
134
135// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch 120// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
136// the name resolution updates and updates the addresses available correspondingly. 121// the name resolution updates and updates the addresses available correspondingly.
122//
123// Deprecated: please use package balancer/roundrobin.
137func RoundRobin(r naming.Resolver) Balancer { 124func RoundRobin(r naming.Resolver) Balancer {
138 return &roundRobin{r: r} 125 return &roundRobin{r: r}
139} 126}
@@ -310,7 +297,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
310 if !opts.BlockingWait { 297 if !opts.BlockingWait {
311 if len(rr.addrs) == 0 { 298 if len(rr.addrs) == 0 {
312 rr.mu.Unlock() 299 rr.mu.Unlock()
313 err = Errorf(codes.Unavailable, "there is no address available") 300 err = status.Errorf(codes.Unavailable, "there is no address available")
314 return 301 return
315 } 302 }
316 // Returns the next addr on rr.addrs for failfast RPCs. 303 // Returns the next addr on rr.addrs for failfast RPCs.
@@ -395,3 +382,10 @@ func (rr *roundRobin) Close() error {
395 } 382 }
396 return nil 383 return nil
397} 384}
385
386// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
387// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
388// returns the only address Up by resetTransport().
389type pickFirst struct {
390 *roundRobin
391}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
new file mode 100644
index 0000000..317c2e7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -0,0 +1,303 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package balancer defines APIs for load balancing in gRPC.
20// All APIs in this package are experimental.
21package balancer
22
23import (
24 "context"
25 "errors"
26 "net"
27 "strings"
28
29 "google.golang.org/grpc/connectivity"
30 "google.golang.org/grpc/credentials"
31 "google.golang.org/grpc/internal"
32 "google.golang.org/grpc/metadata"
33 "google.golang.org/grpc/resolver"
34)
35
36var (
37 // m is a map from name to balancer builder.
38 m = make(map[string]Builder)
39)
40
41// Register registers the balancer builder to the balancer map. b.Name
42// (lowercased) will be used as the name registered with this builder.
43//
44// NOTE: this function must only be called during initialization time (i.e. in
45// an init() function), and is not thread-safe. If multiple Balancers are
46// registered with the same name, the one registered last will take effect.
47func Register(b Builder) {
48 m[strings.ToLower(b.Name())] = b
49}
50
51// unregisterForTesting deletes the balancer with the given name from the
52// balancer map.
53//
54// This function is not thread-safe.
55func unregisterForTesting(name string) {
56 delete(m, name)
57}
58
59func init() {
60 internal.BalancerUnregister = unregisterForTesting
61}
62
63// Get returns the resolver builder registered with the given name.
64// Note that the compare is done in a case-insenstive fashion.
65// If no builder is register with the name, nil will be returned.
66func Get(name string) Builder {
67 if b, ok := m[strings.ToLower(name)]; ok {
68 return b
69 }
70 return nil
71}
72
73// SubConn represents a gRPC sub connection.
74// Each sub connection contains a list of addresses. gRPC will
75// try to connect to them (in sequence), and stop trying the
76// remainder once one connection is successful.
77//
78// The reconnect backoff will be applied on the list, not a single address.
79// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
80//
81// All SubConns start in IDLE, and will not try to connect. To trigger
82// the connecting, Balancers must call Connect.
83// When the connection encounters an error, it will reconnect immediately.
84// When the connection becomes IDLE, it will not reconnect unless Connect is
85// called.
86//
87// This interface is to be implemented by gRPC. Users should not need a
88// brand new implementation of this interface. For the situations like
89// testing, the new implementation should embed this interface. This allows
90// gRPC to add new methods to this interface.
91type SubConn interface {
92 // UpdateAddresses updates the addresses used in this SubConn.
93 // gRPC checks if currently-connected address is still in the new list.
94 // If it's in the list, the connection will be kept.
95 // If it's not in the list, the connection will gracefully closed, and
96 // a new connection will be created.
97 //
98 // This will trigger a state transition for the SubConn.
99 UpdateAddresses([]resolver.Address)
100 // Connect starts the connecting for this SubConn.
101 Connect()
102}
103
104// NewSubConnOptions contains options to create new SubConn.
105type NewSubConnOptions struct {
106 // CredsBundle is the credentials bundle that will be used in the created
107 // SubConn. If it's nil, the original creds from grpc DialOptions will be
108 // used.
109 CredsBundle credentials.Bundle
110 // HealthCheckEnabled indicates whether health check service should be
111 // enabled on this SubConn
112 HealthCheckEnabled bool
113}
114
115// ClientConn represents a gRPC ClientConn.
116//
117// This interface is to be implemented by gRPC. Users should not need a
118// brand new implementation of this interface. For the situations like
119// testing, the new implementation should embed this interface. This allows
120// gRPC to add new methods to this interface.
121type ClientConn interface {
122 // NewSubConn is called by balancer to create a new SubConn.
123 // It doesn't block and wait for the connections to be established.
124 // Behaviors of the SubConn can be controlled by options.
125 NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
126 // RemoveSubConn removes the SubConn from ClientConn.
127 // The SubConn will be shutdown.
128 RemoveSubConn(SubConn)
129
130 // UpdateBalancerState is called by balancer to nofity gRPC that some internal
131 // state in balancer has changed.
132 //
133 // gRPC will update the connectivity state of the ClientConn, and will call pick
134 // on the new picker to pick new SubConn.
135 UpdateBalancerState(s connectivity.State, p Picker)
136
137 // ResolveNow is called by balancer to notify gRPC to do a name resolving.
138 ResolveNow(resolver.ResolveNowOption)
139
140 // Target returns the dial target for this ClientConn.
141 Target() string
142}
143
144// BuildOptions contains additional information for Build.
145type BuildOptions struct {
146 // DialCreds is the transport credential the Balancer implementation can
147 // use to dial to a remote load balancer server. The Balancer implementations
148 // can ignore this if it does not need to talk to another party securely.
149 DialCreds credentials.TransportCredentials
150 // CredsBundle is the credentials bundle that the Balancer can use.
151 CredsBundle credentials.Bundle
152 // Dialer is the custom dialer the Balancer implementation can use to dial
153 // to a remote load balancer server. The Balancer implementations
154 // can ignore this if it doesn't need to talk to remote balancer.
155 Dialer func(context.Context, string) (net.Conn, error)
156 // ChannelzParentID is the entity parent's channelz unique identification number.
157 ChannelzParentID int64
158}
159
160// Builder creates a balancer.
161type Builder interface {
162 // Build creates a new balancer with the ClientConn.
163 Build(cc ClientConn, opts BuildOptions) Balancer
164 // Name returns the name of balancers built by this builder.
165 // It will be used to pick balancers (for example in service config).
166 Name() string
167}
168
169// PickOptions contains addition information for the Pick operation.
170type PickOptions struct {
171 // FullMethodName is the method name that NewClientStream() is called
172 // with. The canonical format is /service/Method.
173 FullMethodName string
174 // Header contains the metadata from the RPC's client header. The metadata
175 // should not be modified; make a copy first if needed.
176 Header metadata.MD
177}
178
179// DoneInfo contains additional information for done.
180type DoneInfo struct {
181 // Err is the rpc error the RPC finished with. It could be nil.
182 Err error
183 // Trailer contains the metadata from the RPC's trailer, if present.
184 Trailer metadata.MD
185 // BytesSent indicates if any bytes have been sent to the server.
186 BytesSent bool
187 // BytesReceived indicates if any byte has been received from the server.
188 BytesReceived bool
189}
190
191var (
192 // ErrNoSubConnAvailable indicates no SubConn is available for pick().
193 // gRPC will block the RPC until a new picker is available via UpdateBalancerState().
194 ErrNoSubConnAvailable = errors.New("no SubConn is available")
195 // ErrTransientFailure indicates all SubConns are in TransientFailure.
196 // WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
197 ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
198)
199
200// Picker is used by gRPC to pick a SubConn to send an RPC.
201// Balancer is expected to generate a new picker from its snapshot every time its
202// internal state has changed.
203//
204// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
205type Picker interface {
206 // Pick returns the SubConn to be used to send the RPC.
207 // The returned SubConn must be one returned by NewSubConn().
208 //
209 // This functions is expected to return:
210 // - a SubConn that is known to be READY;
211 // - ErrNoSubConnAvailable if no SubConn is available, but progress is being
212 // made (for example, some SubConn is in CONNECTING mode);
213 // - other errors if no active connecting is happening (for example, all SubConn
214 // are in TRANSIENT_FAILURE mode).
215 //
216 // If a SubConn is returned:
217 // - If it is READY, gRPC will send the RPC on it;
218 // - If it is not ready, or becomes not ready after it's returned, gRPC will block
219 // until UpdateBalancerState() is called and will call pick on the new picker.
220 //
221 // If the returned error is not nil:
222 // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
223 // - If the error is ErrTransientFailure:
224 // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
225 // is called to pick again;
226 // - Otherwise, RPC will fail with unavailable error.
227 // - Else (error is other non-nil error):
228 // - The RPC will fail with unavailable error.
229 //
230 // The returned done() function will be called once the rpc has finished, with the
231 // final status of that RPC.
232 // done may be nil if balancer doesn't care about the RPC status.
233 Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
234}
235
236// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
237// the connectivity states.
238//
239// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
240//
241// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
242// to be called synchronously from the same goroutine.
243// There's no guarantee on picker.Pick, it may be called anytime.
244type Balancer interface {
245 // HandleSubConnStateChange is called by gRPC when the connectivity state
246 // of sc has changed.
247 // Balancer is expected to aggregate all the state of SubConn and report
248 // that back to gRPC.
249 // Balancer should also generate and update Pickers when its internal state has
250 // been changed by the new state.
251 HandleSubConnStateChange(sc SubConn, state connectivity.State)
252 // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
253 // balancers.
254 // Balancer can create new SubConn or remove SubConn with the addresses.
255 // An empty address slice and a non-nil error will be passed if the resolver returns
256 // non-nil error to gRPC.
257 HandleResolvedAddrs([]resolver.Address, error)
258 // Close closes the balancer. The balancer is not required to call
259 // ClientConn.RemoveSubConn for its existing SubConns.
260 Close()
261}
262
263// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
264// and returns one aggregated connectivity state.
265//
266// It's not thread safe.
267type ConnectivityStateEvaluator struct {
268 numReady uint64 // Number of addrConns in ready state.
269 numConnecting uint64 // Number of addrConns in connecting state.
270 numTransientFailure uint64 // Number of addrConns in transientFailure.
271}
272
273// RecordTransition records state change happening in subConn and based on that
274// it evaluates what aggregated state should be.
275//
276// - If at least one SubConn in Ready, the aggregated state is Ready;
277// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
278// - Else the aggregated state is TransientFailure.
279//
280// Idle and Shutdown are not considered.
281func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
282 // Update counters.
283 for idx, state := range []connectivity.State{oldState, newState} {
284 updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
285 switch state {
286 case connectivity.Ready:
287 cse.numReady += updateVal
288 case connectivity.Connecting:
289 cse.numConnecting += updateVal
290 case connectivity.TransientFailure:
291 cse.numTransientFailure += updateVal
292 }
293 }
294
295 // Evaluate.
296 if cse.numReady > 0 {
297 return connectivity.Ready
298 }
299 if cse.numConnecting > 0 {
300 return connectivity.Connecting
301 }
302 return connectivity.TransientFailure
303}
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
new file mode 100644
index 0000000..245785e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -0,0 +1,171 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package base
20
21import (
22 "context"
23
24 "google.golang.org/grpc/balancer"
25 "google.golang.org/grpc/connectivity"
26 "google.golang.org/grpc/grpclog"
27 "google.golang.org/grpc/resolver"
28)
29
30type baseBuilder struct {
31 name string
32 pickerBuilder PickerBuilder
33 config Config
34}
35
36func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
37 return &baseBalancer{
38 cc: cc,
39 pickerBuilder: bb.pickerBuilder,
40
41 subConns: make(map[resolver.Address]balancer.SubConn),
42 scStates: make(map[balancer.SubConn]connectivity.State),
43 csEvltr: &balancer.ConnectivityStateEvaluator{},
44 // Initialize picker to a picker that always return
45 // ErrNoSubConnAvailable, because when state of a SubConn changes, we
46 // may call UpdateBalancerState with this picker.
47 picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
48 config: bb.config,
49 }
50}
51
52func (bb *baseBuilder) Name() string {
53 return bb.name
54}
55
56type baseBalancer struct {
57 cc balancer.ClientConn
58 pickerBuilder PickerBuilder
59
60 csEvltr *balancer.ConnectivityStateEvaluator
61 state connectivity.State
62
63 subConns map[resolver.Address]balancer.SubConn
64 scStates map[balancer.SubConn]connectivity.State
65 picker balancer.Picker
66 config Config
67}
68
69func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
70 if err != nil {
71 grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
72 return
73 }
74 grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
75 // addrsSet is the set converted from addrs, it's used for quick lookup of an address.
76 addrsSet := make(map[resolver.Address]struct{})
77 for _, a := range addrs {
78 addrsSet[a] = struct{}{}
79 if _, ok := b.subConns[a]; !ok {
80 // a is a new address (not existing in b.subConns).
81 sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
82 if err != nil {
83 grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
84 continue
85 }
86 b.subConns[a] = sc
87 b.scStates[sc] = connectivity.Idle
88 sc.Connect()
89 }
90 }
91 for a, sc := range b.subConns {
92 // a was removed by resolver.
93 if _, ok := addrsSet[a]; !ok {
94 b.cc.RemoveSubConn(sc)
95 delete(b.subConns, a)
96 // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
97 // The entry will be deleted in HandleSubConnStateChange.
98 }
99 }
100}
101
102// regeneratePicker takes a snapshot of the balancer, and generates a picker
103// from it. The picker is
104// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
105// - built by the pickerBuilder with all READY SubConns otherwise.
106func (b *baseBalancer) regeneratePicker() {
107 if b.state == connectivity.TransientFailure {
108 b.picker = NewErrPicker(balancer.ErrTransientFailure)
109 return
110 }
111 readySCs := make(map[resolver.Address]balancer.SubConn)
112
113 // Filter out all ready SCs from full subConn map.
114 for addr, sc := range b.subConns {
115 if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
116 readySCs[addr] = sc
117 }
118 }
119 b.picker = b.pickerBuilder.Build(readySCs)
120}
121
122func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
123 grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
124 oldS, ok := b.scStates[sc]
125 if !ok {
126 grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
127 return
128 }
129 b.scStates[sc] = s
130 switch s {
131 case connectivity.Idle:
132 sc.Connect()
133 case connectivity.Shutdown:
134 // When an address was removed by resolver, b called RemoveSubConn but
135 // kept the sc's state in scStates. Remove state for this sc here.
136 delete(b.scStates, sc)
137 }
138
139 oldAggrState := b.state
140 b.state = b.csEvltr.RecordTransition(oldS, s)
141
142 // Regenerate picker when one of the following happens:
143 // - this sc became ready from not-ready
144 // - this sc became not-ready from ready
145 // - the aggregated state of balancer became TransientFailure from non-TransientFailure
146 // - the aggregated state of balancer became non-TransientFailure from TransientFailure
147 if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
148 (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
149 b.regeneratePicker()
150 }
151
152 b.cc.UpdateBalancerState(b.state, b.picker)
153}
154
155// Close is a nop because base balancer doesn't have internal state to clean up,
156// and it doesn't need to call RemoveSubConn for the SubConns.
157func (b *baseBalancer) Close() {
158}
159
160// NewErrPicker returns a picker that always returns err on Pick().
161func NewErrPicker(err error) balancer.Picker {
162 return &errPicker{err: err}
163}
164
165type errPicker struct {
166 err error // Pick() always returns this err.
167}
168
169func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
170 return nil, nil, p.err
171}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
new file mode 100644
index 0000000..34b1f29
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -0,0 +1,64 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package base defines a balancer base that can be used to build balancers with
20// different picking algorithms.
21//
22// The base balancer creates a new SubConn for each resolved address. The
23// provided picker will only be notified about READY SubConns.
24//
25// This package is the base of round_robin balancer, its purpose is to be used
26// to build round_robin like balancers with complex picking algorithms.
27// Balancers with more complicated logic should try to implement a balancer
28// builder from scratch.
29//
30// All APIs in this package are experimental.
31package base
32
33import (
34 "google.golang.org/grpc/balancer"
35 "google.golang.org/grpc/resolver"
36)
37
38// PickerBuilder creates balancer.Picker.
39type PickerBuilder interface {
40 // Build takes a slice of ready SubConns, and returns a picker that will be
41 // used by gRPC to pick a SubConn.
42 Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
43}
44
45// NewBalancerBuilder returns a balancer builder. The balancers
46// built by this builder will use the picker builder to build pickers.
47func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
48 return NewBalancerBuilderWithConfig(name, pb, Config{})
49}
50
51// Config contains the config info about the base balancer builder.
52type Config struct {
53 // HealthCheck indicates whether health checking should be enabled for this specific balancer.
54 HealthCheck bool
55}
56
57// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
58func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
59 return &baseBuilder{
60 name: name,
61 pickerBuilder: pb,
62 config: config,
63 }
64}
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
new file mode 100644
index 0000000..57aea9f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -0,0 +1,79 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is
20// installed as one of the default balancers in gRPC, users don't need to
21// explicitly install this balancer.
22package roundrobin
23
24import (
25 "context"
26 "sync"
27
28 "google.golang.org/grpc/balancer"
29 "google.golang.org/grpc/balancer/base"
30 "google.golang.org/grpc/grpclog"
31 "google.golang.org/grpc/resolver"
32)
33
34// Name is the name of round_robin balancer.
35const Name = "round_robin"
36
37// newBuilder creates a new roundrobin balancer builder.
38func newBuilder() balancer.Builder {
39 return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
40}
41
42func init() {
43 balancer.Register(newBuilder())
44}
45
46type rrPickerBuilder struct{}
47
48func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
49 grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
50 var scs []balancer.SubConn
51 for _, sc := range readySCs {
52 scs = append(scs, sc)
53 }
54 return &rrPicker{
55 subConns: scs,
56 }
57}
58
59type rrPicker struct {
60 // subConns is the snapshot of the roundrobin balancer when this picker was
61 // created. The slice is immutable. Each Get() will do a round robin
62 // selection from it and return the selected SubConn.
63 subConns []balancer.SubConn
64
65 mu sync.Mutex
66 next int
67}
68
69func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
70 if len(p.subConns) <= 0 {
71 return nil, nil, balancer.ErrNoSubConnAvailable
72 }
73
74 p.mu.Lock()
75 sc := p.subConns[p.next]
76 p.next = (p.next + 1) % len(p.subConns)
77 p.mu.Unlock()
78 return sc, nil, nil
79}
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
new file mode 100644
index 0000000..7233ade
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -0,0 +1,328 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package grpc
20
21import (
22 "fmt"
23 "sync"
24
25 "google.golang.org/grpc/balancer"
26 "google.golang.org/grpc/connectivity"
27 "google.golang.org/grpc/grpclog"
28 "google.golang.org/grpc/resolver"
29)
30
31// scStateUpdate contains the subConn and the new state it changed to.
32type scStateUpdate struct {
33 sc balancer.SubConn
34 state connectivity.State
35}
36
37// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple.
38// TODO make a general purpose buffer that uses interface{}.
39type scStateUpdateBuffer struct {
40 c chan *scStateUpdate
41 mu sync.Mutex
42 backlog []*scStateUpdate
43}
44
45func newSCStateUpdateBuffer() *scStateUpdateBuffer {
46 return &scStateUpdateBuffer{
47 c: make(chan *scStateUpdate, 1),
48 }
49}
50
51func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
52 b.mu.Lock()
53 defer b.mu.Unlock()
54 if len(b.backlog) == 0 {
55 select {
56 case b.c <- t:
57 return
58 default:
59 }
60 }
61 b.backlog = append(b.backlog, t)
62}
63
64func (b *scStateUpdateBuffer) load() {
65 b.mu.Lock()
66 defer b.mu.Unlock()
67 if len(b.backlog) > 0 {
68 select {
69 case b.c <- b.backlog[0]:
70 b.backlog[0] = nil
71 b.backlog = b.backlog[1:]
72 default:
73 }
74 }
75}
76
77// get returns the channel that the scStateUpdate will be sent to.
78//
79// Upon receiving, the caller should call load to send another
80// scStateChangeTuple onto the channel if there is any.
81func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
82 return b.c
83}
84
85// resolverUpdate contains the new resolved addresses or error if there's
86// any.
87type resolverUpdate struct {
88 addrs []resolver.Address
89 err error
90}
91
92// ccBalancerWrapper is a wrapper on top of cc for balancers.
93// It implements balancer.ClientConn interface.
94type ccBalancerWrapper struct {
95 cc *ClientConn
96 balancer balancer.Balancer
97 stateChangeQueue *scStateUpdateBuffer
98 resolverUpdateCh chan *resolverUpdate
99 done chan struct{}
100
101 mu sync.Mutex
102 subConns map[*acBalancerWrapper]struct{}
103}
104
105func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
106 ccb := &ccBalancerWrapper{
107 cc: cc,
108 stateChangeQueue: newSCStateUpdateBuffer(),
109 resolverUpdateCh: make(chan *resolverUpdate, 1),
110 done: make(chan struct{}),
111 subConns: make(map[*acBalancerWrapper]struct{}),
112 }
113 go ccb.watcher()
114 ccb.balancer = b.Build(ccb, bopts)
115 return ccb
116}
117
118// watcher balancer functions sequentially, so the balancer can be implemented
119// lock-free.
120func (ccb *ccBalancerWrapper) watcher() {
121 for {
122 select {
123 case t := <-ccb.stateChangeQueue.get():
124 ccb.stateChangeQueue.load()
125 select {
126 case <-ccb.done:
127 ccb.balancer.Close()
128 return
129 default:
130 }
131 ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
132 case t := <-ccb.resolverUpdateCh:
133 select {
134 case <-ccb.done:
135 ccb.balancer.Close()
136 return
137 default:
138 }
139 ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
140 case <-ccb.done:
141 }
142
143 select {
144 case <-ccb.done:
145 ccb.balancer.Close()
146 ccb.mu.Lock()
147 scs := ccb.subConns
148 ccb.subConns = nil
149 ccb.mu.Unlock()
150 for acbw := range scs {
151 ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
152 }
153 return
154 default:
155 }
156 }
157}
158
159func (ccb *ccBalancerWrapper) close() {
160 close(ccb.done)
161}
162
// handleSubConnStateChange enqueues a SubConn state change to be delivered
// to the balancer by the watcher goroutine.
//
// When updating addresses for a SubConn, if the address in use is not in
// the new addresses, the old ac will be tearDown() and a new ac will be
// created. tearDown() generates a state change with Shutdown state, we
// don't want the balancer to receive this state change. So before
// tearDown() on the old ac, ac.acbw (acBalancerWrapper) will be set to nil,
// and this function will be called with (nil, Shutdown). We don't need to
// call balancer method in this case.
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
	if sc == nil {
		return
	}
	ccb.stateChangeQueue.put(&scStateUpdate{
		sc:    sc,
		state: s,
	})
}
179
180func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
181 if ccb.cc.curBalancerName != grpclbName {
182 var containsGRPCLB bool
183 for _, a := range addrs {
184 if a.Type == resolver.GRPCLB {
185 containsGRPCLB = true
186 break
187 }
188 }
189 if containsGRPCLB {
190 // The current balancer is not grpclb, but addresses contain grpclb
191 // address. This means we failed to switch to grpclb, most likely
192 // because grpclb is not registered. Filter out all grpclb addresses
193 // from addrs before sending to balancer.
194 tempAddrs := make([]resolver.Address, 0, len(addrs))
195 for _, a := range addrs {
196 if a.Type != resolver.GRPCLB {
197 tempAddrs = append(tempAddrs, a)
198 }
199 }
200 addrs = tempAddrs
201 }
202 }
203 select {
204 case <-ccb.resolverUpdateCh:
205 default:
206 }
207 ccb.resolverUpdateCh <- &resolverUpdate{
208 addrs: addrs,
209 err: err,
210 }
211}
212
// NewSubConn implements balancer.ClientConn. It creates a new addrConn for
// addrs, wraps it in an acBalancerWrapper, and tracks it in ccb.subConns.
// It fails if addrs is empty or if the wrapper has been closed (subConns is
// set to nil by the watcher on shutdown).
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	if len(addrs) <= 0 {
		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
	}
	ccb.mu.Lock()
	defer ccb.mu.Unlock()
	if ccb.subConns == nil {
		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
	}
	ac, err := ccb.cc.newAddrConn(addrs, opts)
	if err != nil {
		return nil, err
	}
	acbw := &acBalancerWrapper{ac: ac}
	// Link the addrConn back to its wrapper so state changes can be routed
	// through handleSubConnStateChange.
	acbw.ac.mu.Lock()
	ac.acbw = acbw
	acbw.ac.mu.Unlock()
	ccb.subConns[acbw] = struct{}{}
	return acbw, nil
}
233
// RemoveSubConn implements balancer.ClientConn. It untracks sc and tears
// down the underlying addrConn. No-op if sc is not an acBalancerWrapper or
// if the wrapper has already been closed (subConns == nil).
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
	acbw, ok := sc.(*acBalancerWrapper)
	if !ok {
		return
	}
	ccb.mu.Lock()
	defer ccb.mu.Unlock()
	if ccb.subConns == nil {
		return
	}
	delete(ccb.subConns, acbw)
	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
247
// UpdateBalancerState implements balancer.ClientConn. It publishes the
// balancer's new picker and aggregated connectivity state to the
// ClientConn. No-op after the wrapper is closed (subConns == nil).
func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
	ccb.mu.Lock()
	defer ccb.mu.Unlock()
	if ccb.subConns == nil {
		return
	}
	// Update picker before updating state. Even though the ordering here does
	// not matter, it can lead to multiple calls of Pick in the common start-up
	// case where we wait for ready and then perform an RPC. If the picker is
	// updated later, we could call the "connecting" picker when the state is
	// updated, and then call the "ready" picker after the picker gets updated.
	ccb.cc.blockingpicker.updatePicker(p)
	ccb.cc.csMgr.updateState(s)
}
262
// ResolveNow implements balancer.ClientConn by forwarding the re-resolution
// request to the ClientConn.
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
	ccb.cc.resolveNow(o)
}
266
// Target implements balancer.ClientConn and returns the dial target of the
// underlying ClientConn.
func (ccb *ccBalancerWrapper) Target() string {
	return ccb.cc.target
}
270
// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
	mu sync.Mutex // guards ac, which UpdateAddresses may replace
	ac *addrConn
}
277
// UpdateAddresses implements balancer.SubConn. An empty addrs tears the
// connection down. If the in-place update via tryUpdateAddrs fails
// (presumably because the address in use is not in the new list — confirm
// in addrConn), the old addrConn is torn down and replaced by a new one.
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
	acbw.mu.Lock()
	defer acbw.mu.Unlock()
	if len(addrs) <= 0 {
		acbw.ac.tearDown(errConnDrain)
		return
	}
	if !acbw.ac.tryUpdateAddrs(addrs) {
		cc := acbw.ac.cc
		opts := acbw.ac.scopts
		acbw.ac.mu.Lock()
		// Set old ac.acbw to nil so the Shutdown state update will be ignored
		// by balancer.
		//
		// TODO(bar) the state transition could be wrong when tearDown() old ac
		// and creating new ac, fix the transition.
		acbw.ac.acbw = nil
		acbw.ac.mu.Unlock()
		acState := acbw.ac.getState()
		acbw.ac.tearDown(errConnDrain)

		if acState == connectivity.Shutdown {
			return
		}

		ac, err := cc.newAddrConn(addrs, opts)
		if err != nil {
			grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
			return
		}
		acbw.ac = ac
		// Link the replacement addrConn back to this wrapper so its state
		// changes are reported under the same SubConn identity.
		ac.mu.Lock()
		ac.acbw = acbw
		ac.mu.Unlock()
		if acState != connectivity.Idle {
			ac.connect()
		}
	}
}
317
// Connect implements balancer.SubConn by asking the underlying addrConn to
// start connecting.
func (acbw *acBalancerWrapper) Connect() {
	acbw.mu.Lock()
	defer acbw.mu.Unlock()
	acbw.ac.connect()
}
323
// getAddrConn returns the current underlying addrConn under the wrapper's
// lock (UpdateAddresses may swap it concurrently).
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
	acbw.mu.Lock()
	defer acbw.mu.Unlock()
	return acbw.ac
}
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
new file mode 100644
index 0000000..42b60fe
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
@@ -0,0 +1,326 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package grpc
20
21import (
22 "context"
23 "strings"
24 "sync"
25
26 "google.golang.org/grpc/balancer"
27 "google.golang.org/grpc/connectivity"
28 "google.golang.org/grpc/grpclog"
29 "google.golang.org/grpc/resolver"
30)
31
// balancerWrapperBuilder is a balancer.Builder that adapts a v1 Balancer to
// the v2 balancer API.
type balancerWrapperBuilder struct {
	b Balancer // The v1 balancer.
}
35
36func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
37 targetAddr := cc.Target()
38 targetSplitted := strings.Split(targetAddr, ":///")
39 if len(targetSplitted) >= 2 {
40 targetAddr = targetSplitted[1]
41 }
42
43 bwb.b.Start(targetAddr, BalancerConfig{
44 DialCreds: opts.DialCreds,
45 Dialer: opts.Dialer,
46 })
47 _, pickfirst := bwb.b.(*pickFirst)
48 bw := &balancerWrapper{
49 balancer: bwb.b,
50 pickfirst: pickfirst,
51 cc: cc,
52 targetAddr: targetAddr,
53 startCh: make(chan struct{}),
54 conns: make(map[resolver.Address]balancer.SubConn),
55 connSt: make(map[balancer.SubConn]*scState),
56 csEvltr: &balancer.ConnectivityStateEvaluator{},
57 state: connectivity.Idle,
58 }
59 cc.UpdateBalancerState(connectivity.Idle, bw)
60 go bw.lbWatcher()
61 return bw
62}
63
// Name implements balancer.Builder and returns the fixed builder name
// "wrapper".
func (bwb *balancerWrapperBuilder) Name() string {
	return "wrapper"
}
67
// scState records the v1-visible state of one SubConn: its v1 address, its
// last observed connectivity state, and the teardown callback returned by
// Balancer.Up (set only while the SubConn is Ready).
type scState struct {
	addr Address // The v1 address type.
	s    connectivity.State
	down func(error) // returned by Balancer.Up; invoked when leaving Ready
}
73
// balancerWrapper adapts a v1 Balancer to the v2 balancer.Balancer and
// balancer.Picker interfaces. It tracks the SubConn created for each
// resolved address and aggregates their connectivity states.
type balancerWrapper struct {
	balancer  Balancer // The v1 balancer.
	pickfirst bool

	cc         balancer.ClientConn
	targetAddr string // Target without the scheme.

	mu     sync.Mutex
	conns  map[resolver.Address]balancer.SubConn
	connSt map[balancer.SubConn]*scState
	// This channel is closed when handling the first resolver result.
	// lbWatcher blocks until this is closed, to avoid race between
	// - NewSubConn is created, cc wants to notify balancer of state changes;
	// - Build hasn't return, cc doesn't have access to balancer.
	startCh chan struct{}

	// To aggregate the connectivity state.
	csEvltr *balancer.ConnectivityStateEvaluator
	state   connectivity.State
}
94
// lbWatcher watches the Notify channel of the balancer and manages
// connections accordingly.
//
// It first blocks on startCh (closed by HandleResolvedAddrs or Close) so
// that Build has returned before any SubConn work starts. If the v1
// balancer exposes no Notify channel, it dials the target directly and
// returns; otherwise it reconciles the SubConn set against each address
// update from Notify until the channel is closed.
func (bw *balancerWrapper) lbWatcher() {
	<-bw.startCh
	notifyCh := bw.balancer.Notify()
	if notifyCh == nil {
		// There's no resolver in the balancer. Connect directly.
		a := resolver.Address{
			Addr: bw.targetAddr,
			Type: resolver.Backend,
		}
		sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
		if err != nil {
			grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
		} else {
			bw.mu.Lock()
			bw.conns[a] = sc
			bw.connSt[sc] = &scState{
				addr: Address{Addr: bw.targetAddr},
				s:    connectivity.Idle,
			}
			bw.mu.Unlock()
			sc.Connect()
		}
		return
	}

	for addrs := range notifyCh {
		grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
		if bw.pickfirst {
			var (
				oldA  resolver.Address
				oldSC balancer.SubConn
			)
			// Grab the single existing SubConn, if any (pickfirst keeps at
			// most one entry in bw.conns).
			bw.mu.Lock()
			for oldA, oldSC = range bw.conns {
				break
			}
			bw.mu.Unlock()
			if len(addrs) <= 0 {
				if oldSC != nil {
					// Teardown old sc.
					bw.mu.Lock()
					delete(bw.conns, oldA)
					delete(bw.connSt, oldSC)
					bw.mu.Unlock()
					bw.cc.RemoveSubConn(oldSC)
				}
				continue
			}

			var newAddrs []resolver.Address
			for _, a := range addrs {
				newAddr := resolver.Address{
					Addr:       a.Addr,
					Type:       resolver.Backend, // All addresses from balancer are all backends.
					ServerName: "",
					Metadata:   a.Metadata,
				}
				newAddrs = append(newAddrs, newAddr)
			}
			if oldSC == nil {
				// Create new sc.
				sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
				if err != nil {
					grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
				} else {
					bw.mu.Lock()
					// For pickfirst, there should be only one SubConn, so the
					// address doesn't matter. All states updating (up and down)
					// and picking should all happen on that only SubConn.
					bw.conns[resolver.Address{}] = sc
					bw.connSt[sc] = &scState{
						addr: addrs[0], // Use the first address.
						s:    connectivity.Idle,
					}
					bw.mu.Unlock()
					sc.Connect()
				}
			} else {
				bw.mu.Lock()
				bw.connSt[oldSC].addr = addrs[0]
				bw.mu.Unlock()
				oldSC.UpdateAddresses(newAddrs)
			}
		} else {
			var (
				add []resolver.Address // Addresses need to setup connections.
				del []balancer.SubConn // Connections need to tear down.
			)
			// Map each v2 address back to its v1 address so scState can
			// carry the v1 form for Balancer.Up.
			resAddrs := make(map[resolver.Address]Address)
			for _, a := range addrs {
				resAddrs[resolver.Address{
					Addr:       a.Addr,
					Type:       resolver.Backend, // All addresses from balancer are all backends.
					ServerName: "",
					Metadata:   a.Metadata,
				}] = a
			}
			bw.mu.Lock()
			for a := range resAddrs {
				if _, ok := bw.conns[a]; !ok {
					add = append(add, a)
				}
			}
			for a, c := range bw.conns {
				if _, ok := resAddrs[a]; !ok {
					del = append(del, c)
					delete(bw.conns, a)
					// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
				}
			}
			bw.mu.Unlock()
			for _, a := range add {
				sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
				if err != nil {
					grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
				} else {
					bw.mu.Lock()
					bw.conns[a] = sc
					bw.connSt[sc] = &scState{
						addr: resAddrs[a],
						s:    connectivity.Idle,
					}
					bw.mu.Unlock()
					sc.Connect()
				}
			}
			for _, c := range del {
				bw.cc.RemoveSubConn(c)
			}
		}
	}
}
229
// HandleSubConnStateChange implements balancer.Balancer. It translates v2
// SubConn state transitions into v1 Up/down callbacks, re-aggregates the
// overall connectivity state, and republishes the wrapper as picker.
func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	scSt, ok := bw.connSt[sc]
	if !ok {
		// Unknown SubConn (already removed on Shutdown); ignore.
		return
	}
	if s == connectivity.Idle {
		// Immediately reconnect idle SubConns.
		sc.Connect()
	}
	oldS := scSt.s
	scSt.s = s
	if oldS != connectivity.Ready && s == connectivity.Ready {
		// Entering Ready: notify the v1 balancer and keep the returned
		// down func to invoke when leaving Ready.
		scSt.down = bw.balancer.Up(scSt.addr)
	} else if oldS == connectivity.Ready && s != connectivity.Ready {
		if scSt.down != nil {
			scSt.down(errConnClosing)
		}
	}
	sa := bw.csEvltr.RecordTransition(oldS, s)
	if bw.state != sa {
		bw.state = sa
	}
	// NOTE(review): UpdateBalancerState is called even when the aggregated
	// state is unchanged — presumably to refresh the picker; confirm before
	// conditioning it on the state change.
	bw.cc.UpdateBalancerState(bw.state, bw)
	if s == connectivity.Shutdown {
		// Remove state for this sc.
		delete(bw.connSt, sc)
	}
}
259
// HandleResolvedAddrs implements balancer.Balancer. The wrapped v1 balancer
// has its own resolver, so the addresses are ignored; the call only closes
// startCh (exactly once, guarded by the non-blocking receive) to unblock
// lbWatcher.
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	select {
	case <-bw.startCh:
	default:
		close(bw.startCh)
	}
	// There should be a resolver inside the balancer.
	// All updates here, if any, are ignored.
}
271
// Close implements balancer.Balancer. It closes startCh if it has not been
// closed yet (so a blocked lbWatcher can exit) and closes the wrapped v1
// balancer.
func (bw *balancerWrapper) Close() {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	select {
	case <-bw.startCh:
	default:
		close(bw.startCh)
	}
	bw.balancer.Close()
}
282
// The picker is the balancerWrapper itself.
// Pick should never return ErrNoSubConnAvailable.
// It either blocks or returns error, consistent with v1 balancer Get().
func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	failfast := true // Default failfast is true.
	if ss, ok := rpcInfoFromContext(ctx); ok {
		failfast = ss.failfast
	}
	// Delegate the address choice to the v1 balancer; Get blocks when
	// BlockingWait is set (wait-for-ready RPCs).
	a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
	if err != nil {
		return nil, nil, err
	}
	// Wrap the v1 put func as the v2 done callback.
	var done func(balancer.DoneInfo)
	if p != nil {
		done = func(i balancer.DoneInfo) { p() }
	}
	var sc balancer.SubConn
	bw.mu.Lock()
	defer bw.mu.Unlock()
	if bw.pickfirst {
		// Get the first sc in conns.
		// NOTE(review): if conns is empty, sc remains nil and is returned
		// with a nil error — confirm callers tolerate a nil SubConn here.
		for _, sc = range bw.conns {
			break
		}
	} else {
		var ok bool
		sc, ok = bw.conns[resolver.Address{
			Addr:       a.Addr,
			Type:       resolver.Backend,
			ServerName: "",
			Metadata:   a.Metadata,
		}]
		if !ok && failfast {
			return nil, nil, balancer.ErrTransientFailure
		}
		if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
			// If the returned sc is not ready and RPC is failfast,
			// return error, and this RPC will fail.
			return nil, nil, balancer.ErrTransientFailure
		}
	}

	return sc, done, nil
}
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
new file mode 100644
index 0000000..f393bb6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -0,0 +1,900 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
3
4package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9import duration "github.com/golang/protobuf/ptypes/duration"
10import timestamp "github.com/golang/protobuf/ptypes/timestamp"
11
12// Reference imports to suppress errors if they are not otherwise used.
13var _ = proto.Marshal
14var _ = fmt.Errorf
15var _ = math.Inf
16
17// This is a compile-time assertion to ensure that this generated file
18// is compatible with the proto package it is being compiled against.
19// A compilation error at this line likely means your copy of the
20// proto package needs to be updated.
21const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
22
// Enumerates the type of event
// Note the terminology is different from the RPC semantics
// definition, but the same meaning is expressed here.
//
// NOTE(review): generated by protoc-gen-go (DO NOT EDIT); regenerate from
// binarylog.proto instead of changing by hand.
type GrpcLogEntry_EventType int32

const (
	GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
	// Header sent from client to server
	GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
	// Header sent from server to client
	GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
	// Message sent from client to server
	GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
	// Message sent from server to client
	GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
	// A signal that client is done sending
	GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
	// Trailer indicates the end of the RPC.
	// On client side, this event means a trailer was either received
	// from the network or the gRPC library locally generated a status
	// to inform the application about a failure.
	// On server side, this event means the server application requested
	// to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
	// this due to races on server side.
	GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
	// A signal that the RPC is cancelled. On client side, this
	// indicates the client application requests a cancellation.
	// On server side, this indicates that cancellation was detected.
	// Note: This marks the end of the RPC. Events may arrive after
	// this due to races. For example, on client side a trailer
	// may arrive even though the application requested to cancel the RPC.
	GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
)

// Generated value<->name tables used by proto enum (de)serialization.
var GrpcLogEntry_EventType_name = map[int32]string{
	0: "EVENT_TYPE_UNKNOWN",
	1: "EVENT_TYPE_CLIENT_HEADER",
	2: "EVENT_TYPE_SERVER_HEADER",
	3: "EVENT_TYPE_CLIENT_MESSAGE",
	4: "EVENT_TYPE_SERVER_MESSAGE",
	5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
	6: "EVENT_TYPE_SERVER_TRAILER",
	7: "EVENT_TYPE_CANCEL",
}
var GrpcLogEntry_EventType_value = map[string]int32{
	"EVENT_TYPE_UNKNOWN":           0,
	"EVENT_TYPE_CLIENT_HEADER":     1,
	"EVENT_TYPE_SERVER_HEADER":     2,
	"EVENT_TYPE_CLIENT_MESSAGE":    3,
	"EVENT_TYPE_SERVER_MESSAGE":    4,
	"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
	"EVENT_TYPE_SERVER_TRAILER":    6,
	"EVENT_TYPE_CANCEL":            7,
}

// String returns the proto name of the enum value.
func (x GrpcLogEntry_EventType) String() string {
	return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
}

// EnumDescriptor is protobuf-internal plumbing; do not call directly.
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
}
84
// Enumerates the entity that generates the log entry
//
// NOTE(review): generated by protoc-gen-go (DO NOT EDIT); regenerate from
// binarylog.proto instead of changing by hand.
type GrpcLogEntry_Logger int32

const (
	GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
	GrpcLogEntry_LOGGER_CLIENT  GrpcLogEntry_Logger = 1
	GrpcLogEntry_LOGGER_SERVER  GrpcLogEntry_Logger = 2
)

// Generated value<->name tables used by proto enum (de)serialization.
var GrpcLogEntry_Logger_name = map[int32]string{
	0: "LOGGER_UNKNOWN",
	1: "LOGGER_CLIENT",
	2: "LOGGER_SERVER",
}
var GrpcLogEntry_Logger_value = map[string]int32{
	"LOGGER_UNKNOWN": 0,
	"LOGGER_CLIENT":  1,
	"LOGGER_SERVER":  2,
}

// String returns the proto name of the enum value.
func (x GrpcLogEntry_Logger) String() string {
	return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
}

// EnumDescriptor is protobuf-internal plumbing; do not call directly.
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
}
111
// Address_Type classifies the format of a logged peer address.
//
// NOTE(review): generated by protoc-gen-go (DO NOT EDIT); regenerate from
// binarylog.proto instead of changing by hand.
type Address_Type int32

const (
	Address_TYPE_UNKNOWN Address_Type = 0
	// address is in 1.2.3.4 form
	Address_TYPE_IPV4 Address_Type = 1
	// address is in IPv6 canonical form (RFC5952 section 4)
	// The scope is NOT included in the address string.
	Address_TYPE_IPV6 Address_Type = 2
	// address is UDS string
	Address_TYPE_UNIX Address_Type = 3
)

// Generated value<->name tables used by proto enum (de)serialization.
var Address_Type_name = map[int32]string{
	0: "TYPE_UNKNOWN",
	1: "TYPE_IPV4",
	2: "TYPE_IPV6",
	3: "TYPE_UNIX",
}
var Address_Type_value = map[string]int32{
	"TYPE_UNKNOWN": 0,
	"TYPE_IPV4":    1,
	"TYPE_IPV6":    2,
	"TYPE_UNIX":    3,
}

// String returns the proto name of the enum value.
func (x Address_Type) String() string {
	return proto.EnumName(Address_Type_name, int32(x))
}

// EnumDescriptor is protobuf-internal plumbing; do not call directly.
func (Address_Type) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
}
144
// Log entry we store in binary logs
//
// NOTE(review): generated by protoc-gen-go (DO NOT EDIT); the XXX_* methods
// below are protobuf-internal plumbing and should not be called directly.
type GrpcLogEntry struct {
	// The timestamp of the binary log message
	Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
	// Uniquely identifies a call. The value must not be 0 in order to disambiguate
	// from an unset value.
	// Each call may have several log entries, they will all have the same call_id.
	// Nothing is guaranteed about their value other than they are unique across
	// different RPCs in the same gRPC process.
	CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
	// The entry sequence id for this call. The first GrpcLogEntry has a
	// value of 1, to disambiguate from an unset value. The purpose of
	// this field is to detect missing entries in environments where
	// durability or ordering is not guaranteed.
	SequenceIdWithinCall uint64                 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
	Type                 GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
	// The logger uses one of the following fields to record the payload,
	// according to the type of the log entry.
	//
	// Types that are valid to be assigned to Payload:
	//	*GrpcLogEntry_ClientHeader
	//	*GrpcLogEntry_ServerHeader
	//	*GrpcLogEntry_Message
	//	*GrpcLogEntry_Trailer
	Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
	// true if payload does not represent the full message or metadata.
	PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
	// Peer address information, will only be recorded on the first
	// incoming event. On client side, peer is logged on
	// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
	// the case of trailers-only. On server side, peer is always
	// logged on EVENT_TYPE_CLIENT_HEADER.
	Peer                 *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *GrpcLogEntry) Reset()         { *m = GrpcLogEntry{} }
func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
func (*GrpcLogEntry) ProtoMessage()    {}
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
}
func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
}
func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
}
func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
}
func (m *GrpcLogEntry) XXX_Size() int {
	return xxx_messageInfo_GrpcLogEntry.Size(m)
}
func (m *GrpcLogEntry) XXX_DiscardUnknown() {
	xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
}

var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
207
// Generated nil-safe getters and oneof wrapper types for GrpcLogEntry.
// NOTE(review): generated by protoc-gen-go (DO NOT EDIT).

func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
	if m != nil {
		return m.Timestamp
	}
	return nil
}

func (m *GrpcLogEntry) GetCallId() uint64 {
	if m != nil {
		return m.CallId
	}
	return 0
}

func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
	if m != nil {
		return m.SequenceIdWithinCall
	}
	return 0
}

func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
	if m != nil {
		return m.Type
	}
	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
}

func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
	if m != nil {
		return m.Logger
	}
	return GrpcLogEntry_LOGGER_UNKNOWN
}

// isGrpcLogEntry_Payload is the interface satisfied by the oneof wrapper
// types below; exactly one of them may be stored in GrpcLogEntry.Payload.
type isGrpcLogEntry_Payload interface {
	isGrpcLogEntry_Payload()
}

type GrpcLogEntry_ClientHeader struct {
	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
}

type GrpcLogEntry_ServerHeader struct {
	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
}

type GrpcLogEntry_Message struct {
	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
}

type GrpcLogEntry_Trailer struct {
	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
}

func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}

func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}

func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}

func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}

func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
	if m != nil {
		return m.Payload
	}
	return nil
}

// The per-variant getters return nil unless the oneof currently holds that
// variant.

func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
	if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
		return x.ClientHeader
	}
	return nil
}

func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
	if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
		return x.ServerHeader
	}
	return nil
}

func (m *GrpcLogEntry) GetMessage() *Message {
	if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
		return x.Message
	}
	return nil
}

func (m *GrpcLogEntry) GetTrailer() *Trailer {
	if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
		return x.Trailer
	}
	return nil
}

func (m *GrpcLogEntry) GetPayloadTruncated() bool {
	if m != nil {
		return m.PayloadTruncated
	}
	return false
}

func (m *GrpcLogEntry) GetPeer() *Address {
	if m != nil {
		return m.Peer
	}
	return nil
}
319
// XXX_OneofFuncs is for the internal use of the proto package.
// NOTE(review): generated by protoc-gen-go (DO NOT EDIT); the marshaler,
// unmarshaler and sizer below encode the payload oneof (fields 6-9).
func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
		(*GrpcLogEntry_ClientHeader)(nil),
		(*GrpcLogEntry_ServerHeader)(nil),
		(*GrpcLogEntry_Message)(nil),
		(*GrpcLogEntry_Trailer)(nil),
	}
}

func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*GrpcLogEntry)
	// payload
	switch x := m.Payload.(type) {
	case *GrpcLogEntry_ClientHeader:
		b.EncodeVarint(6<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.ClientHeader); err != nil {
			return err
		}
	case *GrpcLogEntry_ServerHeader:
		b.EncodeVarint(7<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.ServerHeader); err != nil {
			return err
		}
	case *GrpcLogEntry_Message:
		b.EncodeVarint(8<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.Message); err != nil {
			return err
		}
	case *GrpcLogEntry_Trailer:
		b.EncodeVarint(9<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.Trailer); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
	}
	return nil
}

func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*GrpcLogEntry)
	switch tag {
	case 6: // payload.client_header
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(ClientHeader)
		err := b.DecodeMessage(msg)
		m.Payload = &GrpcLogEntry_ClientHeader{msg}
		return true, err
	case 7: // payload.server_header
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(ServerHeader)
		err := b.DecodeMessage(msg)
		m.Payload = &GrpcLogEntry_ServerHeader{msg}
		return true, err
	case 8: // payload.message
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(Message)
		err := b.DecodeMessage(msg)
		m.Payload = &GrpcLogEntry_Message{msg}
		return true, err
	case 9: // payload.trailer
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(Trailer)
		err := b.DecodeMessage(msg)
		m.Payload = &GrpcLogEntry_Trailer{msg}
		return true, err
	default:
		return false, nil
	}
}

func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*GrpcLogEntry)
	// payload
	switch x := m.Payload.(type) {
	case *GrpcLogEntry_ClientHeader:
		s := proto.Size(x.ClientHeader)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *GrpcLogEntry_ServerHeader:
		s := proto.Size(x.ServerHeader)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *GrpcLogEntry_Message:
		s := proto.Size(x.Message)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *GrpcLogEntry_Trailer:
		s := proto.Size(x.Trailer)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
431
// ClientHeader is the logged payload for EVENT_TYPE_CLIENT_HEADER.
// NOTE(review): generated by protoc-gen-go (DO NOT EDIT); the XXX_* methods
// are protobuf-internal plumbing.
type ClientHeader struct {
	// This contains only the metadata from the application.
	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
	// The name of the RPC method, which looks something like:
	// /<service>/<method>
	// Note the leading "/" character.
	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
	// A single process may be used to run multiple virtual
	// servers with different identities.
	// The authority is the name of such a server identitiy.
	// It is typically a portion of the URI in the form of
	// <host> or <host>:<port> .
	Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
	// the RPC timeout
	Timeout              *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

func (m *ClientHeader) Reset()         { *m = ClientHeader{} }
func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
func (*ClientHeader) ProtoMessage()    {}
func (*ClientHeader) Descriptor() ([]byte, []int) {
	return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
}
func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
}
func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
}
func (dst *ClientHeader) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClientHeader.Merge(dst, src)
}
func (m *ClientHeader) XXX_Size() int {
	return xxx_messageInfo_ClientHeader.Size(m)
}
func (m *ClientHeader) XXX_DiscardUnknown() {
	xxx_messageInfo_ClientHeader.DiscardUnknown(m)
}

var xxx_messageInfo_ClientHeader proto.InternalMessageInfo

// Nil-safe generated getters.

func (m *ClientHeader) GetMetadata() *Metadata {
	if m != nil {
		return m.Metadata
	}
	return nil
}

func (m *ClientHeader) GetMethodName() string {
	if m != nil {
		return m.MethodName
	}
	return ""
}

func (m *ClientHeader) GetAuthority() string {
	if m != nil {
		return m.Authority
	}
	return ""
}

func (m *ClientHeader) GetTimeout() *duration.Duration {
	if m != nil {
		return m.Timeout
	}
	return nil
}
503
// ServerHeader is the logged payload for EVENT_TYPE_SERVER_HEADER.
// NOTE(review): generated by protoc-gen-go (DO NOT EDIT); the XXX_* methods
// are protobuf-internal plumbing.
type ServerHeader struct {
	// This contains only the metadata from the application.
	Metadata             *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
	XXX_unrecognized     []byte    `json:"-"`
	XXX_sizecache        int32     `json:"-"`
}

func (m *ServerHeader) Reset()         { *m = ServerHeader{} }
func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
func (*ServerHeader) ProtoMessage()    {}
func (*ServerHeader) Descriptor() ([]byte, []int) {
	return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
}
func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
}
func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
}
func (dst *ServerHeader) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ServerHeader.Merge(dst, src)
}
func (m *ServerHeader) XXX_Size() int {
	return xxx_messageInfo_ServerHeader.Size(m)
}
func (m *ServerHeader) XXX_DiscardUnknown() {
	xxx_messageInfo_ServerHeader.DiscardUnknown(m)
}

var xxx_messageInfo_ServerHeader proto.InternalMessageInfo

// GetMetadata is the nil-safe generated getter.
func (m *ServerHeader) GetMetadata() *Metadata {
	if m != nil {
		return m.Metadata
	}
	return nil
}
542
543type Trailer struct {
544 // This contains only the metadata from the application.
545 Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
546 // The gRPC status code.
547 StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
548 // An original status message before any transport specific
549 // encoding.
550 StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
551 // The value of the 'grpc-status-details-bin' metadata key. If
552 // present, this is always an encoded 'google.rpc.Status' message.
553 StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
554 XXX_NoUnkeyedLiteral struct{} `json:"-"`
555 XXX_unrecognized []byte `json:"-"`
556 XXX_sizecache int32 `json:"-"`
557}
558
559func (m *Trailer) Reset() { *m = Trailer{} }
560func (m *Trailer) String() string { return proto.CompactTextString(m) }
561func (*Trailer) ProtoMessage() {}
562func (*Trailer) Descriptor() ([]byte, []int) {
563 return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
564}
565func (m *Trailer) XXX_Unmarshal(b []byte) error {
566 return xxx_messageInfo_Trailer.Unmarshal(m, b)
567}
568func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
569 return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
570}
571func (dst *Trailer) XXX_Merge(src proto.Message) {
572 xxx_messageInfo_Trailer.Merge(dst, src)
573}
574func (m *Trailer) XXX_Size() int {
575 return xxx_messageInfo_Trailer.Size(m)
576}
577func (m *Trailer) XXX_DiscardUnknown() {
578 xxx_messageInfo_Trailer.DiscardUnknown(m)
579}
580
581var xxx_messageInfo_Trailer proto.InternalMessageInfo
582
583func (m *Trailer) GetMetadata() *Metadata {
584 if m != nil {
585 return m.Metadata
586 }
587 return nil
588}
589
590func (m *Trailer) GetStatusCode() uint32 {
591 if m != nil {
592 return m.StatusCode
593 }
594 return 0
595}
596
597func (m *Trailer) GetStatusMessage() string {
598 if m != nil {
599 return m.StatusMessage
600 }
601 return ""
602}
603
604func (m *Trailer) GetStatusDetails() []byte {
605 if m != nil {
606 return m.StatusDetails
607 }
608 return nil
609}
610
611// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
612type Message struct {
613 // Length of the message. It may not be the same as the length of the
614 // data field, as the logging payload can be truncated or omitted.
615 Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
616 // May be truncated or omitted.
617 Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
618 XXX_NoUnkeyedLiteral struct{} `json:"-"`
619 XXX_unrecognized []byte `json:"-"`
620 XXX_sizecache int32 `json:"-"`
621}
622
623func (m *Message) Reset() { *m = Message{} }
624func (m *Message) String() string { return proto.CompactTextString(m) }
625func (*Message) ProtoMessage() {}
626func (*Message) Descriptor() ([]byte, []int) {
627 return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
628}
629func (m *Message) XXX_Unmarshal(b []byte) error {
630 return xxx_messageInfo_Message.Unmarshal(m, b)
631}
632func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
633 return xxx_messageInfo_Message.Marshal(b, m, deterministic)
634}
635func (dst *Message) XXX_Merge(src proto.Message) {
636 xxx_messageInfo_Message.Merge(dst, src)
637}
638func (m *Message) XXX_Size() int {
639 return xxx_messageInfo_Message.Size(m)
640}
641func (m *Message) XXX_DiscardUnknown() {
642 xxx_messageInfo_Message.DiscardUnknown(m)
643}
644
645var xxx_messageInfo_Message proto.InternalMessageInfo
646
647func (m *Message) GetLength() uint32 {
648 if m != nil {
649 return m.Length
650 }
651 return 0
652}
653
654func (m *Message) GetData() []byte {
655 if m != nil {
656 return m.Data
657 }
658 return nil
659}
660
661// A list of metadata pairs, used in the payload of client header,
662// server header, and server trailer.
663// Implementations may omit some entries to honor the header limits
664// of GRPC_BINARY_LOG_CONFIG.
665//
666// Header keys added by gRPC are omitted. To be more specific,
667// implementations will not log the following entries, and this is
668// not to be treated as a truncation:
669// - entries handled by grpc that are not user visible, such as those
670// that begin with 'grpc-' (with exception of grpc-trace-bin)
671// or keys like 'lb-token'
672// - transport specific entries, including but not limited to:
673// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
674// - entries added for call credentials
675//
676// Implementations must always log grpc-trace-bin if it is present.
677// Practically speaking it will only be visible on server side because
678// grpc-trace-bin is managed by low level client side mechanisms
679// inaccessible from the application level. On server side, the
680// header is just a normal metadata key.
681// The pair will not count towards the size limit.
682type Metadata struct {
683 Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
684 XXX_NoUnkeyedLiteral struct{} `json:"-"`
685 XXX_unrecognized []byte `json:"-"`
686 XXX_sizecache int32 `json:"-"`
687}
688
689func (m *Metadata) Reset() { *m = Metadata{} }
690func (m *Metadata) String() string { return proto.CompactTextString(m) }
691func (*Metadata) ProtoMessage() {}
692func (*Metadata) Descriptor() ([]byte, []int) {
693 return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
694}
695func (m *Metadata) XXX_Unmarshal(b []byte) error {
696 return xxx_messageInfo_Metadata.Unmarshal(m, b)
697}
698func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
699 return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
700}
701func (dst *Metadata) XXX_Merge(src proto.Message) {
702 xxx_messageInfo_Metadata.Merge(dst, src)
703}
704func (m *Metadata) XXX_Size() int {
705 return xxx_messageInfo_Metadata.Size(m)
706}
707func (m *Metadata) XXX_DiscardUnknown() {
708 xxx_messageInfo_Metadata.DiscardUnknown(m)
709}
710
711var xxx_messageInfo_Metadata proto.InternalMessageInfo
712
713func (m *Metadata) GetEntry() []*MetadataEntry {
714 if m != nil {
715 return m.Entry
716 }
717 return nil
718}
719
720// A metadata key value pair
721type MetadataEntry struct {
722 Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
723 Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
724 XXX_NoUnkeyedLiteral struct{} `json:"-"`
725 XXX_unrecognized []byte `json:"-"`
726 XXX_sizecache int32 `json:"-"`
727}
728
729func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
730func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
731func (*MetadataEntry) ProtoMessage() {}
732func (*MetadataEntry) Descriptor() ([]byte, []int) {
733 return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
734}
735func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
736 return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
737}
738func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
739 return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
740}
741func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
742 xxx_messageInfo_MetadataEntry.Merge(dst, src)
743}
744func (m *MetadataEntry) XXX_Size() int {
745 return xxx_messageInfo_MetadataEntry.Size(m)
746}
747func (m *MetadataEntry) XXX_DiscardUnknown() {
748 xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
749}
750
751var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
752
753func (m *MetadataEntry) GetKey() string {
754 if m != nil {
755 return m.Key
756 }
757 return ""
758}
759
760func (m *MetadataEntry) GetValue() []byte {
761 if m != nil {
762 return m.Value
763 }
764 return nil
765}
766
767// Address information
768type Address struct {
769 Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
770 Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
771 // only for TYPE_IPV4 and TYPE_IPV6
772 IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
773 XXX_NoUnkeyedLiteral struct{} `json:"-"`
774 XXX_unrecognized []byte `json:"-"`
775 XXX_sizecache int32 `json:"-"`
776}
777
778func (m *Address) Reset() { *m = Address{} }
779func (m *Address) String() string { return proto.CompactTextString(m) }
780func (*Address) ProtoMessage() {}
781func (*Address) Descriptor() ([]byte, []int) {
782 return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
783}
784func (m *Address) XXX_Unmarshal(b []byte) error {
785 return xxx_messageInfo_Address.Unmarshal(m, b)
786}
787func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
788 return xxx_messageInfo_Address.Marshal(b, m, deterministic)
789}
790func (dst *Address) XXX_Merge(src proto.Message) {
791 xxx_messageInfo_Address.Merge(dst, src)
792}
793func (m *Address) XXX_Size() int {
794 return xxx_messageInfo_Address.Size(m)
795}
796func (m *Address) XXX_DiscardUnknown() {
797 xxx_messageInfo_Address.DiscardUnknown(m)
798}
799
800var xxx_messageInfo_Address proto.InternalMessageInfo
801
802func (m *Address) GetType() Address_Type {
803 if m != nil {
804 return m.Type
805 }
806 return Address_TYPE_UNKNOWN
807}
808
809func (m *Address) GetAddress() string {
810 if m != nil {
811 return m.Address
812 }
813 return ""
814}
815
816func (m *Address) GetIpPort() uint32 {
817 if m != nil {
818 return m.IpPort
819 }
820 return 0
821}
822
823func init() {
824 proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
825 proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
826 proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
827 proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
828 proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
829 proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
830 proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
831 proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
832 proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
833 proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
834 proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
835}
836
837func init() {
838 proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
839}
840
841var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
842 // 900 bytes of a gzipped FileDescriptorProto
843 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
844 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
845 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
846 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
847 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
848 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
849 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
850 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
851 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
852 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
853 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
854 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
855 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
856 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
857 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
858 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
859 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
860 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
861 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
862 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
863 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
864 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
865 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
866 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
867 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
868 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
869 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
870 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
871 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
872 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
873 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
874 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
875 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
876 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
877 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
878 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
879 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
880 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
881 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
882 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
883 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
884 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
885 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
886 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
887 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
888 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
889 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
890 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
891 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
892 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
893 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
894 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
895 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
896 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
897 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
898 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
899 0xd4, 0x07, 0x00, 0x00,
900}
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 797190f..100f05d 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -19,291 +19,56 @@
19package grpc 19package grpc
20 20
21import ( 21import (
22 "bytes" 22 "context"
23 "io"
24 "time"
25
26 "golang.org/x/net/context"
27 "golang.org/x/net/trace"
28 "google.golang.org/grpc/codes"
29 "google.golang.org/grpc/peer"
30 "google.golang.org/grpc/stats"
31 "google.golang.org/grpc/status"
32 "google.golang.org/grpc/transport"
33) 23)
34 24
35// recvResponse receives and parses an RPC response. 25// Invoke sends the RPC request on the wire and returns after response is
36// On error, it returns the error and indicates whether the call should be retried. 26// received. This is typically called by generated code.
37// 27//
38// TODO(zhaoq): Check whether the received message sequence is valid. 28// All errors returned by Invoke are compatible with the status package.
39// TODO ctx is used for stats collection and processing. It is the context passed from the application. 29func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
40func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) { 30 // allow interceptor to see all applicable call options, which means those
41 // Try to acquire header metadata from the server if there is any. 31 // configured as defaults from dial option as well as per-call options
42 defer func() { 32 opts = combine(cc.dopts.callOptions, opts)
43 if err != nil {
44 if _, ok := err.(transport.ConnectionError); !ok {
45 t.CloseStream(stream, err)
46 }
47 }
48 }()
49 c.headerMD, err = stream.Header()
50 if err != nil {
51 return
52 }
53 p := &parser{r: stream}
54 var inPayload *stats.InPayload
55 if dopts.copts.StatsHandler != nil {
56 inPayload = &stats.InPayload{
57 Client: true,
58 }
59 }
60 for {
61 if c.maxReceiveMessageSize == nil {
62 return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
63 }
64 if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
65 if err == io.EOF {
66 break
67 }
68 return
69 }
70 }
71 if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
72 // TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
73 // Fix the order if necessary.
74 dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
75 }
76 c.trailerMD = stream.Trailer()
77 return nil
78}
79
80// sendRequest writes out various information of an RPC such as Context and Message.
81func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
82 defer func() {
83 if err != nil {
84 // If err is connection error, t will be closed, no need to close stream here.
85 if _, ok := err.(transport.ConnectionError); !ok {
86 t.CloseStream(stream, err)
87 }
88 }
89 }()
90 var (
91 cbuf *bytes.Buffer
92 outPayload *stats.OutPayload
93 )
94 if compressor != nil {
95 cbuf = new(bytes.Buffer)
96 }
97 if dopts.copts.StatsHandler != nil {
98 outPayload = &stats.OutPayload{
99 Client: true,
100 }
101 }
102 outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
103 if err != nil {
104 return err
105 }
106 if c.maxSendMessageSize == nil {
107 return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
108 }
109 if len(outBuf) > *c.maxSendMessageSize {
110 return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(outBuf), *c.maxSendMessageSize)
111 }
112 err = t.Write(stream, outBuf, opts)
113 if err == nil && outPayload != nil {
114 outPayload.SentTime = time.Now()
115 dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
116 }
117 // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
118 // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
119 // recvResponse to get the final status.
120 if err != nil && err != io.EOF {
121 return err
122 }
123 // Sent successfully.
124 return nil
125}
126 33
127// Invoke sends the RPC request on the wire and returns after response is received.
128// Invoke is called by generated code. Also users can call Invoke directly when it
129// is really needed in their use cases.
130func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
131 if cc.dopts.unaryInt != nil { 34 if cc.dopts.unaryInt != nil {
132 return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) 35 return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
133 } 36 }
134 return invoke(ctx, method, args, reply, cc, opts...) 37 return invoke(ctx, method, args, reply, cc, opts...)
135} 38}
136 39
137func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { 40func combine(o1 []CallOption, o2 []CallOption) []CallOption {
138 c := defaultCallInfo 41 // we don't use append because o1 could have extra capacity whose
139 mc := cc.GetMethodConfig(method) 42 // elements would be overwritten, which could cause inadvertent
140 if mc.WaitForReady != nil { 43 // sharing (and race connditions) between concurrent calls
141 c.failFast = !*mc.WaitForReady 44 if len(o1) == 0 {
142 } 45 return o2
143 46 } else if len(o2) == 0 {
144 if mc.Timeout != nil && *mc.Timeout >= 0 { 47 return o1
145 var cancel context.CancelFunc 48 }
146 ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) 49 ret := make([]CallOption, len(o1)+len(o2))
147 defer cancel() 50 copy(ret, o1)
148 } 51 copy(ret[len(o1):], o2)
52 return ret
53}
149 54
150 opts = append(cc.dopts.callOptions, opts...) 55// Invoke sends the RPC request on the wire and returns after response is
151 for _, o := range opts { 56// received. This is typically called by generated code.
152 if err := o.before(&c); err != nil { 57//
153 return toRPCErr(err) 58// DEPRECATED: Use ClientConn.Invoke instead.
154 } 59func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
155 } 60 return cc.Invoke(ctx, method, args, reply, opts...)
156 defer func() { 61}
157 for _, o := range opts {
158 o.after(&c)
159 }
160 }()
161 62
162 c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) 63var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
163 c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
164 64
165 if EnableTracing { 65func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
166 c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) 66 cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
167 defer c.traceInfo.tr.Finish() 67 if err != nil {
168 c.traceInfo.firstLine.client = true 68 return err
169 if deadline, ok := ctx.Deadline(); ok {
170 c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
171 }
172 c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
173 // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
174 defer func() {
175 if e != nil {
176 c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
177 c.traceInfo.tr.SetError()
178 }
179 }()
180 }
181 ctx = newContextWithRPCInfo(ctx)
182 sh := cc.dopts.copts.StatsHandler
183 if sh != nil {
184 ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
185 begin := &stats.Begin{
186 Client: true,
187 BeginTime: time.Now(),
188 FailFast: c.failFast,
189 }
190 sh.HandleRPC(ctx, begin)
191 defer func() {
192 end := &stats.End{
193 Client: true,
194 EndTime: time.Now(),
195 Error: e,
196 }
197 sh.HandleRPC(ctx, end)
198 }()
199 }
200 topts := &transport.Options{
201 Last: true,
202 Delay: false,
203 } 69 }
204 for { 70 if err := cs.SendMsg(req); err != nil {
205 var ( 71 return err
206 err error
207 t transport.ClientTransport
208 stream *transport.Stream
209 // Record the put handler from Balancer.Get(...). It is called once the
210 // RPC has completed or failed.
211 put func()
212 )
213 // TODO(zhaoq): Need a formal spec of fail-fast.
214 callHdr := &transport.CallHdr{
215 Host: cc.authority,
216 Method: method,
217 }
218 if cc.dopts.cp != nil {
219 callHdr.SendCompress = cc.dopts.cp.Type()
220 }
221 if c.creds != nil {
222 callHdr.Creds = c.creds
223 }
224
225 gopts := BalancerGetOptions{
226 BlockingWait: !c.failFast,
227 }
228 t, put, err = cc.getTransport(ctx, gopts)
229 if err != nil {
230 // TODO(zhaoq): Probably revisit the error handling.
231 if _, ok := status.FromError(err); ok {
232 return err
233 }
234 if err == errConnClosing || err == errConnUnavailable {
235 if c.failFast {
236 return Errorf(codes.Unavailable, "%v", err)
237 }
238 continue
239 }
240 // All the other errors are treated as Internal errors.
241 return Errorf(codes.Internal, "%v", err)
242 }
243 if c.traceInfo.tr != nil {
244 c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
245 }
246 stream, err = t.NewStream(ctx, callHdr)
247 if err != nil {
248 if put != nil {
249 if _, ok := err.(transport.ConnectionError); ok {
250 // If error is connection error, transport was sending data on wire,
251 // and we are not sure if anything has been sent on wire.
252 // If error is not connection error, we are sure nothing has been sent.
253 updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
254 }
255 put()
256 }
257 if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
258 continue
259 }
260 return toRPCErr(err)
261 }
262 if peer, ok := peer.FromContext(stream.Context()); ok {
263 c.peer = peer
264 }
265 err = sendRequest(ctx, cc.dopts, cc.dopts.cp, &c, callHdr, stream, t, args, topts)
266 if err != nil {
267 if put != nil {
268 updateRPCInfoInContext(ctx, rpcInfo{
269 bytesSent: stream.BytesSent(),
270 bytesReceived: stream.BytesReceived(),
271 })
272 put()
273 }
274 // Retry a non-failfast RPC when
275 // i) there is a connection error; or
276 // ii) the server started to drain before this RPC was initiated.
277 if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
278 continue
279 }
280 return toRPCErr(err)
281 }
282 err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
283 if err != nil {
284 if put != nil {
285 updateRPCInfoInContext(ctx, rpcInfo{
286 bytesSent: stream.BytesSent(),
287 bytesReceived: stream.BytesReceived(),
288 })
289 put()
290 }
291 if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
292 continue
293 }
294 return toRPCErr(err)
295 }
296 if c.traceInfo.tr != nil {
297 c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
298 }
299 t.CloseStream(stream, nil)
300 if put != nil {
301 updateRPCInfoInContext(ctx, rpcInfo{
302 bytesSent: stream.BytesSent(),
303 bytesReceived: stream.BytesReceived(),
304 })
305 put()
306 }
307 return stream.Status().Err()
308 } 72 }
73 return cs.RecvMsg(reply)
309} 74}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index e3f6cb1..56d0bf7 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -19,35 +19,72 @@
19package grpc 19package grpc
20 20
21import ( 21import (
22 "context"
22 "errors" 23 "errors"
24 "fmt"
25 "math"
23 "net" 26 "net"
27 "reflect"
24 "strings" 28 "strings"
25 "sync" 29 "sync"
30 "sync/atomic"
26 "time" 31 "time"
27 32
28 "golang.org/x/net/context" 33 "google.golang.org/grpc/balancer"
29 "golang.org/x/net/trace" 34 _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
35 "google.golang.org/grpc/codes"
30 "google.golang.org/grpc/connectivity" 36 "google.golang.org/grpc/connectivity"
31 "google.golang.org/grpc/credentials" 37 "google.golang.org/grpc/credentials"
32 "google.golang.org/grpc/grpclog" 38 "google.golang.org/grpc/grpclog"
39 "google.golang.org/grpc/internal/backoff"
40 "google.golang.org/grpc/internal/channelz"
41 "google.golang.org/grpc/internal/envconfig"
42 "google.golang.org/grpc/internal/grpcsync"
43 "google.golang.org/grpc/internal/transport"
33 "google.golang.org/grpc/keepalive" 44 "google.golang.org/grpc/keepalive"
34 "google.golang.org/grpc/stats" 45 "google.golang.org/grpc/metadata"
35 "google.golang.org/grpc/transport" 46 "google.golang.org/grpc/resolver"
47 _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
48 _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
49 "google.golang.org/grpc/status"
50)
51
52const (
53 // minimum time to give a connection to complete
54 minConnectTimeout = 20 * time.Second
55 // must match grpclbName in grpclb/grpclb.go
56 grpclbName = "grpclb"
36) 57)
37 58
38var ( 59var (
39 // ErrClientConnClosing indicates that the operation is illegal because 60 // ErrClientConnClosing indicates that the operation is illegal because
40 // the ClientConn is closing. 61 // the ClientConn is closing.
41 ErrClientConnClosing = errors.New("grpc: the client connection is closing") 62 //
42 // ErrClientConnTimeout indicates that the ClientConn cannot establish the 63 // Deprecated: this error should not be relied upon by users; use the status
43 // underlying connections within the specified timeout. 64 // code of Canceled instead.
44 // DEPRECATED: Please use context.DeadlineExceeded instead. 65 ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing")
45 ErrClientConnTimeout = errors.New("grpc: timed out when dialing") 66 // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
67 errConnDrain = errors.New("grpc: the connection is drained")
68 // errConnClosing indicates that the connection is closing.
69 errConnClosing = errors.New("grpc: the connection is closing")
70 // errBalancerClosed indicates that the balancer is closed.
71 errBalancerClosed = errors.New("grpc: balancer is closed")
72 // We use an accessor so that minConnectTimeout can be
73 // atomically read and updated while testing.
74 getMinConnectTimeout = func() time.Duration {
75 return minConnectTimeout
76 }
77)
46 78
79// The following errors are returned from Dial and DialContext
80var (
47 // errNoTransportSecurity indicates that there is no transport security 81 // errNoTransportSecurity indicates that there is no transport security
48 // being set for ClientConn. Users should either set one or explicitly 82 // being set for ClientConn. Users should either set one or explicitly
49 // call WithInsecure DialOption to disable security. 83 // call WithInsecure DialOption to disable security.
50 errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") 84 errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
85 // errTransportCredsAndBundle indicates that creds bundle is used together
86 // with other individual Transport Credentials.
87 errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
51 // errTransportCredentialsMissing indicates that users want to transmit security 88 // errTransportCredentialsMissing indicates that users want to transmit security
52 // information (e.g., oauth2 token) which requires secure connection on an insecure 89 // information (e.g., oauth2 token) which requires secure connection on an insecure
53 // connection. 90 // connection.
@@ -55,278 +92,100 @@ var (
55 // errCredentialsConflict indicates that grpc.WithTransportCredentials() 92 // errCredentialsConflict indicates that grpc.WithTransportCredentials()
56 // and grpc.WithInsecure() are both called for a connection. 93 // and grpc.WithInsecure() are both called for a connection.
57 errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") 94 errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
58 // errNetworkIO indicates that the connection is down due to some network I/O error.
59 errNetworkIO = errors.New("grpc: failed with network I/O error")
60 // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
61 errConnDrain = errors.New("grpc: the connection is drained")
62 // errConnClosing indicates that the connection is closing.
63 errConnClosing = errors.New("grpc: the connection is closing")
64 // errConnUnavailable indicates that the connection is unavailable.
65 errConnUnavailable = errors.New("grpc: the connection is unavailable")
66 // errBalancerClosed indicates that the balancer is closed.
67 errBalancerClosed = errors.New("grpc: balancer is closed")
68 // minimum time to give a connection to complete
69 minConnectTimeout = 20 * time.Second
70) 95)
71 96
72// dialOptions configure a Dial call. dialOptions are set by the DialOption
73// values passed to Dial.
74type dialOptions struct {
75 unaryInt UnaryClientInterceptor
76 streamInt StreamClientInterceptor
77 codec Codec
78 cp Compressor
79 dc Decompressor
80 bs backoffStrategy
81 balancer Balancer
82 block bool
83 insecure bool
84 timeout time.Duration
85 scChan <-chan ServiceConfig
86 copts transport.ConnectOptions
87 callOptions []CallOption
88}
89
90const ( 97const (
91 defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 98 defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
92 defaultClientMaxSendMessageSize = 1024 * 1024 * 4 99 defaultClientMaxSendMessageSize = math.MaxInt32
100 // http2IOBufSize specifies the buffer size for sending frames.
101 defaultWriteBufSize = 32 * 1024
102 defaultReadBufSize = 32 * 1024
93) 103)
94 104
95// DialOption configures how we set up the connection.
96type DialOption func(*dialOptions)
97
98// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
99// The lower bound for window size is 64K and any value smaller than that will be ignored.
100func WithInitialWindowSize(s int32) DialOption {
101 return func(o *dialOptions) {
102 o.copts.InitialWindowSize = s
103 }
104}
105
106// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection.
107// The lower bound for window size is 64K and any value smaller than that will be ignored.
108func WithInitialConnWindowSize(s int32) DialOption {
109 return func(o *dialOptions) {
110 o.copts.InitialConnWindowSize = s
111 }
112}
113
114// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
115func WithMaxMsgSize(s int) DialOption {
116 return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
117}
118
119// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection.
120func WithDefaultCallOptions(cos ...CallOption) DialOption {
121 return func(o *dialOptions) {
122 o.callOptions = append(o.callOptions, cos...)
123 }
124}
125
126// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
127func WithCodec(c Codec) DialOption {
128 return func(o *dialOptions) {
129 o.codec = c
130 }
131}
132
133// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message
134// compressor.
135func WithCompressor(cp Compressor) DialOption {
136 return func(o *dialOptions) {
137 o.cp = cp
138 }
139}
140
141// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating
142// message decompressor.
143func WithDecompressor(dc Decompressor) DialOption {
144 return func(o *dialOptions) {
145 o.dc = dc
146 }
147}
148
149// WithBalancer returns a DialOption which sets a load balancer.
150func WithBalancer(b Balancer) DialOption {
151 return func(o *dialOptions) {
152 o.balancer = b
153 }
154}
155
156// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
157func WithServiceConfig(c <-chan ServiceConfig) DialOption {
158 return func(o *dialOptions) {
159 o.scChan = c
160 }
161}
162
163// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
164// when backing off after failed connection attempts.
165func WithBackoffMaxDelay(md time.Duration) DialOption {
166 return WithBackoffConfig(BackoffConfig{MaxDelay: md})
167}
168
169// WithBackoffConfig configures the dialer to use the provided backoff
170// parameters after connection failures.
171//
172// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
173// for use.
174func WithBackoffConfig(b BackoffConfig) DialOption {
175 // Set defaults to ensure that provided BackoffConfig is valid and
176 // unexported fields get default values.
177 setDefaults(&b)
178 return withBackoff(b)
179}
180
181// withBackoff sets the backoff strategy used for retries after a
182// failed connection attempt.
183//
184// This can be exported if arbitrary backoff strategies are allowed by gRPC.
185func withBackoff(bs backoffStrategy) DialOption {
186 return func(o *dialOptions) {
187 o.bs = bs
188 }
189}
190
191// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying
192// connection is up. Without this, Dial returns immediately and connecting the server
193// happens in background.
194func WithBlock() DialOption {
195 return func(o *dialOptions) {
196 o.block = true
197 }
198}
199
200// WithInsecure returns a DialOption which disables transport security for this ClientConn.
201// Note that transport security is required unless WithInsecure is set.
202func WithInsecure() DialOption {
203 return func(o *dialOptions) {
204 o.insecure = true
205 }
206}
207
208// WithTransportCredentials returns a DialOption which configures a
209// connection level security credentials (e.g., TLS/SSL).
210func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
211 return func(o *dialOptions) {
212 o.copts.TransportCredentials = creds
213 }
214}
215
216// WithPerRPCCredentials returns a DialOption which sets
217// credentials and places auth state on each outbound RPC.
218func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
219 return func(o *dialOptions) {
220 o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
221 }
222}
223
224// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
225// initially. This is valid if and only if WithBlock() is present.
226// Deprecated: use DialContext and context.WithTimeout instead.
227func WithTimeout(d time.Duration) DialOption {
228 return func(o *dialOptions) {
229 o.timeout = d
230 }
231}
232
233// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
234// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
235// Temporary() method to decide if it should try to reconnect to the network address.
236func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
237 return func(o *dialOptions) {
238 o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
239 if deadline, ok := ctx.Deadline(); ok {
240 return f(addr, deadline.Sub(time.Now()))
241 }
242 return f(addr, 0)
243 }
244 }
245}
246
247// WithStatsHandler returns a DialOption that specifies the stats handler
248// for all the RPCs and underlying network connections in this ClientConn.
249func WithStatsHandler(h stats.Handler) DialOption {
250 return func(o *dialOptions) {
251 o.copts.StatsHandler = h
252 }
253}
254
255// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors.
256// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
257// address and won't try to reconnect.
258// The default value of FailOnNonTempDialError is false.
259// This is an EXPERIMENTAL API.
260func FailOnNonTempDialError(f bool) DialOption {
261 return func(o *dialOptions) {
262 o.copts.FailOnNonTempDialError = f
263 }
264}
265
266// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
267func WithUserAgent(s string) DialOption {
268 return func(o *dialOptions) {
269 o.copts.UserAgent = s
270 }
271}
272
273// WithKeepaliveParams returns a DialOption that specifies keepalive paramaters for the client transport.
274func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
275 return func(o *dialOptions) {
276 o.copts.KeepaliveParams = kp
277 }
278}
279
280// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
281func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
282 return func(o *dialOptions) {
283 o.unaryInt = f
284 }
285}
286
287// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs.
288func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
289 return func(o *dialOptions) {
290 o.streamInt = f
291 }
292}
293
294// WithAuthority returns a DialOption that specifies the value to be used as
295// the :authority pseudo-header. This value only works with WithInsecure and
296// has no effect if TransportCredentials are present.
297func WithAuthority(a string) DialOption {
298 return func(o *dialOptions) {
299 o.copts.Authority = a
300 }
301}
302
303// Dial creates a client connection to the given target. 105// Dial creates a client connection to the given target.
304func Dial(target string, opts ...DialOption) (*ClientConn, error) { 106func Dial(target string, opts ...DialOption) (*ClientConn, error) {
305 return DialContext(context.Background(), target, opts...) 107 return DialContext(context.Background(), target, opts...)
306} 108}
307 109
308// DialContext creates a client connection to the given target. ctx can be used to 110// DialContext creates a client connection to the given target. By default, it's
309// cancel or expire the pending connection. Once this function returns, the 111// a non-blocking dial (the function won't wait for connections to be
310// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close 112// established, and connecting happens in the background). To make it a blocking
311// to terminate all the pending operations after this function returns. 113// dial, use WithBlock() dial option.
114//
115// In the non-blocking case, the ctx does not act against the connection. It
116// only controls the setup steps.
117//
118// In the blocking case, ctx can be used to cancel or expire the pending
119// connection. Once this function returns, the cancellation and expiration of
120// ctx will be noop. Users should call ClientConn.Close to terminate all the
121// pending operations after this function returns.
122//
123// The target name syntax is defined in
124// https://github.com/grpc/grpc/blob/master/doc/naming.md.
125// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
312func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { 126func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
313 cc := &ClientConn{ 127 cc := &ClientConn{
314 target: target, 128 target: target,
315 csMgr: &connectivityStateManager{}, 129 csMgr: &connectivityStateManager{},
316 conns: make(map[Address]*addrConn), 130 conns: make(map[*addrConn]struct{}),
317 } 131 dopts: defaultDialOptions(),
318 cc.csEvltr = &connectivityStateEvaluator{csMgr: cc.csMgr} 132 blockingpicker: newPickerWrapper(),
133 czData: new(channelzData),
134 firstResolveEvent: grpcsync.NewEvent(),
135 }
136 cc.retryThrottler.Store((*retryThrottler)(nil))
319 cc.ctx, cc.cancel = context.WithCancel(context.Background()) 137 cc.ctx, cc.cancel = context.WithCancel(context.Background())
320 138
321 for _, opt := range opts { 139 for _, opt := range opts {
322 opt(&cc.dopts) 140 opt.apply(&cc.dopts)
141 }
142
143 if channelz.IsOn() {
144 if cc.dopts.channelzParentID != 0 {
145 cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
146 channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
147 Desc: "Channel Created",
148 Severity: channelz.CtINFO,
149 Parent: &channelz.TraceEventDesc{
150 Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
151 Severity: channelz.CtINFO,
152 },
153 })
154 } else {
155 cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
156 channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
157 Desc: "Channel Created",
158 Severity: channelz.CtINFO,
159 })
160 }
161 cc.csMgr.channelzID = cc.channelzID
323 } 162 }
163
164 if !cc.dopts.insecure {
165 if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
166 return nil, errNoTransportSecurity
167 }
168 if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
169 return nil, errTransportCredsAndBundle
170 }
171 } else {
172 if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
173 return nil, errCredentialsConflict
174 }
175 for _, cd := range cc.dopts.copts.PerRPCCredentials {
176 if cd.RequireTransportSecurity() {
177 return nil, errTransportCredentialsMissing
178 }
179 }
180 }
181
324 cc.mkp = cc.dopts.copts.KeepaliveParams 182 cc.mkp = cc.dopts.copts.KeepaliveParams
325 183
326 if cc.dopts.copts.Dialer == nil { 184 if cc.dopts.copts.Dialer == nil {
327 cc.dopts.copts.Dialer = newProxyDialer( 185 cc.dopts.copts.Dialer = newProxyDialer(
328 func(ctx context.Context, addr string) (net.Conn, error) { 186 func(ctx context.Context, addr string) (net.Conn, error) {
329 return dialContext(ctx, "tcp", addr) 187 network, addr := parseDialTarget(addr)
188 return (&net.Dialer{}).DialContext(ctx, network, addr)
330 }, 189 },
331 ) 190 )
332 } 191 }
@@ -367,66 +226,41 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
367 default: 226 default:
368 } 227 }
369 } 228 }
370 // Set defaults.
371 if cc.dopts.codec == nil {
372 cc.dopts.codec = protoCodec{}
373 }
374 if cc.dopts.bs == nil { 229 if cc.dopts.bs == nil {
375 cc.dopts.bs = DefaultBackoffConfig 230 cc.dopts.bs = backoff.Exponential{
231 MaxDelay: DefaultBackoffConfig.MaxDelay,
232 }
233 }
234 if cc.dopts.resolverBuilder == nil {
235 // Only try to parse target when resolver builder is not already set.
236 cc.parsedTarget = parseTarget(cc.target)
237 grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
238 cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
239 if cc.dopts.resolverBuilder == nil {
240 // If resolver builder is still nil, the parse target's scheme is
241 // not registered. Fallback to default resolver and set Endpoint to
242 // the original unparsed target.
243 grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
244 cc.parsedTarget = resolver.Target{
245 Scheme: resolver.GetDefaultScheme(),
246 Endpoint: target,
247 }
248 cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
249 }
250 } else {
251 cc.parsedTarget = resolver.Target{Endpoint: target}
376 } 252 }
377 creds := cc.dopts.copts.TransportCredentials 253 creds := cc.dopts.copts.TransportCredentials
378 if creds != nil && creds.Info().ServerName != "" { 254 if creds != nil && creds.Info().ServerName != "" {
379 cc.authority = creds.Info().ServerName 255 cc.authority = creds.Info().ServerName
380 } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { 256 } else if cc.dopts.insecure && cc.dopts.authority != "" {
381 cc.authority = cc.dopts.copts.Authority 257 cc.authority = cc.dopts.authority
382 } else { 258 } else {
383 cc.authority = target 259 // Use endpoint from "scheme://authority/endpoint" as the default
384 } 260 // authority for ClientConn.
385 waitC := make(chan error, 1) 261 cc.authority = cc.parsedTarget.Endpoint
386 go func() {
387 defer close(waitC)
388 if cc.dopts.balancer == nil && cc.sc.LB != nil {
389 cc.dopts.balancer = cc.sc.LB
390 }
391 if cc.dopts.balancer != nil {
392 var credsClone credentials.TransportCredentials
393 if creds != nil {
394 credsClone = creds.Clone()
395 }
396 config := BalancerConfig{
397 DialCreds: credsClone,
398 Dialer: cc.dopts.copts.Dialer,
399 }
400 if err := cc.dopts.balancer.Start(target, config); err != nil {
401 waitC <- err
402 return
403 }
404 ch := cc.dopts.balancer.Notify()
405 if ch != nil {
406 if cc.dopts.block {
407 doneChan := make(chan struct{})
408 go cc.lbWatcher(doneChan)
409 <-doneChan
410 } else {
411 go cc.lbWatcher(nil)
412 }
413 return
414 }
415 }
416 // No balancer, or no resolver within the balancer. Connect directly.
417 if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil {
418 waitC <- err
419 return
420 }
421 }()
422 select {
423 case <-ctx.Done():
424 return nil, ctx.Err()
425 case err := <-waitC:
426 if err != nil {
427 return nil, err
428 }
429 } 262 }
263
430 if cc.dopts.scChan != nil && !scSet { 264 if cc.dopts.scChan != nil && !scSet {
431 // Blocking wait for the initial service config. 265 // Blocking wait for the initial service config.
432 select { 266 select {
@@ -442,55 +276,50 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
442 go cc.scWatcher() 276 go cc.scWatcher()
443 } 277 }
444 278
445 return cc, nil 279 var credsClone credentials.TransportCredentials
446} 280 if creds := cc.dopts.copts.TransportCredentials; creds != nil {
281 credsClone = creds.Clone()
282 }
283 cc.balancerBuildOpts = balancer.BuildOptions{
284 DialCreds: credsClone,
285 CredsBundle: cc.dopts.copts.CredsBundle,
286 Dialer: cc.dopts.copts.Dialer,
287 ChannelzParentID: cc.channelzID,
288 }
447 289
448// connectivityStateEvaluator gets updated by addrConns when their 290 // Build the resolver.
449// states transition, based on which it evaluates the state of 291 rWrapper, err := newCCResolverWrapper(cc)
450// ClientConn. 292 if err != nil {
451// Note: This code will eventually sit in the balancer in the new design. 293 return nil, fmt.Errorf("failed to build resolver: %v", err)
452type connectivityStateEvaluator struct { 294 }
453 csMgr *connectivityStateManager
454 mu sync.Mutex
455 numReady uint64 // Number of addrConns in ready state.
456 numConnecting uint64 // Number of addrConns in connecting state.
457 numTransientFailure uint64 // Number of addrConns in transientFailure.
458}
459 295
460// recordTransition records state change happening in every addrConn and based on 296 cc.mu.Lock()
461// that it evaluates what state the ClientConn is in. 297 cc.resolverWrapper = rWrapper
462// It can only transition between connectivity.Ready, connectivity.Connecting and connectivity.TransientFailure. Other states, 298 cc.mu.Unlock()
463// Idle and connectivity.Shutdown are transitioned into by ClientConn; in the begining of the connection 299 // A blocking dial blocks until the clientConn is ready.
464// before any addrConn is created ClientConn is in idle state. In the end when ClientConn 300 if cc.dopts.block {
465// closes it is in connectivity.Shutdown state. 301 for {
466// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. 302 s := cc.GetState()
467func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) { 303 if s == connectivity.Ready {
468 cse.mu.Lock() 304 break
469 defer cse.mu.Unlock() 305 } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
470 306 if err = cc.blockingpicker.connectionError(); err != nil {
471 // Update counters. 307 terr, ok := err.(interface {
472 for idx, state := range []connectivity.State{oldState, newState} { 308 Temporary() bool
473 updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. 309 })
474 switch state { 310 if ok && !terr.Temporary() {
475 case connectivity.Ready: 311 return nil, err
476 cse.numReady += updateVal 312 }
477 case connectivity.Connecting: 313 }
478 cse.numConnecting += updateVal 314 }
479 case connectivity.TransientFailure: 315 if !cc.WaitForStateChange(ctx, s) {
480 cse.numTransientFailure += updateVal 316 // ctx got timeout or canceled.
317 return nil, ctx.Err()
318 }
481 } 319 }
482 } 320 }
483 321
484 // Evaluate. 322 return cc, nil
485 if cse.numReady > 0 {
486 cse.csMgr.updateState(connectivity.Ready)
487 return
488 }
489 if cse.numConnecting > 0 {
490 cse.csMgr.updateState(connectivity.Connecting)
491 return
492 }
493 cse.csMgr.updateState(connectivity.TransientFailure)
494} 323}
495 324
496// connectivityStateManager keeps the connectivity.State of ClientConn. 325// connectivityStateManager keeps the connectivity.State of ClientConn.
@@ -499,6 +328,7 @@ type connectivityStateManager struct {
499 mu sync.Mutex 328 mu sync.Mutex
500 state connectivity.State 329 state connectivity.State
501 notifyChan chan struct{} 330 notifyChan chan struct{}
331 channelzID int64
502} 332}
503 333
504// updateState updates the connectivity.State of ClientConn. 334// updateState updates the connectivity.State of ClientConn.
@@ -514,6 +344,12 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
514 return 344 return
515 } 345 }
516 csm.state = state 346 csm.state = state
347 if channelz.IsOn() {
348 channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{
349 Desc: fmt.Sprintf("Channel Connectivity change to %v", state),
350 Severity: channelz.CtINFO,
351 })
352 }
517 if csm.notifyChan != nil { 353 if csm.notifyChan != nil {
518 // There are other goroutines waiting on this channel. 354 // There are other goroutines waiting on this channel.
519 close(csm.notifyChan) 355 close(csm.notifyChan)
@@ -541,17 +377,32 @@ type ClientConn struct {
541 ctx context.Context 377 ctx context.Context
542 cancel context.CancelFunc 378 cancel context.CancelFunc
543 379
544 target string 380 target string
545 authority string 381 parsedTarget resolver.Target
546 dopts dialOptions 382 authority string
547 csMgr *connectivityStateManager 383 dopts dialOptions
548 csEvltr *connectivityStateEvaluator // This will eventually be part of balancer. 384 csMgr *connectivityStateManager
549 385
550 mu sync.RWMutex 386 balancerBuildOpts balancer.BuildOptions
551 sc ServiceConfig 387 blockingpicker *pickerWrapper
552 conns map[Address]*addrConn 388
389 mu sync.RWMutex
390 resolverWrapper *ccResolverWrapper
391 sc ServiceConfig
392 scRaw string
393 conns map[*addrConn]struct{}
553 // Keepalive parameter can be updated if a GoAway is received. 394 // Keepalive parameter can be updated if a GoAway is received.
554 mkp keepalive.ClientParameters 395 mkp keepalive.ClientParameters
396 curBalancerName string
397 preBalancerName string // previous balancer name.
398 curAddresses []resolver.Address
399 balancerWrapper *ccBalancerWrapper
400 retryThrottler atomic.Value
401
402 firstResolveEvent *grpcsync.Event
403
404 channelzID int64 // channelz unique identification number
405 czData *channelzData
555} 406}
556 407
557// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or 408// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
@@ -576,65 +427,6 @@ func (cc *ClientConn) GetState() connectivity.State {
576 return cc.csMgr.getState() 427 return cc.csMgr.getState()
577} 428}
578 429
579// lbWatcher watches the Notify channel of the balancer in cc and manages
580// connections accordingly. If doneChan is not nil, it is closed after the
581// first successfull connection is made.
582func (cc *ClientConn) lbWatcher(doneChan chan struct{}) {
583 defer func() {
584 // In case channel from cc.dopts.balancer.Notify() gets closed before a
585 // successful connection gets established, don't forget to notify the
586 // caller.
587 if doneChan != nil {
588 close(doneChan)
589 }
590 }()
591
592 for addrs := range cc.dopts.balancer.Notify() {
593 var (
594 add []Address // Addresses need to setup connections.
595 del []*addrConn // Connections need to tear down.
596 )
597 cc.mu.Lock()
598 for _, a := range addrs {
599 if _, ok := cc.conns[a]; !ok {
600 add = append(add, a)
601 }
602 }
603 for k, c := range cc.conns {
604 var keep bool
605 for _, a := range addrs {
606 if k == a {
607 keep = true
608 break
609 }
610 }
611 if !keep {
612 del = append(del, c)
613 delete(cc.conns, c.addr)
614 }
615 }
616 cc.mu.Unlock()
617 for _, a := range add {
618 var err error
619 if doneChan != nil {
620 err = cc.resetAddrConn(a, true, nil)
621 if err == nil {
622 close(doneChan)
623 doneChan = nil
624 }
625 } else {
626 err = cc.resetAddrConn(a, false, nil)
627 }
628 if err != nil {
629 grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
630 }
631 }
632 for _, c := range del {
633 c.tearDown(errConnDrain)
634 }
635 }
636}
637
638func (cc *ClientConn) scWatcher() { 430func (cc *ClientConn) scWatcher() {
639 for { 431 for {
640 select { 432 select {
@@ -646,6 +438,7 @@ func (cc *ClientConn) scWatcher() {
646 // TODO: load balance policy runtime change is ignored. 438 // TODO: load balance policy runtime change is ignored.
647 // We may revist this decision in the future. 439 // We may revist this decision in the future.
648 cc.sc = sc 440 cc.sc = sc
441 cc.scRaw = ""
649 cc.mu.Unlock() 442 cc.mu.Unlock()
650 case <-cc.ctx.Done(): 443 case <-cc.ctx.Done():
651 return 444 return
@@ -653,99 +446,287 @@ func (cc *ClientConn) scWatcher() {
653 } 446 }
654} 447}
655 448
656// resetAddrConn creates an addrConn for addr and adds it to cc.conns. 449// waitForResolvedAddrs blocks until the resolver has provided addresses or the
657// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. 450// context expires. Returns nil unless the context expires first; otherwise
658// If tearDownErr is nil, errConnDrain will be used instead. 451// returns a status error based on the context.
659// 452func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
660// We should never need to replace an addrConn with a new one. This function is only used 453 // This is on the RPC path, so we use a fast path to avoid the
661// as newAddrConn to create new addrConn. 454 // more-expensive "select" below after the resolver has returned once.
662// TODO rename this function and clean up the code. 455 if cc.firstResolveEvent.HasFired() {
663func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error { 456 return nil
664 ac := &addrConn{
665 cc: cc,
666 addr: addr,
667 dopts: cc.dopts,
668 } 457 }
669 ac.ctx, ac.cancel = context.WithCancel(cc.ctx) 458 select {
670 ac.csEvltr = cc.csEvltr 459 case <-cc.firstResolveEvent.Done():
671 if EnableTracing { 460 return nil
672 ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) 461 case <-ctx.Done():
462 return status.FromContextError(ctx.Err()).Err()
463 case <-cc.ctx.Done():
464 return ErrClientConnClosing
673 } 465 }
674 if !ac.dopts.insecure { 466}
675 if ac.dopts.copts.TransportCredentials == nil { 467
676 return errNoTransportSecurity 468func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
677 } 469 cc.mu.Lock()
678 } else { 470 defer cc.mu.Unlock()
679 if ac.dopts.copts.TransportCredentials != nil { 471 if cc.conns == nil {
680 return errCredentialsConflict 472 // cc was closed.
473 return
474 }
475
476 if reflect.DeepEqual(cc.curAddresses, addrs) {
477 return
478 }
479
480 cc.curAddresses = addrs
481 cc.firstResolveEvent.Fire()
482
483 if cc.dopts.balancerBuilder == nil {
484 // Only look at balancer types and switch balancer if balancer dial
485 // option is not set.
486 var isGRPCLB bool
487 for _, a := range addrs {
488 if a.Type == resolver.GRPCLB {
489 isGRPCLB = true
490 break
491 }
681 } 492 }
682 for _, cd := range ac.dopts.copts.PerRPCCredentials { 493 var newBalancerName string
683 if cd.RequireTransportSecurity() { 494 if isGRPCLB {
684 return errTransportCredentialsMissing 495 newBalancerName = grpclbName
496 } else {
497 // Address list doesn't contain grpclb address. Try to pick a
498 // non-grpclb balancer.
499 newBalancerName = cc.curBalancerName
500 // If current balancer is grpclb, switch to the previous one.
501 if newBalancerName == grpclbName {
502 newBalancerName = cc.preBalancerName
503 }
504 // The following could be true in two cases:
505 // - the first time handling resolved addresses
506 // (curBalancerName="")
507 // - the first time handling non-grpclb addresses
508 // (curBalancerName="grpclb", preBalancerName="")
509 if newBalancerName == "" {
510 newBalancerName = PickFirstBalancerName
685 } 511 }
686 } 512 }
513 cc.switchBalancer(newBalancerName)
514 } else if cc.balancerWrapper == nil {
515 // Balancer dial option was set, and this is the first time handling
516 // resolved addresses. Build a balancer with dopts.balancerBuilder.
517 cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
687 } 518 }
519
520 cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
521}
522
523// switchBalancer starts the switching from current balancer to the balancer
524// with the given name.
525//
526// It will NOT send the current address list to the new balancer. If needed,
527// caller of this function should send address list to the new balancer after
528// this function returns.
529//
530// Caller must hold cc.mu.
531func (cc *ClientConn) switchBalancer(name string) {
532 if cc.conns == nil {
533 return
534 }
535
536 if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
537 return
538 }
539
540 grpclog.Infof("ClientConn switching balancer to %q", name)
541 if cc.dopts.balancerBuilder != nil {
542 grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
543 return
544 }
545 // TODO(bar switching) change this to two steps: drain and close.
546 // Keep track of sc in wrapper.
547 if cc.balancerWrapper != nil {
548 cc.balancerWrapper.close()
549 }
550
551 builder := balancer.Get(name)
552 // TODO(yuxuanli): If user send a service config that does not contain a valid balancer name, should
553 // we reuse previous one?
554 if channelz.IsOn() {
555 if builder == nil {
556 channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
557 Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName),
558 Severity: channelz.CtWarning,
559 })
560 } else {
561 channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
562 Desc: fmt.Sprintf("Channel switches to new LB policy %q", name),
563 Severity: channelz.CtINFO,
564 })
565 }
566 }
567 if builder == nil {
568 grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
569 builder = newPickfirstBuilder()
570 }
571
572 cc.preBalancerName = cc.curBalancerName
573 cc.curBalancerName = builder.Name()
574 cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
575}
576
577func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
578 cc.mu.Lock()
579 if cc.conns == nil {
580 cc.mu.Unlock()
581 return
582 }
583 // TODO(bar switching) send updates to all balancer wrappers when balancer
584 // gracefully switching is supported.
585 cc.balancerWrapper.handleSubConnStateChange(sc, s)
586 cc.mu.Unlock()
587}
588
589// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
590//
591// Caller needs to make sure len(addrs) > 0.
592func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
593 ac := &addrConn{
594 cc: cc,
595 addrs: addrs,
596 scopts: opts,
597 dopts: cc.dopts,
598 czData: new(channelzData),
599 resetBackoff: make(chan struct{}),
600 }
601 ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
688 // Track ac in cc. This needs to be done before any getTransport(...) is called. 602 // Track ac in cc. This needs to be done before any getTransport(...) is called.
689 cc.mu.Lock() 603 cc.mu.Lock()
690 if cc.conns == nil { 604 if cc.conns == nil {
691 cc.mu.Unlock() 605 cc.mu.Unlock()
692 return ErrClientConnClosing 606 return nil, ErrClientConnClosing
607 }
608 if channelz.IsOn() {
609 ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
610 channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
611 Desc: "Subchannel Created",
612 Severity: channelz.CtINFO,
613 Parent: &channelz.TraceEventDesc{
614 Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
615 Severity: channelz.CtINFO,
616 },
617 })
693 } 618 }
694 stale := cc.conns[ac.addr] 619 cc.conns[ac] = struct{}{}
695 cc.conns[ac.addr] = ac
696 cc.mu.Unlock() 620 cc.mu.Unlock()
697 if stale != nil { 621 return ac, nil
698 // There is an addrConn alive on ac.addr already. This could be due to 622}
699 // a buggy Balancer that reports duplicated Addresses. 623
700 if tearDownErr == nil { 624// removeAddrConn removes the addrConn in the subConn from clientConn.
701 // tearDownErr is nil if resetAddrConn is called by 625// It also tears down the ac with the given error.
702 // 1) Dial 626func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
703 // 2) lbWatcher 627 cc.mu.Lock()
704 // In both cases, the stale ac should drain, not close. 628 if cc.conns == nil {
705 stale.tearDown(errConnDrain) 629 cc.mu.Unlock()
706 } else { 630 return
707 stale.tearDown(tearDownErr)
708 }
709 } 631 }
710 if block { 632 delete(cc.conns, ac)
711 if err := ac.resetTransport(false); err != nil { 633 cc.mu.Unlock()
712 if err != errConnClosing { 634 ac.tearDown(err)
713 // Tear down ac and delete it from cc.conns. 635}
714 cc.mu.Lock() 636
715 delete(cc.conns, ac.addr) 637func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
716 cc.mu.Unlock() 638 return &channelz.ChannelInternalMetric{
717 ac.tearDown(err) 639 State: cc.GetState(),
718 } 640 Target: cc.target,
719 if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { 641 CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted),
720 return e.Origin() 642 CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded),
721 } 643 CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed),
722 return err 644 LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
723 } 645 }
724 // Start to monitor the error status of transport. 646}
725 go ac.transportMonitor() 647
726 } else { 648// Target returns the target string of the ClientConn.
727 // Start a goroutine connecting to the server asynchronously. 649// This is an EXPERIMENTAL API.
728 go func() { 650func (cc *ClientConn) Target() string {
729 if err := ac.resetTransport(false); err != nil { 651 return cc.target
730 grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) 652}
731 if err != errConnClosing { 653
732 // Keep this ac in cc.conns, to get the reason it's torn down. 654func (cc *ClientConn) incrCallsStarted() {
733 ac.tearDown(err) 655 atomic.AddInt64(&cc.czData.callsStarted, 1)
734 } 656 atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
735 return 657}
736 } 658
737 ac.transportMonitor() 659func (cc *ClientConn) incrCallsSucceeded() {
738 }() 660 atomic.AddInt64(&cc.czData.callsSucceeded, 1)
661}
662
663func (cc *ClientConn) incrCallsFailed() {
664 atomic.AddInt64(&cc.czData.callsFailed, 1)
665}
666
667// connect starts creating a transport.
668// It does nothing if the ac is not IDLE.
669// TODO(bar) Move this to the addrConn section.
670func (ac *addrConn) connect() error {
671 ac.mu.Lock()
672 if ac.state == connectivity.Shutdown {
673 ac.mu.Unlock()
674 return errConnClosing
675 }
676 if ac.state != connectivity.Idle {
677 ac.mu.Unlock()
678 return nil
739 } 679 }
680 ac.updateConnectivityState(connectivity.Connecting)
681 ac.mu.Unlock()
682
683 // Start a goroutine connecting to the server asynchronously.
684 go ac.resetTransport()
740 return nil 685 return nil
741} 686}
742 687
688// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
689//
690// It checks whether current connected address of ac is in the new addrs list.
691// - If true, it updates ac.addrs and returns true. The ac will keep using
692// the existing connection.
693// - If false, it does nothing and returns false.
694func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
695 ac.mu.Lock()
696 defer ac.mu.Unlock()
697 grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
698 if ac.state == connectivity.Shutdown {
699 ac.addrs = addrs
700 return true
701 }
702
703 // Unless we're busy reconnecting already, let's reconnect from the top of
704 // the list.
705 if ac.state != connectivity.Ready {
706 return false
707 }
708
709 var curAddrFound bool
710 for _, a := range addrs {
711 if reflect.DeepEqual(ac.curAddr, a) {
712 curAddrFound = true
713 break
714 }
715 }
716 grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
717 if curAddrFound {
718 ac.addrs = addrs
719 }
720
721 return curAddrFound
722}
723
743// GetMethodConfig gets the method config of the input method. 724// GetMethodConfig gets the method config of the input method.
744// If there's an exact match for input method (i.e. /service/method), we return 725// If there's an exact match for input method (i.e. /service/method), we return
745// the corresponding MethodConfig. 726// the corresponding MethodConfig.
746// If there isn't an exact match for the input method, we look for the default config 727// If there isn't an exact match for the input method, we look for the default config
747// under the service (i.e /service/). If there is a default MethodConfig for 728// under the service (i.e /service/). If there is a default MethodConfig for
748// the serivce, we return it. 729// the service, we return it.
749// Otherwise, we return an empty MethodConfig. 730// Otherwise, we return an empty MethodConfig.
750func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { 731func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
751 // TODO: Avoid the locking here. 732 // TODO: Avoid the locking here.
@@ -754,68 +735,122 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
754 m, ok := cc.sc.Methods[method] 735 m, ok := cc.sc.Methods[method]
755 if !ok { 736 if !ok {
756 i := strings.LastIndex(method, "/") 737 i := strings.LastIndex(method, "/")
757 m, _ = cc.sc.Methods[method[:i+1]] 738 m = cc.sc.Methods[method[:i+1]]
758 } 739 }
759 return m 740 return m
760} 741}
761 742
762func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { 743func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
763 var ( 744 cc.mu.RLock()
764 ac *addrConn 745 defer cc.mu.RUnlock()
765 ok bool 746 return cc.sc.healthCheckConfig
766 put func() 747}
767 ) 748
768 if cc.dopts.balancer == nil { 749func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
769 // If balancer is nil, there should be only one addrConn available. 750 hdr, _ := metadata.FromOutgoingContext(ctx)
770 cc.mu.RLock() 751 t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
771 if cc.conns == nil { 752 FullMethodName: method,
772 cc.mu.RUnlock() 753 Header: hdr,
773 return nil, nil, toRPCErr(ErrClientConnClosing) 754 })
774 } 755 if err != nil {
775 for _, ac = range cc.conns { 756 return nil, nil, toRPCErr(err)
776 // Break after the first iteration to get the first addrConn. 757 }
777 ok = true 758 return t, done, nil
778 break 759}
760
761// handleServiceConfig parses the service config string in JSON format to Go native
762// struct ServiceConfig, and store both the struct and the JSON string in ClientConn.
763func (cc *ClientConn) handleServiceConfig(js string) error {
764 if cc.dopts.disableServiceConfig {
765 return nil
766 }
767 if cc.scRaw == js {
768 return nil
769 }
770 if channelz.IsOn() {
771 channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
772 // The special formatting of \"%s\" instead of %q is to provide nice printing of service config
773 // for human consumption.
774 Desc: fmt.Sprintf("Channel has a new service config \"%s\"", js),
775 Severity: channelz.CtINFO,
776 })
777 }
778 sc, err := parseServiceConfig(js)
779 if err != nil {
780 return err
781 }
782 cc.mu.Lock()
783 // Check if the ClientConn is already closed. Some fields (e.g.
784 // balancerWrapper) are set to nil when closing the ClientConn, and could
785 // cause nil pointer panic if we don't have this check.
786 if cc.conns == nil {
787 cc.mu.Unlock()
788 return nil
789 }
790 cc.scRaw = js
791 cc.sc = sc
792
793 if sc.retryThrottling != nil {
794 newThrottler := &retryThrottler{
795 tokens: sc.retryThrottling.MaxTokens,
796 max: sc.retryThrottling.MaxTokens,
797 thresh: sc.retryThrottling.MaxTokens / 2,
798 ratio: sc.retryThrottling.TokenRatio,
779 } 799 }
780 cc.mu.RUnlock() 800 cc.retryThrottler.Store(newThrottler)
781 } else { 801 } else {
782 var ( 802 cc.retryThrottler.Store((*retryThrottler)(nil))
783 addr Address
784 err error
785 )
786 addr, put, err = cc.dopts.balancer.Get(ctx, opts)
787 if err != nil {
788 return nil, nil, toRPCErr(err)
789 }
790 cc.mu.RLock()
791 if cc.conns == nil {
792 cc.mu.RUnlock()
793 return nil, nil, toRPCErr(ErrClientConnClosing)
794 }
795 ac, ok = cc.conns[addr]
796 cc.mu.RUnlock()
797 } 803 }
798 if !ok { 804
799 if put != nil { 805 if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
800 updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) 806 if cc.curBalancerName == grpclbName {
801 put() 807 // If current balancer is grpclb, there's at least one grpclb
808 // balancer address in the resolved list. Don't switch the balancer,
809 // but change the previous balancer name, so if a new resolved
810 // address list doesn't contain grpclb address, balancer will be
811 // switched to *sc.LB.
812 cc.preBalancerName = *sc.LB
813 } else {
814 cc.switchBalancer(*sc.LB)
815 cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
802 } 816 }
803 return nil, nil, errConnClosing
804 } 817 }
805 t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) 818
806 if err != nil { 819 cc.mu.Unlock()
807 if put != nil { 820 return nil
808 updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) 821}
809 put() 822
810 } 823func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
811 return nil, nil, err 824 cc.mu.RLock()
825 r := cc.resolverWrapper
826 cc.mu.RUnlock()
827 if r == nil {
828 return
829 }
830 go r.resolveNow(o)
831}
832
833// ResetConnectBackoff wakes up all subchannels in transient failure and causes
834// them to attempt another connection immediately. It also resets the backoff
835// times used for subsequent attempts regardless of the current state.
836//
837// In general, this function should not be used. Typical service or network
838// outages result in a reasonable client reconnection strategy by default.
839// However, if a previously unavailable network becomes available, this may be
840// used to trigger an immediate reconnect.
841//
842// This API is EXPERIMENTAL.
843func (cc *ClientConn) ResetConnectBackoff() {
844 cc.mu.Lock()
845 defer cc.mu.Unlock()
846 for ac := range cc.conns {
847 ac.resetConnectBackoff()
812 } 848 }
813 return t, put, nil
814} 849}
815 850
816// Close tears down the ClientConn and all underlying connections. 851// Close tears down the ClientConn and all underlying connections.
817func (cc *ClientConn) Close() error { 852func (cc *ClientConn) Close() error {
818 cc.cancel() 853 defer cc.cancel()
819 854
820 cc.mu.Lock() 855 cc.mu.Lock()
821 if cc.conns == nil { 856 if cc.conns == nil {
@@ -825,13 +860,41 @@ func (cc *ClientConn) Close() error {
825 conns := cc.conns 860 conns := cc.conns
826 cc.conns = nil 861 cc.conns = nil
827 cc.csMgr.updateState(connectivity.Shutdown) 862 cc.csMgr.updateState(connectivity.Shutdown)
863
864 rWrapper := cc.resolverWrapper
865 cc.resolverWrapper = nil
866 bWrapper := cc.balancerWrapper
867 cc.balancerWrapper = nil
828 cc.mu.Unlock() 868 cc.mu.Unlock()
829 if cc.dopts.balancer != nil { 869
830 cc.dopts.balancer.Close() 870 cc.blockingpicker.close()
871
872 if rWrapper != nil {
873 rWrapper.close()
831 } 874 }
832 for _, ac := range conns { 875 if bWrapper != nil {
876 bWrapper.close()
877 }
878
879 for ac := range conns {
833 ac.tearDown(ErrClientConnClosing) 880 ac.tearDown(ErrClientConnClosing)
834 } 881 }
882 if channelz.IsOn() {
883 ted := &channelz.TraceEventDesc{
884 Desc: "Channel Deleted",
885 Severity: channelz.CtINFO,
886 }
887 if cc.dopts.channelzParentID != 0 {
888 ted.Parent = &channelz.TraceEventDesc{
889 Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
890 Severity: channelz.CtINFO,
891 }
892 }
893 channelz.AddTraceEvent(cc.channelzID, ted)
894 // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
895 // the entity beng deleted, and thus prevent it from being deleted right away.
896 channelz.RemoveEntry(cc.channelzID)
897 }
835 return nil 898 return nil
836} 899}
837 900
@@ -841,29 +904,56 @@ type addrConn struct {
841 cancel context.CancelFunc 904 cancel context.CancelFunc
842 905
843 cc *ClientConn 906 cc *ClientConn
844 addr Address
845 dopts dialOptions 907 dopts dialOptions
846 events trace.EventLog 908 acbw balancer.SubConn
909 scopts balancer.NewSubConnOptions
910
911 // transport is set when there's a viable transport (note: ac state may not be READY as LB channel
912 // health checking may require server to report healthy to set ac to READY), and is reset
913 // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway
914 // is received, transport is closed, ac has been torn down).
915 transport transport.ClientTransport // The current transport.
847 916
848 csEvltr *connectivityStateEvaluator 917 mu sync.Mutex
918 curAddr resolver.Address // The current address.
919 addrs []resolver.Address // All addresses that the resolver resolved to.
849 920
850 mu sync.Mutex 921 // Use updateConnectivityState for updating addrConn's connectivity state.
851 state connectivity.State 922 state connectivity.State
852 down func(error) // the handler called when a connection is down. 923
853 // ready is closed and becomes nil when a new transport is up or failed 924 tearDownErr error // The reason this addrConn is torn down.
854 // due to timeout. 925
855 ready chan struct{} 926 backoffIdx int // Needs to be stateful for resetConnectBackoff.
856 transport transport.ClientTransport 927 resetBackoff chan struct{}
857 928
858 // The reason this addrConn is torn down. 929 channelzID int64 // channelz unique identification number.
859 tearDownErr error 930 czData *channelzData
931 healthCheckEnabled bool
932}
933
934// Note: this requires a lock on ac.mu.
935func (ac *addrConn) updateConnectivityState(s connectivity.State) {
936 if ac.state == s {
937 return
938 }
939
940 updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s)
941 grpclog.Infof(updateMsg)
942 ac.state = s
943 if channelz.IsOn() {
944 channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
945 Desc: updateMsg,
946 Severity: channelz.CtINFO,
947 })
948 }
949 ac.cc.handleSubConnStateChange(ac.acbw, s)
860} 950}
861 951
862// adjustParams updates parameters used to create transports upon 952// adjustParams updates parameters used to create transports upon
863// receiving a GoAway. 953// receiving a GoAway.
864func (ac *addrConn) adjustParams(r transport.GoAwayReason) { 954func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
865 switch r { 955 switch r {
866 case transport.TooManyPings: 956 case transport.GoAwayTooManyPings:
867 v := 2 * ac.dopts.copts.KeepaliveParams.Time 957 v := 2 * ac.dopts.copts.KeepaliveParams.Time
868 ac.cc.mu.Lock() 958 ac.cc.mu.Lock()
869 if v > ac.cc.mkp.Time { 959 if v > ac.cc.mkp.Time {
@@ -873,246 +963,359 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
873 } 963 }
874} 964}
875 965
876// printf records an event in ac's event log, unless ac has been closed. 966func (ac *addrConn) resetTransport() {
877// REQUIRES ac.mu is held. 967 for i := 0; ; i++ {
878func (ac *addrConn) printf(format string, a ...interface{}) { 968 tryNextAddrFromStart := grpcsync.NewEvent()
879 if ac.events != nil {
880 ac.events.Printf(format, a...)
881 }
882}
883 969
884// errorf records an error in ac's event log, unless ac has been closed.
885// REQUIRES ac.mu is held.
886func (ac *addrConn) errorf(format string, a ...interface{}) {
887 if ac.events != nil {
888 ac.events.Errorf(format, a...)
889 }
890}
891
892// resetTransport recreates a transport to the address for ac.
893// For the old transport:
894// - if drain is true, it will be gracefully closed.
895// - otherwise, it will be closed.
896func (ac *addrConn) resetTransport(drain bool) error {
897 ac.mu.Lock()
898 if ac.state == connectivity.Shutdown {
899 ac.mu.Unlock()
900 return errConnClosing
901 }
902 ac.printf("connecting")
903 if ac.down != nil {
904 ac.down(downErrorf(false, true, "%v", errNetworkIO))
905 ac.down = nil
906 }
907 oldState := ac.state
908 ac.state = connectivity.Connecting
909 ac.csEvltr.recordTransition(oldState, ac.state)
910 t := ac.transport
911 ac.transport = nil
912 ac.mu.Unlock()
913 if t != nil && !drain {
914 t.Close()
915 }
916 ac.cc.mu.RLock()
917 ac.dopts.copts.KeepaliveParams = ac.cc.mkp
918 ac.cc.mu.RUnlock()
919 for retries := 0; ; retries++ {
920 ac.mu.Lock() 970 ac.mu.Lock()
921 if ac.state == connectivity.Shutdown { 971 if i > 0 {
922 // ac.tearDown(...) has been invoked. 972 ac.cc.resolveNow(resolver.ResolveNowOption{})
923 ac.mu.Unlock()
924 return errConnClosing
925 } 973 }
974 addrs := ac.addrs
975 backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
926 ac.mu.Unlock() 976 ac.mu.Unlock()
927 sleepTime := ac.dopts.bs.backoff(retries)
928 timeout := minConnectTimeout
929 if timeout < sleepTime {
930 timeout = sleepTime
931 }
932 ctx, cancel := context.WithTimeout(ac.ctx, timeout)
933 connectTime := time.Now()
934 sinfo := transport.TargetInfo{
935 Addr: ac.addr.Addr,
936 Metadata: ac.addr.Metadata,
937 }
938 newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts)
939 // Don't call cancel in success path due to a race in Go 1.6:
940 // https://github.com/golang/go/issues/15078.
941 if err != nil {
942 cancel()
943 977
944 if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { 978 addrLoop:
945 return err 979 for _, addr := range addrs {
980 ac.mu.Lock()
981
982 if ac.state == connectivity.Shutdown {
983 ac.mu.Unlock()
984 return
985 }
986 ac.updateConnectivityState(connectivity.Connecting)
987 ac.transport = nil
988 ac.mu.Unlock()
989
990 // This will be the duration that dial gets to finish.
991 dialDuration := getMinConnectTimeout()
992 if dialDuration < backoffFor {
993 // Give dial more time as we keep failing to connect.
994 dialDuration = backoffFor
946 } 995 }
947 grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr) 996 connectDeadline := time.Now().Add(dialDuration)
997
948 ac.mu.Lock() 998 ac.mu.Lock()
999 ac.cc.mu.RLock()
1000 ac.dopts.copts.KeepaliveParams = ac.cc.mkp
1001 ac.cc.mu.RUnlock()
1002
949 if ac.state == connectivity.Shutdown { 1003 if ac.state == connectivity.Shutdown {
950 // ac.tearDown(...) has been invoked.
951 ac.mu.Unlock() 1004 ac.mu.Unlock()
952 return errConnClosing 1005 return
953 } 1006 }
954 ac.errorf("transient failure: %v", err) 1007
955 oldState = ac.state 1008 copts := ac.dopts.copts
956 ac.state = connectivity.TransientFailure 1009 if ac.scopts.CredsBundle != nil {
957 ac.csEvltr.recordTransition(oldState, ac.state) 1010 copts.CredsBundle = ac.scopts.CredsBundle
958 if ac.ready != nil {
959 close(ac.ready)
960 ac.ready = nil
961 } 1011 }
1012 hctx, hcancel := context.WithCancel(ac.ctx)
1013 defer hcancel()
962 ac.mu.Unlock() 1014 ac.mu.Unlock()
963 timer := time.NewTimer(sleepTime - time.Since(connectTime)) 1015
964 select { 1016 if channelz.IsOn() {
965 case <-timer.C: 1017 channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
966 case <-ac.ctx.Done(): 1018 Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
967 timer.Stop() 1019 Severity: channelz.CtINFO,
968 return ac.ctx.Err() 1020 })
1021 }
1022
1023 reconnect := grpcsync.NewEvent()
1024 prefaceReceived := make(chan struct{})
1025 newTr, err := ac.createTransport(addr, copts, connectDeadline, reconnect, prefaceReceived)
1026 if err == nil {
1027 ac.mu.Lock()
1028 ac.curAddr = addr
1029 ac.transport = newTr
1030 ac.mu.Unlock()
1031
1032 healthCheckConfig := ac.cc.healthCheckConfig()
1033 // LB channel health checking is only enabled when all the four requirements below are met:
1034 // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption,
1035 // 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package,
1036 // 3. a service config with non-empty healthCheckConfig field is provided,
1037 // 4. the current load balancer allows it.
1038 healthcheckManagingState := false
1039 if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled {
1040 if ac.cc.dopts.healthCheckFunc == nil {
1041 // TODO: add a link to the health check doc in the error message.
1042 grpclog.Error("the client side LB channel health check function has not been set.")
1043 } else {
1044 // TODO(deklerk) refactor to just return transport
1045 go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName)
1046 healthcheckManagingState = true
1047 }
1048 }
1049 if !healthcheckManagingState {
1050 ac.mu.Lock()
1051 ac.updateConnectivityState(connectivity.Ready)
1052 ac.mu.Unlock()
1053 }
1054 } else {
1055 hcancel()
1056 if err == errConnClosing {
1057 return
1058 }
1059
1060 if tryNextAddrFromStart.HasFired() {
1061 break addrLoop
1062 }
1063 continue
1064 }
1065
1066 ac.mu.Lock()
1067 reqHandshake := ac.dopts.reqHandshake
1068 ac.mu.Unlock()
1069
1070 <-reconnect.Done()
1071 hcancel()
1072
1073 if reqHandshake == envconfig.RequireHandshakeHybrid {
1074 // In RequireHandshakeHybrid mode, we must check to see whether
1075 // server preface has arrived yet to decide whether to start
1076 // reconnecting at the top of the list (server preface received)
1077 // or continue with the next addr in the list as if the
1078 // connection were not successful (server preface not received).
1079 select {
1080 case <-prefaceReceived:
1081 // We received a server preface - huzzah! We consider this
1082 // a success and restart from the top of the addr list.
1083 ac.mu.Lock()
1084 ac.backoffIdx = 0
1085 ac.mu.Unlock()
1086 break addrLoop
1087 default:
1088 // Despite having set state to READY, in hybrid mode we
1089 // consider this a failure and continue connecting at the
1090 // next addr in the list.
1091 ac.mu.Lock()
1092 if ac.state == connectivity.Shutdown {
1093 ac.mu.Unlock()
1094 return
1095 }
1096
1097 ac.updateConnectivityState(connectivity.TransientFailure)
1098 ac.mu.Unlock()
1099
1100 if tryNextAddrFromStart.HasFired() {
1101 break addrLoop
1102 }
1103 }
1104 } else {
1105 // In RequireHandshakeOn mode, we would have already waited for
1106 // the server preface, so we consider this a success and restart
1107 // from the top of the addr list. In RequireHandshakeOff mode,
1108 // we don't care to wait for the server preface before
1109 // considering this a success, so we also restart from the top
1110 // of the addr list.
1111 ac.mu.Lock()
1112 ac.backoffIdx = 0
1113 ac.mu.Unlock()
1114 break addrLoop
969 } 1115 }
970 timer.Stop()
971 continue
972 } 1116 }
1117
1118 // After exhausting all addresses, or after need to reconnect after a
1119 // READY, the addrConn enters TRANSIENT_FAILURE.
973 ac.mu.Lock() 1120 ac.mu.Lock()
974 ac.printf("ready")
975 if ac.state == connectivity.Shutdown { 1121 if ac.state == connectivity.Shutdown {
976 // ac.tearDown(...) has been invoked.
977 ac.mu.Unlock() 1122 ac.mu.Unlock()
978 newTransport.Close() 1123 return
979 return errConnClosing
980 }
981 oldState = ac.state
982 ac.state = connectivity.Ready
983 ac.csEvltr.recordTransition(oldState, ac.state)
984 ac.transport = newTransport
985 if ac.ready != nil {
986 close(ac.ready)
987 ac.ready = nil
988 }
989 if ac.cc.dopts.balancer != nil {
990 ac.down = ac.cc.dopts.balancer.Up(ac.addr)
991 } 1124 }
1125 ac.updateConnectivityState(connectivity.TransientFailure)
1126
1127 // Backoff.
1128 b := ac.resetBackoff
1129 timer := time.NewTimer(backoffFor)
1130 acctx := ac.ctx
992 ac.mu.Unlock() 1131 ac.mu.Unlock()
993 return nil 1132
1133 select {
1134 case <-timer.C:
1135 ac.mu.Lock()
1136 ac.backoffIdx++
1137 ac.mu.Unlock()
1138 case <-b:
1139 timer.Stop()
1140 case <-acctx.Done():
1141 timer.Stop()
1142 return
1143 }
994 } 1144 }
995} 1145}
996 1146
997// Run in a goroutine to track the error in transport and create the 1147// createTransport creates a connection to one of the backends in addrs. It
998// new transport if an error happens. It returns when the channel is closing. 1148// sets ac.transport in the success case, or it returns an error if it was
999func (ac *addrConn) transportMonitor() { 1149// unable to successfully create a transport.
1000 for { 1150//
1151// If waitForHandshake is enabled, it blocks until server preface arrives.
1152func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time, reconnect *grpcsync.Event, prefaceReceived chan struct{}) (transport.ClientTransport, error) {
1153 onCloseCalled := make(chan struct{})
1154
1155 target := transport.TargetInfo{
1156 Addr: addr.Addr,
1157 Metadata: addr.Metadata,
1158 Authority: ac.cc.authority,
1159 }
1160
1161 prefaceTimer := time.NewTimer(connectDeadline.Sub(time.Now()))
1162
1163 onGoAway := func(r transport.GoAwayReason) {
1001 ac.mu.Lock() 1164 ac.mu.Lock()
1002 t := ac.transport 1165 ac.adjustParams(r)
1003 ac.mu.Unlock() 1166 ac.mu.Unlock()
1004 select { 1167 reconnect.Fire()
1005 // This is needed to detect the teardown when 1168 }
1006 // the addrConn is idle (i.e., no RPC in flight). 1169
1007 case <-ac.ctx.Done(): 1170 onClose := func() {
1008 select { 1171 close(onCloseCalled)
1009 case <-t.Error(): 1172 prefaceTimer.Stop()
1010 t.Close() 1173 reconnect.Fire()
1011 default: 1174 }
1012 } 1175
1013 return 1176 onPrefaceReceipt := func() {
1014 case <-t.GoAway(): 1177 close(prefaceReceived)
1015 ac.adjustParams(t.GetGoAwayReason()) 1178 prefaceTimer.Stop()
1016 // If GoAway happens without any network I/O error, the underlying transport 1179 }
1017 // will be gracefully closed, and a new transport will be created. 1180
1018 // (The transport will be closed when all the pending RPCs finished or failed.) 1181 connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
1019 // If GoAway and some network I/O error happen concurrently, the underlying transport 1182 defer cancel()
1020 // will be closed, and a new transport will be created. 1183 if channelz.IsOn() {
1021 var drain bool 1184 copts.ChannelzParentID = ac.channelzID
1185 }
1186
1187 newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
1188
1189 if err == nil {
1190 if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn {
1022 select { 1191 select {
1023 case <-t.Error(): 1192 case <-prefaceTimer.C:
1024 default: 1193 // We didn't get the preface in time.
1025 drain = true 1194 newTr.Close()
1026 } 1195 err = errors.New("timed out waiting for server handshake")
1027 if err := ac.resetTransport(drain); err != nil { 1196 case <-prefaceReceived:
1028 grpclog.Infof("get error from resetTransport %v, transportMonitor returning", err) 1197 // We got the preface - huzzah! things are good.
1029 if err != errConnClosing { 1198 case <-onCloseCalled:
1030 // Keep this ac in cc.conns, to get the reason it's torn down. 1199 // The transport has already closed - noop.
1031 ac.tearDown(err) 1200 return nil, errors.New("connection closed")
1032 }
1033 return
1034 } 1201 }
1035 case <-t.Error(): 1202 } else if ac.dopts.reqHandshake == envconfig.RequireHandshakeHybrid {
1036 select { 1203 go func() {
1037 case <-ac.ctx.Done(): 1204 select {
1038 t.Close() 1205 case <-prefaceTimer.C:
1039 return 1206 // We didn't get the preface in time.
1040 case <-t.GoAway(): 1207 newTr.Close()
1041 ac.adjustParams(t.GetGoAwayReason()) 1208 case <-prefaceReceived:
1042 if err := ac.resetTransport(false); err != nil { 1209 // We got the preface just in the nick of time - huzzah!
1043 grpclog.Infof("get error from resetTransport %v, transportMonitor returning", err) 1210 case <-onCloseCalled:
1044 if err != errConnClosing { 1211 // The transport has already closed - noop.
1045 // Keep this ac in cc.conns, to get the reason it's torn down.
1046 ac.tearDown(err)
1047 }
1048 return
1049 } 1212 }
1050 default: 1213 }()
1051 }
1052 ac.mu.Lock()
1053 if ac.state == connectivity.Shutdown {
1054 // ac has been shutdown.
1055 ac.mu.Unlock()
1056 return
1057 }
1058 oldState := ac.state
1059 ac.state = connectivity.TransientFailure
1060 ac.csEvltr.recordTransition(oldState, ac.state)
1061 ac.mu.Unlock()
1062 if err := ac.resetTransport(false); err != nil {
1063 grpclog.Infof("get error from resetTransport %v, transportMonitor returning", err)
1064 ac.mu.Lock()
1065 ac.printf("transport exiting: %v", err)
1066 ac.mu.Unlock()
1067 grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err)
1068 if err != errConnClosing {
1069 // Keep this ac in cc.conns, to get the reason it's torn down.
1070 ac.tearDown(err)
1071 }
1072 return
1073 }
1074 } 1214 }
1075 } 1215 }
1076}
1077 1216
1078// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or 1217 if err != nil {
1079// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. 1218 // newTr is either nil, or closed.
1080func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { 1219 ac.cc.blockingpicker.updateConnectionError(err)
1081 for {
1082 ac.mu.Lock() 1220 ac.mu.Lock()
1083 switch { 1221 if ac.state == connectivity.Shutdown {
1084 case ac.state == connectivity.Shutdown: 1222 // ac.tearDown(...) has been invoked.
1085 if failfast || !hasBalancer {
1086 // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
1087 err := ac.tearDownErr
1088 ac.mu.Unlock()
1089 return nil, err
1090 }
1091 ac.mu.Unlock() 1223 ac.mu.Unlock()
1224
1092 return nil, errConnClosing 1225 return nil, errConnClosing
1093 case ac.state == connectivity.Ready: 1226 }
1094 ct := ac.transport 1227 ac.mu.Unlock()
1095 ac.mu.Unlock() 1228 grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
1096 return ct, nil 1229 return nil, err
1097 case ac.state == connectivity.TransientFailure: 1230 }
1098 if failfast || hasBalancer { 1231
1099 ac.mu.Unlock() 1232 // Now there is a viable transport to be use, so set ac.transport to reflect the new viable transport.
1100 return nil, errConnUnavailable 1233 ac.mu.Lock()
1234 if ac.state == connectivity.Shutdown {
1235 ac.mu.Unlock()
1236 newTr.Close()
1237 return nil, errConnClosing
1238 }
1239 ac.mu.Unlock()
1240
1241 // Now there is a viable transport to be use, so set ac.transport to reflect the new viable transport.
1242 ac.mu.Lock()
1243 if ac.state == connectivity.Shutdown {
1244 ac.mu.Unlock()
1245 newTr.Close()
1246 return nil, errConnClosing
1247 }
1248 ac.mu.Unlock()
1249
1250 return newTr, nil
1251}
1252
1253func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) {
1254 // Set up the health check helper functions
1255 newStream := func() (interface{}, error) {
1256 return ac.newClientStream(ctx, &StreamDesc{ServerStreams: true}, "/grpc.health.v1.Health/Watch", newTr)
1257 }
1258 firstReady := true
1259 reportHealth := func(ok bool) {
1260 ac.mu.Lock()
1261 defer ac.mu.Unlock()
1262 if ac.transport != newTr {
1263 return
1264 }
1265 if ok {
1266 if firstReady {
1267 firstReady = false
1268 ac.curAddr = addr
1101 } 1269 }
1270 ac.updateConnectivityState(connectivity.Ready)
1271 } else {
1272 ac.updateConnectivityState(connectivity.TransientFailure)
1102 } 1273 }
1103 ready := ac.ready 1274 }
1104 if ready == nil { 1275 err := ac.cc.dopts.healthCheckFunc(ctx, newStream, reportHealth, serviceName)
1105 ready = make(chan struct{}) 1276 if err != nil {
1106 ac.ready = ready 1277 if status.Code(err) == codes.Unimplemented {
1278 if channelz.IsOn() {
1279 channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
1280 Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled",
1281 Severity: channelz.CtError,
1282 })
1283 }
1284 grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
1285 } else {
1286 grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
1107 } 1287 }
1288 }
1289}
1290
1291func (ac *addrConn) resetConnectBackoff() {
1292 ac.mu.Lock()
1293 close(ac.resetBackoff)
1294 ac.backoffIdx = 0
1295 ac.resetBackoff = make(chan struct{})
1296 ac.mu.Unlock()
1297}
1298
1299// getReadyTransport returns the transport if ac's state is READY.
1300// Otherwise it returns nil, false.
1301// If ac's state is IDLE, it will trigger ac to connect.
1302func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
1303 ac.mu.Lock()
1304 if ac.state == connectivity.Ready && ac.transport != nil {
1305 t := ac.transport
1108 ac.mu.Unlock() 1306 ac.mu.Unlock()
1109 select { 1307 return t, true
1110 case <-ctx.Done(): 1308 }
1111 return nil, toRPCErr(ctx.Err()) 1309 var idle bool
1112 // Wait until the new transport is ready or failed. 1310 if ac.state == connectivity.Idle {
1113 case <-ready: 1311 idle = true
1114 }
1115 } 1312 }
1313 ac.mu.Unlock()
1314 // Trigger idle ac to connect.
1315 if idle {
1316 ac.connect()
1317 }
1318 return nil, false
1116} 1319}
1117 1320
1118// tearDown starts to tear down the addrConn. 1321// tearDown starts to tear down the addrConn.
@@ -1121,38 +1324,126 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans
1121// tight loop. 1324// tight loop.
1122// tearDown doesn't remove ac from ac.cc.conns. 1325// tearDown doesn't remove ac from ac.cc.conns.
1123func (ac *addrConn) tearDown(err error) { 1326func (ac *addrConn) tearDown(err error) {
1124 ac.cancel()
1125
1126 ac.mu.Lock() 1327 ac.mu.Lock()
1127 defer ac.mu.Unlock() 1328 if ac.state == connectivity.Shutdown {
1128 if ac.down != nil { 1329 ac.mu.Unlock()
1129 ac.down(downErrorf(false, false, "%v", err)) 1330 return
1130 ac.down = nil
1131 } 1331 }
1132 if err == errConnDrain && ac.transport != nil { 1332 curTr := ac.transport
1333 ac.transport = nil
1334 // We have to set the state to Shutdown before anything else to prevent races
1335 // between setting the state and logic that waits on context cancelation / etc.
1336 ac.updateConnectivityState(connectivity.Shutdown)
1337 ac.cancel()
1338 ac.tearDownErr = err
1339 ac.curAddr = resolver.Address{}
1340 if err == errConnDrain && curTr != nil {
1133 // GracefulClose(...) may be executed multiple times when 1341 // GracefulClose(...) may be executed multiple times when
1134 // i) receiving multiple GoAway frames from the server; or 1342 // i) receiving multiple GoAway frames from the server; or
1135 // ii) there are concurrent name resolver/Balancer triggered 1343 // ii) there are concurrent name resolver/Balancer triggered
1136 // address removal and GoAway. 1344 // address removal and GoAway.
1137 ac.transport.GracefulClose() 1345 // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
1346 ac.mu.Unlock()
1347 curTr.GracefulClose()
1348 ac.mu.Lock()
1138 } 1349 }
1139 if ac.state == connectivity.Shutdown { 1350 if channelz.IsOn() {
1140 return 1351 channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
1352 Desc: "Subchannel Deleted",
1353 Severity: channelz.CtINFO,
1354 Parent: &channelz.TraceEventDesc{
1355 Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
1356 Severity: channelz.CtINFO,
1357 },
1358 })
1359 // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
1360 // the entity beng deleted, and thus prevent it from being deleted right away.
1361 channelz.RemoveEntry(ac.channelzID)
1141 } 1362 }
1142 oldState := ac.state 1363 ac.mu.Unlock()
1143 ac.state = connectivity.Shutdown 1364}
1144 ac.tearDownErr = err 1365
1145 ac.csEvltr.recordTransition(oldState, ac.state) 1366func (ac *addrConn) getState() connectivity.State {
1146 if ac.events != nil { 1367 ac.mu.Lock()
1147 ac.events.Finish() 1368 defer ac.mu.Unlock()
1148 ac.events = nil 1369 return ac.state
1370}
1371
1372func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
1373 ac.mu.Lock()
1374 addr := ac.curAddr.Addr
1375 ac.mu.Unlock()
1376 return &channelz.ChannelInternalMetric{
1377 State: ac.getState(),
1378 Target: addr,
1379 CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted),
1380 CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded),
1381 CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed),
1382 LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
1383 }
1384}
1385
1386func (ac *addrConn) incrCallsStarted() {
1387 atomic.AddInt64(&ac.czData.callsStarted, 1)
1388 atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
1389}
1390
1391func (ac *addrConn) incrCallsSucceeded() {
1392 atomic.AddInt64(&ac.czData.callsSucceeded, 1)
1393}
1394
1395func (ac *addrConn) incrCallsFailed() {
1396 atomic.AddInt64(&ac.czData.callsFailed, 1)
1397}
1398
1399type retryThrottler struct {
1400 max float64
1401 thresh float64
1402 ratio float64
1403
1404 mu sync.Mutex
1405 tokens float64 // TODO(dfawley): replace with atomic and remove lock.
1406}
1407
1408// throttle subtracts a retry token from the pool and returns whether a retry
1409// should be throttled (disallowed) based upon the retry throttling policy in
1410// the service config.
1411func (rt *retryThrottler) throttle() bool {
1412 if rt == nil {
1413 return false
1414 }
1415 rt.mu.Lock()
1416 defer rt.mu.Unlock()
1417 rt.tokens--
1418 if rt.tokens < 0 {
1419 rt.tokens = 0
1149 } 1420 }
1150 if ac.ready != nil { 1421 return rt.tokens <= rt.thresh
1151 close(ac.ready) 1422}
1152 ac.ready = nil 1423
1424func (rt *retryThrottler) successfulRPC() {
1425 if rt == nil {
1426 return
1153 } 1427 }
1154 if ac.transport != nil && err != errConnDrain { 1428 rt.mu.Lock()
1155 ac.transport.Close() 1429 defer rt.mu.Unlock()
1430 rt.tokens += rt.ratio
1431 if rt.tokens > rt.max {
1432 rt.tokens = rt.max
1156 } 1433 }
1157 return
1158} 1434}
1435
1436type channelzChannel struct {
1437 cc *ClientConn
1438}
1439
1440func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
1441 return c.cc.channelzMetric()
1442}
1443
1444// ErrClientConnTimeout indicates that the ClientConn cannot establish the
1445// underlying connections within the specified timeout.
1446//
1447// Deprecated: This error is never returned by grpc and should not be
1448// referenced by users.
1449var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 905b048..1297765 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -19,86 +19,32 @@
19package grpc 19package grpc
20 20
21import ( 21import (
22 "math" 22 "google.golang.org/grpc/encoding"
23 "sync" 23 _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
24
25 "github.com/golang/protobuf/proto"
26) 24)
27 25
26// baseCodec contains the functionality of both Codec and encoding.Codec, but
27// omits the name/string, which vary between the two and are not needed for
28// anything besides the registry in the encoding package.
29type baseCodec interface {
30 Marshal(v interface{}) ([]byte, error)
31 Unmarshal(data []byte, v interface{}) error
32}
33
34var _ baseCodec = Codec(nil)
35var _ baseCodec = encoding.Codec(nil)
36
28// Codec defines the interface gRPC uses to encode and decode messages. 37// Codec defines the interface gRPC uses to encode and decode messages.
29// Note that implementations of this interface must be thread safe; 38// Note that implementations of this interface must be thread safe;
30// a Codec's methods can be called from concurrent goroutines. 39// a Codec's methods can be called from concurrent goroutines.
40//
41// Deprecated: use encoding.Codec instead.
31type Codec interface { 42type Codec interface {
32 // Marshal returns the wire format of v. 43 // Marshal returns the wire format of v.
33 Marshal(v interface{}) ([]byte, error) 44 Marshal(v interface{}) ([]byte, error)
34 // Unmarshal parses the wire format into v. 45 // Unmarshal parses the wire format into v.
35 Unmarshal(data []byte, v interface{}) error 46 Unmarshal(data []byte, v interface{}) error
36 // String returns the name of the Codec implementation. The returned 47 // String returns the name of the Codec implementation. This is unused by
37 // string will be used as part of content type in transmission. 48 // gRPC.
38 String() string 49 String() string
39} 50}
40
41// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
42type protoCodec struct {
43}
44
45type cachedProtoBuffer struct {
46 lastMarshaledSize uint32
47 proto.Buffer
48}
49
50func capToMaxInt32(val int) uint32 {
51 if val > math.MaxInt32 {
52 return uint32(math.MaxInt32)
53 }
54 return uint32(val)
55}
56
57func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
58 protoMsg := v.(proto.Message)
59 newSlice := make([]byte, 0, cb.lastMarshaledSize)
60
61 cb.SetBuf(newSlice)
62 cb.Reset()
63 if err := cb.Marshal(protoMsg); err != nil {
64 return nil, err
65 }
66 out := cb.Bytes()
67 cb.lastMarshaledSize = capToMaxInt32(len(out))
68 return out, nil
69}
70
71func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
72 cb := protoBufferPool.Get().(*cachedProtoBuffer)
73 out, err := p.marshal(v, cb)
74
75 // put back buffer and lose the ref to the slice
76 cb.SetBuf(nil)
77 protoBufferPool.Put(cb)
78 return out, err
79}
80
81func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
82 cb := protoBufferPool.Get().(*cachedProtoBuffer)
83 cb.SetBuf(data)
84 v.(proto.Message).Reset()
85 err := cb.Unmarshal(v.(proto.Message))
86 cb.SetBuf(nil)
87 protoBufferPool.Put(cb)
88 return err
89}
90
91func (protoCodec) String() string {
92 return "proto"
93}
94
95var (
96 protoBufferPool = &sync.Pool{
97 New: func() interface{} {
98 return &cachedProtoBuffer{
99 Buffer: proto.Buffer{},
100 lastMarshaledSize: 16,
101 }
102 },
103 }
104)
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
index e6762d0..0b206a5 100644
--- a/vendor/google.golang.org/grpc/codes/code_string.go
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -1,16 +1,62 @@
1// generated by stringer -type=Code; DO NOT EDIT 1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
2 18
3package codes 19package codes
4 20
5import "fmt" 21import "strconv"
6 22
7const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" 23func (c Code) String() string {
8 24 switch c {
9var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} 25 case OK:
10 26 return "OK"
11func (i Code) String() string { 27 case Canceled:
12 if i+1 >= Code(len(_Code_index)) { 28 return "Canceled"
13 return fmt.Sprintf("Code(%d)", i) 29 case Unknown:
30 return "Unknown"
31 case InvalidArgument:
32 return "InvalidArgument"
33 case DeadlineExceeded:
34 return "DeadlineExceeded"
35 case NotFound:
36 return "NotFound"
37 case AlreadyExists:
38 return "AlreadyExists"
39 case PermissionDenied:
40 return "PermissionDenied"
41 case ResourceExhausted:
42 return "ResourceExhausted"
43 case FailedPrecondition:
44 return "FailedPrecondition"
45 case Aborted:
46 return "Aborted"
47 case OutOfRange:
48 return "OutOfRange"
49 case Unimplemented:
50 return "Unimplemented"
51 case Internal:
52 return "Internal"
53 case Unavailable:
54 return "Unavailable"
55 case DataLoss:
56 return "DataLoss"
57 case Unauthenticated:
58 return "Unauthenticated"
59 default:
60 return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
14 } 61 }
15 return _Code_name[_Code_index[i]:_Code_index[i+1]]
16} 62}
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 21e7733..d9b9d57 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -20,11 +20,14 @@
20// consistent across various languages. 20// consistent across various languages.
21package codes // import "google.golang.org/grpc/codes" 21package codes // import "google.golang.org/grpc/codes"
22 22
23import (
24 "fmt"
25 "strconv"
26)
27
23// A Code is an unsigned 32-bit error code as defined in the gRPC spec. 28// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
24type Code uint32 29type Code uint32
25 30
26//go:generate stringer -type=Code
27
28const ( 31const (
29 // OK is returned on success. 32 // OK is returned on success.
30 OK Code = 0 33 OK Code = 0
@@ -32,9 +35,9 @@ const (
32 // Canceled indicates the operation was canceled (typically by the caller). 35 // Canceled indicates the operation was canceled (typically by the caller).
33 Canceled Code = 1 36 Canceled Code = 1
34 37
35 // Unknown error. An example of where this error may be returned is 38 // Unknown error. An example of where this error may be returned is
36 // if a Status value received from another address space belongs to 39 // if a Status value received from another address space belongs to
37 // an error-space that is not known in this address space. Also 40 // an error-space that is not known in this address space. Also
38 // errors raised by APIs that do not return enough error information 41 // errors raised by APIs that do not return enough error information
39 // may be converted to this error. 42 // may be converted to this error.
40 Unknown Code = 2 43 Unknown Code = 2
@@ -63,15 +66,11 @@ const (
63 // PermissionDenied indicates the caller does not have permission to 66 // PermissionDenied indicates the caller does not have permission to
64 // execute the specified operation. It must not be used for rejections 67 // execute the specified operation. It must not be used for rejections
65 // caused by exhausting some resource (use ResourceExhausted 68 // caused by exhausting some resource (use ResourceExhausted
66 // instead for those errors). It must not be 69 // instead for those errors). It must not be
67 // used if the caller cannot be identified (use Unauthenticated 70 // used if the caller cannot be identified (use Unauthenticated
68 // instead for those errors). 71 // instead for those errors).
69 PermissionDenied Code = 7 72 PermissionDenied Code = 7
70 73
71 // Unauthenticated indicates the request does not have valid
72 // authentication credentials for the operation.
73 Unauthenticated Code = 16
74
75 // ResourceExhausted indicates some resource has been exhausted, perhaps 74 // ResourceExhausted indicates some resource has been exhausted, perhaps
76 // a per-user quota, or perhaps the entire file system is out of space. 75 // a per-user quota, or perhaps the entire file system is out of space.
77 ResourceExhausted Code = 8 76 ResourceExhausted Code = 8
@@ -87,7 +86,7 @@ const (
87 // (b) Use Aborted if the client should retry at a higher-level 86 // (b) Use Aborted if the client should retry at a higher-level
88 // (e.g., restarting a read-modify-write sequence). 87 // (e.g., restarting a read-modify-write sequence).
89 // (c) Use FailedPrecondition if the client should not retry until 88 // (c) Use FailedPrecondition if the client should not retry until
90 // the system state has been explicitly fixed. E.g., if an "rmdir" 89 // the system state has been explicitly fixed. E.g., if an "rmdir"
91 // fails because the directory is non-empty, FailedPrecondition 90 // fails because the directory is non-empty, FailedPrecondition
92 // should be returned since the client should not retry unless 91 // should be returned since the client should not retry unless
93 // they have first fixed up the directory by deleting files from it. 92 // they have first fixed up the directory by deleting files from it.
@@ -116,7 +115,7 @@ const (
116 // file size. 115 // file size.
117 // 116 //
118 // There is a fair bit of overlap between FailedPrecondition and 117 // There is a fair bit of overlap between FailedPrecondition and
119 // OutOfRange. We recommend using OutOfRange (the more specific 118 // OutOfRange. We recommend using OutOfRange (the more specific
120 // error) when it applies so that callers who are iterating through 119 // error) when it applies so that callers who are iterating through
121 // a space can easily look for an OutOfRange error to detect when 120 // a space can easily look for an OutOfRange error to detect when
122 // they are done. 121 // they are done.
@@ -126,8 +125,8 @@ const (
126 // supported/enabled in this service. 125 // supported/enabled in this service.
127 Unimplemented Code = 12 126 Unimplemented Code = 12
128 127
129 // Internal errors. Means some invariants expected by underlying 128 // Internal errors. Means some invariants expected by underlying
130 // system has been broken. If you see one of these errors, 129 // system has been broken. If you see one of these errors,
131 // something is very broken. 130 // something is very broken.
132 Internal Code = 13 131 Internal Code = 13
133 132
@@ -141,4 +140,58 @@ const (
141 140
142 // DataLoss indicates unrecoverable data loss or corruption. 141 // DataLoss indicates unrecoverable data loss or corruption.
143 DataLoss Code = 15 142 DataLoss Code = 15
143
144 // Unauthenticated indicates the request does not have valid
145 // authentication credentials for the operation.
146 Unauthenticated Code = 16
147
148 _maxCode = 17
144) 149)
150
151var strToCode = map[string]Code{
152 `"OK"`: OK,
153 `"CANCELLED"`:/* [sic] */ Canceled,
154 `"UNKNOWN"`: Unknown,
155 `"INVALID_ARGUMENT"`: InvalidArgument,
156 `"DEADLINE_EXCEEDED"`: DeadlineExceeded,
157 `"NOT_FOUND"`: NotFound,
158 `"ALREADY_EXISTS"`: AlreadyExists,
159 `"PERMISSION_DENIED"`: PermissionDenied,
160 `"RESOURCE_EXHAUSTED"`: ResourceExhausted,
161 `"FAILED_PRECONDITION"`: FailedPrecondition,
162 `"ABORTED"`: Aborted,
163 `"OUT_OF_RANGE"`: OutOfRange,
164 `"UNIMPLEMENTED"`: Unimplemented,
165 `"INTERNAL"`: Internal,
166 `"UNAVAILABLE"`: Unavailable,
167 `"DATA_LOSS"`: DataLoss,
168 `"UNAUTHENTICATED"`: Unauthenticated,
169}
170
171// UnmarshalJSON unmarshals b into the Code.
172func (c *Code) UnmarshalJSON(b []byte) error {
173 // From json.Unmarshaler: By convention, to approximate the behavior of
174 // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
175 // a no-op.
176 if string(b) == "null" {
177 return nil
178 }
179 if c == nil {
180 return fmt.Errorf("nil receiver passed to UnmarshalJSON")
181 }
182
183 if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
184 if ci >= _maxCode {
185 return fmt.Errorf("invalid code: %q", ci)
186 }
187
188 *c = Code(ci)
189 return nil
190 }
191
192 if jc, ok := strToCode[string(b)]; ok {
193 *c = jc
194 return nil
195 }
196 return fmt.Errorf("invalid code: %q", string(b))
197}
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
index 568ef5d..b1d7dbc 100644
--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -22,7 +22,8 @@
22package connectivity 22package connectivity
23 23
24import ( 24import (
25 "golang.org/x/net/context" 25 "context"
26
26 "google.golang.org/grpc/grpclog" 27 "google.golang.org/grpc/grpclog"
27) 28)
28 29
diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh
deleted file mode 100644
index b85f918..0000000
--- a/vendor/google.golang.org/grpc/coverage.sh
+++ /dev/null
@@ -1,48 +0,0 @@
1#!/usr/bin/env bash
2
3
4set -e
5
6workdir=.cover
7profile="$workdir/cover.out"
8mode=set
9end2endtest="google.golang.org/grpc/test"
10
11generate_cover_data() {
12 rm -rf "$workdir"
13 mkdir "$workdir"
14
15 for pkg in "$@"; do
16 if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
17 then
18 f="$workdir/$(echo $pkg | tr / -)"
19 go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
20 go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
21 fi
22 done
23
24 echo "mode: $mode" >"$profile"
25 grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
26}
27
28show_cover_report() {
29 go tool cover -${1}="$profile"
30}
31
32push_to_coveralls() {
33 goveralls -coverprofile="$profile"
34}
35
36generate_cover_data $(go list ./...)
37show_cover_report func
38case "$1" in
39"")
40 ;;
41--html)
42 show_cover_report html ;;
43--coveralls)
44 push_to_coveralls ;;
45*)
46 echo >&2 "error: invalid option: $1" ;;
47esac
48rm -rf "$workdir"
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 2475fe8..a851560 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -23,6 +23,7 @@
23package credentials // import "google.golang.org/grpc/credentials" 23package credentials // import "google.golang.org/grpc/credentials"
24 24
25import ( 25import (
26 "context"
26 "crypto/tls" 27 "crypto/tls"
27 "crypto/x509" 28 "crypto/x509"
28 "errors" 29 "errors"
@@ -31,13 +32,12 @@ import (
31 "net" 32 "net"
32 "strings" 33 "strings"
33 34
34 "golang.org/x/net/context" 35 "github.com/golang/protobuf/proto"
36 "google.golang.org/grpc/credentials/internal"
35) 37)
36 38
37var ( 39// alpnProtoStr are the specified application level protocols for gRPC.
38 // alpnProtoStr are the specified application level protocols for gRPC. 40var alpnProtoStr = []string{"h2"}
39 alpnProtoStr = []string{"h2"}
40)
41 41
42// PerRPCCredentials defines the common interface for the credentials which need to 42// PerRPCCredentials defines the common interface for the credentials which need to
43// attach security information to every RPC (e.g., oauth2). 43// attach security information to every RPC (e.g., oauth2).
@@ -45,8 +45,9 @@ type PerRPCCredentials interface {
45 // GetRequestMetadata gets the current request metadata, refreshing 45 // GetRequestMetadata gets the current request metadata, refreshing
46 // tokens if required. This should be called by the transport layer on 46 // tokens if required. This should be called by the transport layer on
47 // each request, and the data should be populated in headers or other 47 // each request, and the data should be populated in headers or other
48 // context. uri is the URI of the entry point for the request. When 48 // context. If a status code is returned, it will be used as the status
49 // supported by the underlying implementation, ctx can be used for 49 // for the RPC. uri is the URI of the entry point for the request.
50 // When supported by the underlying implementation, ctx can be used for
50 // timeout and cancellation. 51 // timeout and cancellation.
51 // TODO(zhaoq): Define the set of the qualified keys instead of leaving 52 // TODO(zhaoq): Define the set of the qualified keys instead of leaving
52 // it as an arbitrary string. 53 // it as an arbitrary string.
@@ -74,11 +75,9 @@ type AuthInfo interface {
74 AuthType() string 75 AuthType() string
75} 76}
76 77
77var ( 78// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
78 // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC 79// and the caller should not close rawConn.
79 // and the caller should not close rawConn. 80var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
80 ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
81)
82 81
83// TransportCredentials defines the common interface for all the live gRPC wire 82// TransportCredentials defines the common interface for all the live gRPC wire
84// protocols and supported transport security protocols (e.g., TLS, SSL). 83// protocols and supported transport security protocols (e.g., TLS, SSL).
@@ -91,10 +90,14 @@ type TransportCredentials interface {
91 // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). 90 // (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
92 // If the returned error is a wrapper error, implementations should make sure that 91 // If the returned error is a wrapper error, implementations should make sure that
93 // the error implements Temporary() to have the correct retry behaviors. 92 // the error implements Temporary() to have the correct retry behaviors.
93 //
94 // If the returned net.Conn is closed, it MUST close the net.Conn provided.
94 ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) 95 ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
95 // ServerHandshake does the authentication handshake for servers. It returns 96 // ServerHandshake does the authentication handshake for servers. It returns
96 // the authenticated connection and the corresponding auth information about 97 // the authenticated connection and the corresponding auth information about
97 // the connection. 98 // the connection.
99 //
100 // If the returned net.Conn is closed, it MUST close the net.Conn provided.
98 ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) 101 ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
99 // Info provides the ProtocolInfo of this TransportCredentials. 102 // Info provides the ProtocolInfo of this TransportCredentials.
100 Info() ProtocolInfo 103 Info() ProtocolInfo
@@ -106,6 +109,25 @@ type TransportCredentials interface {
106 OverrideServerName(string) error 109 OverrideServerName(string) error
107} 110}
108 111
112// Bundle is a combination of TransportCredentials and PerRPCCredentials.
113//
114// It also contains a mode switching method, so it can be used as a combination
115// of different credential policies.
116//
117// Bundle cannot be used together with individual TransportCredentials.
118// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
119//
120// This API is experimental.
121type Bundle interface {
122 TransportCredentials() TransportCredentials
123 PerRPCCredentials() PerRPCCredentials
124 // NewWithMode should make a copy of Bundle, and switch mode. Modifying the
125 // existing Bundle may cause races.
126 //
127 // NewWithMode returns nil if the requested mode is not supported.
128 NewWithMode(mode string) (Bundle, error)
129}
130
109// TLSInfo contains the auth information for a TLS authenticated connection. 131// TLSInfo contains the auth information for a TLS authenticated connection.
110// It implements the AuthInfo interface. 132// It implements the AuthInfo interface.
111type TLSInfo struct { 133type TLSInfo struct {
@@ -117,6 +139,18 @@ func (t TLSInfo) AuthType() string {
117 return "tls" 139 return "tls"
118} 140}
119 141
142// GetSecurityValue returns security info requested by channelz.
143func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
144 v := &TLSChannelzSecurityValue{
145 StandardName: cipherSuiteLookup[t.State.CipherSuite],
146 }
147 // Currently there's no way to get LocalCertificate info from tls package.
148 if len(t.State.PeerCertificates) > 0 {
149 v.RemoteCertificate = t.State.PeerCertificates[0].Raw
150 }
151 return v
152}
153
120// tlsCreds is the credentials required for authenticating a connection using TLS. 154// tlsCreds is the credentials required for authenticating a connection using TLS.
121type tlsCreds struct { 155type tlsCreds struct {
122 // TLS configuration 156 // TLS configuration
@@ -131,15 +165,15 @@ func (c tlsCreds) Info() ProtocolInfo {
131 } 165 }
132} 166}
133 167
134func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { 168func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
135 // use local cfg to avoid clobbering ServerName if using multiple endpoints 169 // use local cfg to avoid clobbering ServerName if using multiple endpoints
136 cfg := cloneTLSConfig(c.config) 170 cfg := cloneTLSConfig(c.config)
137 if cfg.ServerName == "" { 171 if cfg.ServerName == "" {
138 colonPos := strings.LastIndex(addr, ":") 172 colonPos := strings.LastIndex(authority, ":")
139 if colonPos == -1 { 173 if colonPos == -1 {
140 colonPos = len(addr) 174 colonPos = len(authority)
141 } 175 }
142 cfg.ServerName = addr[:colonPos] 176 cfg.ServerName = authority[:colonPos]
143 } 177 }
144 conn := tls.Client(rawConn, cfg) 178 conn := tls.Client(rawConn, cfg)
145 errChannel := make(chan error, 1) 179 errChannel := make(chan error, 1)
@@ -154,7 +188,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net
154 case <-ctx.Done(): 188 case <-ctx.Done():
155 return nil, nil, ctx.Err() 189 return nil, nil, ctx.Err()
156 } 190 }
157 return conn, TLSInfo{conn.ConnectionState()}, nil 191 return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
158} 192}
159 193
160func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { 194func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@@ -162,7 +196,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
162 if err := conn.Handshake(); err != nil { 196 if err := conn.Handshake(); err != nil {
163 return nil, nil, err 197 return nil, nil, err
164 } 198 }
165 return conn, TLSInfo{conn.ConnectionState()}, nil 199 return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
166} 200}
167 201
168func (c *tlsCreds) Clone() TransportCredentials { 202func (c *tlsCreds) Clone() TransportCredentials {
@@ -217,3 +251,78 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
217 } 251 }
218 return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil 252 return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
219} 253}
254
255// ChannelzSecurityInfo defines the interface that security protocols should implement
256// in order to provide security info to channelz.
257type ChannelzSecurityInfo interface {
258 GetSecurityValue() ChannelzSecurityValue
259}
260
261// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
262// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
263// and *OtherChannelzSecurityValue.
264type ChannelzSecurityValue interface {
265 isChannelzSecurityValue()
266}
267
268// TLSChannelzSecurityValue defines the struct that TLS protocol should return
269// from GetSecurityValue(), containing security info like cipher and certificate used.
270type TLSChannelzSecurityValue struct {
271 StandardName string
272 LocalCertificate []byte
273 RemoteCertificate []byte
274}
275
276func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
277
278// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
279// from GetSecurityValue(), which contains protocol specific security info. Note
280// the Value field will be sent to users of channelz requesting channel info, and
281// thus sensitive info should better be avoided.
282type OtherChannelzSecurityValue struct {
283 Name string
284 Value proto.Message
285}
286
287func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
288
289var cipherSuiteLookup = map[uint16]string{
290 tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
291 tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
292 tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
293 tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
294 tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
295 tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
296 tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
297 tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
298 tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
299 tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
300 tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
301 tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
302 tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
303 tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
304 tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
305 tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
306 tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
307 tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
308 tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
309 tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
310 tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
311 tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
312 tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
313}
314
315// cloneTLSConfig returns a shallow clone of the exported
316// fields of cfg, ignoring the unexported sync.Once, which
317// contains a mutex and must not be copied.
318//
319// If cfg is nil, a new zero tls.Config is returned.
320//
321// TODO: inline this function if possible.
322func cloneTLSConfig(cfg *tls.Config) *tls.Config {
323 if cfg == nil {
324 return &tls.Config{}
325 }
326
327 return cfg.Clone()
328}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
deleted file mode 100644
index 60409aa..0000000
--- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
+++ /dev/null
@@ -1,60 +0,0 @@
1// +build go1.7
2// +build !go1.8
3
4/*
5 *
6 * Copyright 2016 gRPC authors.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 */
21
22package credentials
23
24import (
25 "crypto/tls"
26)
27
28// cloneTLSConfig returns a shallow clone of the exported
29// fields of cfg, ignoring the unexported sync.Once, which
30// contains a mutex and must not be copied.
31//
32// If cfg is nil, a new zero tls.Config is returned.
33func cloneTLSConfig(cfg *tls.Config) *tls.Config {
34 if cfg == nil {
35 return &tls.Config{}
36 }
37 return &tls.Config{
38 Rand: cfg.Rand,
39 Time: cfg.Time,
40 Certificates: cfg.Certificates,
41 NameToCertificate: cfg.NameToCertificate,
42 GetCertificate: cfg.GetCertificate,
43 RootCAs: cfg.RootCAs,
44 NextProtos: cfg.NextProtos,
45 ServerName: cfg.ServerName,
46 ClientAuth: cfg.ClientAuth,
47 ClientCAs: cfg.ClientCAs,
48 InsecureSkipVerify: cfg.InsecureSkipVerify,
49 CipherSuites: cfg.CipherSuites,
50 PreferServerCipherSuites: cfg.PreferServerCipherSuites,
51 SessionTicketsDisabled: cfg.SessionTicketsDisabled,
52 SessionTicketKey: cfg.SessionTicketKey,
53 ClientSessionCache: cfg.ClientSessionCache,
54 MinVersion: cfg.MinVersion,
55 MaxVersion: cfg.MaxVersion,
56 CurvePreferences: cfg.CurvePreferences,
57 DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
58 Renegotiation: cfg.Renegotiation,
59 }
60}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
deleted file mode 100644
index d6bbcc9..0000000
--- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
+++ /dev/null
@@ -1,57 +0,0 @@
1// +build !go1.7
2
3/*
4 *
5 * Copyright 2016 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package credentials
22
23import (
24 "crypto/tls"
25)
26
27// cloneTLSConfig returns a shallow clone of the exported
28// fields of cfg, ignoring the unexported sync.Once, which
29// contains a mutex and must not be copied.
30//
31// If cfg is nil, a new zero tls.Config is returned.
32func cloneTLSConfig(cfg *tls.Config) *tls.Config {
33 if cfg == nil {
34 return &tls.Config{}
35 }
36 return &tls.Config{
37 Rand: cfg.Rand,
38 Time: cfg.Time,
39 Certificates: cfg.Certificates,
40 NameToCertificate: cfg.NameToCertificate,
41 GetCertificate: cfg.GetCertificate,
42 RootCAs: cfg.RootCAs,
43 NextProtos: cfg.NextProtos,
44 ServerName: cfg.ServerName,
45 ClientAuth: cfg.ClientAuth,
46 ClientCAs: cfg.ClientCAs,
47 InsecureSkipVerify: cfg.InsecureSkipVerify,
48 CipherSuites: cfg.CipherSuites,
49 PreferServerCipherSuites: cfg.PreferServerCipherSuites,
50 SessionTicketsDisabled: cfg.SessionTicketsDisabled,
51 SessionTicketKey: cfg.SessionTicketKey,
52 ClientSessionCache: cfg.ClientSessionCache,
53 MinVersion: cfg.MinVersion,
54 MaxVersion: cfg.MaxVersion,
55 CurvePreferences: cfg.CurvePreferences,
56 }
57}
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
new file mode 100644
index 0000000..2f4472b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
@@ -0,0 +1,61 @@
1// +build !appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21// Package internal contains credentials-internal code.
22package internal
23
24import (
25 "net"
26 "syscall"
27)
28
29type sysConn = syscall.Conn
30
31// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
32// SyscallConn() (the method in interface syscall.Conn) is explicitly
33// implemented on this type,
34//
35// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
36// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
37// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
38// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
39// help here).
40type syscallConn struct {
41 net.Conn
42 // sysConn is a type alias of syscall.Conn. It's necessary because the name
43 // `Conn` collides with `net.Conn`.
44 sysConn
45}
46
47// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
48// implements syscall.Conn. rawConn will be used to support syscall, and newConn
49// will be used for read/write.
50//
51// This function returns newConn if rawConn doesn't implement syscall.Conn.
52func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
53 sysConn, ok := rawConn.(syscall.Conn)
54 if !ok {
55 return newConn
56 }
57 return &syscallConn{
58 Conn: newConn,
59 sysConn: sysConn,
60 }
61}
diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
index a537b08..d4346e9 100644
--- a/vendor/google.golang.org/grpc/naming/go17.go
+++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
@@ -1,8 +1,8 @@
1// +build go1.6, !go1.8 1// +build appengine
2 2
3/* 3/*
4 * 4 *
5 * Copyright 2017 gRPC authors. 5 * Copyright 2018 gRPC authors.
6 * 6 *
7 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License. 8 * you may not use this file except in compliance with the License.
@@ -18,17 +18,13 @@
18 * 18 *
19 */ 19 */
20 20
21package naming 21package internal
22 22
23import ( 23import (
24 "net" 24 "net"
25
26 "golang.org/x/net/context"
27) 25)
28 26
29var ( 27// WrapSyscallConn returns newConn on appengine.
30 lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } 28func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
31 lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { 29 return newConn
32 return net.LookupSRV(service, proto, name) 30}
33 }
34)
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
new file mode 100644
index 0000000..f286462
--- /dev/null
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -0,0 +1,492 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package grpc
20
21import (
22 "context"
23 "fmt"
24 "net"
25 "time"
26
27 "google.golang.org/grpc/balancer"
28 "google.golang.org/grpc/credentials"
29 "google.golang.org/grpc/internal"
30 "google.golang.org/grpc/internal/backoff"
31 "google.golang.org/grpc/internal/envconfig"
32 "google.golang.org/grpc/internal/transport"
33 "google.golang.org/grpc/keepalive"
34 "google.golang.org/grpc/resolver"
35 "google.golang.org/grpc/stats"
36)
37
38// dialOptions configure a Dial call. dialOptions are set by the DialOption
39// values passed to Dial.
40type dialOptions struct {
41 unaryInt UnaryClientInterceptor
42 streamInt StreamClientInterceptor
43 cp Compressor
44 dc Decompressor
45 bs backoff.Strategy
46 block bool
47 insecure bool
48 timeout time.Duration
49 scChan <-chan ServiceConfig
50 authority string
51 copts transport.ConnectOptions
52 callOptions []CallOption
53 // This is used by v1 balancer dial option WithBalancer to support v1
54 // balancer, and also by WithBalancerName dial option.
55 balancerBuilder balancer.Builder
56 // This is to support grpclb.
57 resolverBuilder resolver.Builder
58 reqHandshake envconfig.RequireHandshakeSetting
59 channelzParentID int64
60 disableServiceConfig bool
61 disableRetry bool
62 disableHealthCheck bool
63 healthCheckFunc internal.HealthChecker
64}
65
66// DialOption configures how we set up the connection.
67type DialOption interface {
68 apply(*dialOptions)
69}
70
71// EmptyDialOption does not alter the dial configuration. It can be embedded in
72// another structure to build custom dial options.
73//
74// This API is EXPERIMENTAL.
75type EmptyDialOption struct{}
76
77func (EmptyDialOption) apply(*dialOptions) {}
78
79// funcDialOption wraps a function that modifies dialOptions into an
80// implementation of the DialOption interface.
81type funcDialOption struct {
82 f func(*dialOptions)
83}
84
85func (fdo *funcDialOption) apply(do *dialOptions) {
86 fdo.f(do)
87}
88
89func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
90 return &funcDialOption{
91 f: f,
92 }
93}
94
95// WithWaitForHandshake blocks until the initial settings frame is received from
96// the server before assigning RPCs to the connection.
97//
98// Deprecated: this is the default behavior, and this option will be removed
99// after the 1.18 release.
100func WithWaitForHandshake() DialOption {
101 return newFuncDialOption(func(o *dialOptions) {
102 o.reqHandshake = envconfig.RequireHandshakeOn
103 })
104}
105
106// WithWriteBufferSize determines how much data can be batched before doing a
107// write on the wire. The corresponding memory allocation for this buffer will
108// be twice the size to keep syscalls low. The default value for this buffer is
109// 32KB.
110//
111// Zero will disable the write buffer such that each write will be on underlying
112// connection. Note: A Send call may not directly translate to a write.
113func WithWriteBufferSize(s int) DialOption {
114 return newFuncDialOption(func(o *dialOptions) {
115 o.copts.WriteBufferSize = s
116 })
117}
118
119// WithReadBufferSize lets you set the size of read buffer, this determines how
120// much data can be read at most for each read syscall.
121//
122// The default value for this buffer is 32KB. Zero will disable read buffer for
123// a connection so data framer can access the underlying conn directly.
124func WithReadBufferSize(s int) DialOption {
125 return newFuncDialOption(func(o *dialOptions) {
126 o.copts.ReadBufferSize = s
127 })
128}
129
130// WithInitialWindowSize returns a DialOption which sets the value for initial
131// window size on a stream. The lower bound for window size is 64K and any value
132// smaller than that will be ignored.
133func WithInitialWindowSize(s int32) DialOption {
134 return newFuncDialOption(func(o *dialOptions) {
135 o.copts.InitialWindowSize = s
136 })
137}
138
139// WithInitialConnWindowSize returns a DialOption which sets the value for
140// initial window size on a connection. The lower bound for window size is 64K
141// and any value smaller than that will be ignored.
142func WithInitialConnWindowSize(s int32) DialOption {
143 return newFuncDialOption(func(o *dialOptions) {
144 o.copts.InitialConnWindowSize = s
145 })
146}
147
148// WithMaxMsgSize returns a DialOption which sets the maximum message size the
149// client can receive.
150//
151// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
152func WithMaxMsgSize(s int) DialOption {
153 return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
154}
155
156// WithDefaultCallOptions returns a DialOption which sets the default
157// CallOptions for calls over the connection.
158func WithDefaultCallOptions(cos ...CallOption) DialOption {
159 return newFuncDialOption(func(o *dialOptions) {
160 o.callOptions = append(o.callOptions, cos...)
161 })
162}
163
164// WithCodec returns a DialOption which sets a codec for message marshaling and
165// unmarshaling.
166//
167// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead.
168func WithCodec(c Codec) DialOption {
169 return WithDefaultCallOptions(CallCustomCodec(c))
170}
171
172// WithCompressor returns a DialOption which sets a Compressor to use for
173// message compression. It has lower priority than the compressor set by the
174// UseCompressor CallOption.
175//
176// Deprecated: use UseCompressor instead.
177func WithCompressor(cp Compressor) DialOption {
178 return newFuncDialOption(func(o *dialOptions) {
179 o.cp = cp
180 })
181}
182
183// WithDecompressor returns a DialOption which sets a Decompressor to use for
184// incoming message decompression. If incoming response messages are encoded
185// using the decompressor's Type(), it will be used. Otherwise, the message
186// encoding will be used to look up the compressor registered via
187// encoding.RegisterCompressor, which will then be used to decompress the
188// message. If no compressor is registered for the encoding, an Unimplemented
189// status error will be returned.
190//
191// Deprecated: use encoding.RegisterCompressor instead.
192func WithDecompressor(dc Decompressor) DialOption {
193 return newFuncDialOption(func(o *dialOptions) {
194 o.dc = dc
195 })
196}
197
198// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
199// Name resolver will be ignored if this DialOption is specified.
200//
201// Deprecated: use the new balancer APIs in balancer package and
202// WithBalancerName.
203func WithBalancer(b Balancer) DialOption {
204 return newFuncDialOption(func(o *dialOptions) {
205 o.balancerBuilder = &balancerWrapperBuilder{
206 b: b,
207 }
208 })
209}
210
211// WithBalancerName sets the balancer that the ClientConn will be initialized
212// with. Balancer registered with balancerName will be used. This function
213// panics if no balancer was registered by balancerName.
214//
215// The balancer cannot be overridden by balancer option specified by service
216// config.
217//
218// This is an EXPERIMENTAL API.
219func WithBalancerName(balancerName string) DialOption {
220 builder := balancer.Get(balancerName)
221 if builder == nil {
222 panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
223 }
224 return newFuncDialOption(func(o *dialOptions) {
225 o.balancerBuilder = builder
226 })
227}
228
229// withResolverBuilder is only for grpclb.
230func withResolverBuilder(b resolver.Builder) DialOption {
231 return newFuncDialOption(func(o *dialOptions) {
232 o.resolverBuilder = b
233 })
234}
235
236// WithServiceConfig returns a DialOption which has a channel to read the
237// service configuration.
238//
239// Deprecated: service config should be received through name resolver, as
240// specified here.
241// https://github.com/grpc/grpc/blob/master/doc/service_config.md
242func WithServiceConfig(c <-chan ServiceConfig) DialOption {
243 return newFuncDialOption(func(o *dialOptions) {
244 o.scChan = c
245 })
246}
247
248// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
249// when backing off after failed connection attempts.
250func WithBackoffMaxDelay(md time.Duration) DialOption {
251 return WithBackoffConfig(BackoffConfig{MaxDelay: md})
252}
253
254// WithBackoffConfig configures the dialer to use the provided backoff
255// parameters after connection failures.
256//
257// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
258// for use.
259func WithBackoffConfig(b BackoffConfig) DialOption {
260 return withBackoff(backoff.Exponential{
261 MaxDelay: b.MaxDelay,
262 })
263}
264
265// withBackoff sets the backoff strategy used for connectRetryNum after a failed
266// connection attempt.
267//
268// This can be exported if arbitrary backoff strategies are allowed by gRPC.
269func withBackoff(bs backoff.Strategy) DialOption {
270 return newFuncDialOption(func(o *dialOptions) {
271 o.bs = bs
272 })
273}
274
275// WithBlock returns a DialOption which makes caller of Dial blocks until the
276// underlying connection is up. Without this, Dial returns immediately and
277// connecting the server happens in background.
278func WithBlock() DialOption {
279 return newFuncDialOption(func(o *dialOptions) {
280 o.block = true
281 })
282}
283
284// WithInsecure returns a DialOption which disables transport security for this
285// ClientConn. Note that transport security is required unless WithInsecure is
286// set.
287func WithInsecure() DialOption {
288 return newFuncDialOption(func(o *dialOptions) {
289 o.insecure = true
290 })
291}
292
293// WithTransportCredentials returns a DialOption which configures a connection
294// level security credentials (e.g., TLS/SSL). This should not be used together
295// with WithCredentialsBundle.
296func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
297 return newFuncDialOption(func(o *dialOptions) {
298 o.copts.TransportCredentials = creds
299 })
300}
301
302// WithPerRPCCredentials returns a DialOption which sets credentials and places
303// auth state on each outbound RPC.
304func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
305 return newFuncDialOption(func(o *dialOptions) {
306 o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
307 })
308}
309
310// WithCredentialsBundle returns a DialOption to set a credentials bundle for
311// the ClientConn.WithCreds. This should not be used together with
312// WithTransportCredentials.
313//
314// This API is experimental.
315func WithCredentialsBundle(b credentials.Bundle) DialOption {
316 return newFuncDialOption(func(o *dialOptions) {
317 o.copts.CredsBundle = b
318 })
319}
320
321// WithTimeout returns a DialOption that configures a timeout for dialing a
322// ClientConn initially. This is valid if and only if WithBlock() is present.
323//
324// Deprecated: use DialContext and context.WithTimeout instead.
325func WithTimeout(d time.Duration) DialOption {
326 return newFuncDialOption(func(o *dialOptions) {
327 o.timeout = d
328 })
329}
330
331func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
332 return newFuncDialOption(func(o *dialOptions) {
333 o.copts.Dialer = f
334 })
335}
336
337func init() {
338 internal.WithContextDialer = withContextDialer
339 internal.WithResolverBuilder = withResolverBuilder
340 internal.WithHealthCheckFunc = withHealthCheckFunc
341}
342
343// WithDialer returns a DialOption that specifies a function to use for dialing
344// network addresses. If FailOnNonTempDialError() is set to true, and an error
345// is returned by f, gRPC checks the error's Temporary() method to decide if it
346// should try to reconnect to the network address.
347func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
348 return withContextDialer(
349 func(ctx context.Context, addr string) (net.Conn, error) {
350 if deadline, ok := ctx.Deadline(); ok {
351 return f(addr, deadline.Sub(time.Now()))
352 }
353 return f(addr, 0)
354 })
355}
356
357// WithStatsHandler returns a DialOption that specifies the stats handler for
358// all the RPCs and underlying network connections in this ClientConn.
359func WithStatsHandler(h stats.Handler) DialOption {
360 return newFuncDialOption(func(o *dialOptions) {
361 o.copts.StatsHandler = h
362 })
363}
364
365// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
366// non-temporary dial errors. If f is true, and dialer returns a non-temporary
367// error, gRPC will fail the connection to the network address and won't try to
368// reconnect. The default value of FailOnNonTempDialError is false.
369//
370// FailOnNonTempDialError only affects the initial dial, and does not do
371// anything useful unless you are also using WithBlock().
372//
373// This is an EXPERIMENTAL API.
374func FailOnNonTempDialError(f bool) DialOption {
375 return newFuncDialOption(func(o *dialOptions) {
376 o.copts.FailOnNonTempDialError = f
377 })
378}
379
380// WithUserAgent returns a DialOption that specifies a user agent string for all
381// the RPCs.
382func WithUserAgent(s string) DialOption {
383 return newFuncDialOption(func(o *dialOptions) {
384 o.copts.UserAgent = s
385 })
386}
387
388// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
389// for the client transport.
390func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
391 return newFuncDialOption(func(o *dialOptions) {
392 o.copts.KeepaliveParams = kp
393 })
394}
395
396// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
397// unary RPCs.
398func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
399 return newFuncDialOption(func(o *dialOptions) {
400 o.unaryInt = f
401 })
402}
403
404// WithStreamInterceptor returns a DialOption that specifies the interceptor for
405// streaming RPCs.
406func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
407 return newFuncDialOption(func(o *dialOptions) {
408 o.streamInt = f
409 })
410}
411
412// WithAuthority returns a DialOption that specifies the value to be used as the
413// :authority pseudo-header. This value only works with WithInsecure and has no
414// effect if TransportCredentials are present.
415func WithAuthority(a string) DialOption {
416 return newFuncDialOption(func(o *dialOptions) {
417 o.authority = a
418 })
419}
420
421// WithChannelzParentID returns a DialOption that specifies the channelz ID of
422// current ClientConn's parent. This function is used in nested channel creation
423// (e.g. grpclb dial).
424func WithChannelzParentID(id int64) DialOption {
425 return newFuncDialOption(func(o *dialOptions) {
426 o.channelzParentID = id
427 })
428}
429
430// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
431// service config provided by the resolver and provides a hint to the resolver
432// to not fetch service configs.
433func WithDisableServiceConfig() DialOption {
434 return newFuncDialOption(func(o *dialOptions) {
435 o.disableServiceConfig = true
436 })
437}
438
439// WithDisableRetry returns a DialOption that disables retries, even if the
440// service config enables them. This does not impact transparent retries, which
441// will happen automatically if no data is written to the wire or if the RPC is
442// unprocessed by the remote server.
443//
444// Retry support is currently disabled by default, but will be enabled by
445// default in the future. Until then, it may be enabled by setting the
446// environment variable "GRPC_GO_RETRY" to "on".
447//
448// This API is EXPERIMENTAL.
449func WithDisableRetry() DialOption {
450 return newFuncDialOption(func(o *dialOptions) {
451 o.disableRetry = true
452 })
453}
454
455// WithMaxHeaderListSize returns a DialOption that specifies the maximum
456// (uncompressed) size of header list that the client is prepared to accept.
457func WithMaxHeaderListSize(s uint32) DialOption {
458 return newFuncDialOption(func(o *dialOptions) {
459 o.copts.MaxHeaderListSize = &s
460 })
461}
462
463// WithDisableHealthCheck disables the LB channel health checking for all SubConns of this ClientConn.
464//
465// This API is EXPERIMENTAL.
466func WithDisableHealthCheck() DialOption {
467 return newFuncDialOption(func(o *dialOptions) {
468 o.disableHealthCheck = true
469 })
470}
471
472// withHealthCheckFunc replaces the default health check function with the provided one. It makes
473// tests easier to change the health check function.
474//
475// For testing purpose only.
476func withHealthCheckFunc(f internal.HealthChecker) DialOption {
477 return newFuncDialOption(func(o *dialOptions) {
478 o.healthCheckFunc = f
479 })
480}
481
482func defaultDialOptions() dialOptions {
483 return dialOptions{
484 disableRetry: !envconfig.Retry,
485 reqHandshake: envconfig.RequireHandshake,
486 healthCheckFunc: internal.HealthCheckFunc,
487 copts: transport.ConnectOptions{
488 WriteBufferSize: defaultWriteBufSize,
489 ReadBufferSize: defaultReadBufSize,
490 },
491 }
492}
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
new file mode 100644
index 0000000..ade8b7c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -0,0 +1,118 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package encoding defines the interface for the compressor and codec, and
20// functions to register and retrieve compressors and codecs.
21//
22// This package is EXPERIMENTAL.
23package encoding
24
25import (
26 "io"
27 "strings"
28)
29
// Identity specifies the optional encoding for uncompressed streams.
// It is intended for grpc internal use only.
//
// NOTE(review): presumably callers compare compressor names against this
// constant to skip compression — confirm at the use sites outside this file.
const Identity = "identity"
33
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
type Compressor interface {
	// Compress returns an io.WriteCloser; data written to it is compressed
	// and forwarded to w. If an error occurs while initializing the
	// compressor, that error is returned instead.
	Compress(w io.Writer) (io.WriteCloser, error)
	// Decompress reads data from r, decompresses it, and provides the
	// uncompressed data via the returned io.Reader. If an error occurs while
	// initializing the decompressor, that error is returned instead.
	Decompress(r io.Reader) (io.Reader, error)
	// Name is the name of the compression codec and is used to set the content
	// coding header. The result must be static; the result cannot change
	// between calls.
	Name() string
}
50
// registeredCompressor maps a compressor name to its implementation; written
// only by RegisterCompressor (init-time), read by GetCompressor.
var registeredCompressor = make(map[string]Compressor)
52
53// RegisterCompressor registers the compressor with gRPC by its name. It can
54// be activated when sending an RPC via grpc.UseCompressor(). It will be
55// automatically accessed when receiving a message based on the content coding
56// header. Servers also use it to send a response with the same encoding as
57// the request.
58//
59// NOTE: this function must only be called during initialization time (i.e. in
60// an init() function), and is not thread-safe. If multiple Compressors are
61// registered with the same name, the one registered last will take effect.
62func RegisterCompressor(c Compressor) {
63 registeredCompressor[c.Name()] = c
64}
65
66// GetCompressor returns Compressor for the given compressor name.
67func GetCompressor(name string) Compressor {
68 return registeredCompressor[name]
69}
70
// Codec defines the interface gRPC uses to encode and decode messages. Note
// that implementations of this interface must be thread safe; a Codec's
// methods can be called from concurrent goroutines.
type Codec interface {
	// Marshal returns the wire format of v.
	Marshal(v interface{}) ([]byte, error)
	// Unmarshal parses the wire format in data into v.
	Unmarshal(data []byte, v interface{}) error
	// Name returns the name of the Codec implementation. The returned string
	// will be used as part of content type in transmission. The result must be
	// static; the result cannot change between calls.
	Name() string
}
84
// registeredCodecs maps a lowercase content-subtype to its Codec; written
// only by RegisterCodec (init-time), read by GetCodec.
var registeredCodecs = make(map[string]Codec)
86
87// RegisterCodec registers the provided Codec for use with all gRPC clients and
88// servers.
89//
90// The Codec will be stored and looked up by result of its Name() method, which
91// should match the content-subtype of the encoding handled by the Codec. This
92// is case-insensitive, and is stored and looked up as lowercase. If the
93// result of calling Name() is an empty string, RegisterCodec will panic. See
94// Content-Type on
95// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
96// more details.
97//
98// NOTE: this function must only be called during initialization time (i.e. in
99// an init() function), and is not thread-safe. If multiple Compressors are
100// registered with the same name, the one registered last will take effect.
101func RegisterCodec(codec Codec) {
102 if codec == nil {
103 panic("cannot register a nil Codec")
104 }
105 contentSubtype := strings.ToLower(codec.Name())
106 if contentSubtype == "" {
107 panic("cannot register Codec with empty string result for String()")
108 }
109 registeredCodecs[contentSubtype] = codec
110}
111
112// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
113// registered for the content-subtype.
114//
115// The content-subtype is expected to be lowercase.
116func GetCodec(contentSubtype string) Codec {
117 return registeredCodecs[contentSubtype]
118}
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
new file mode 100644
index 0000000..66b97a6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -0,0 +1,110 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package proto defines the protobuf codec. Importing this package will
20// register the codec.
21package proto
22
23import (
24 "math"
25 "sync"
26
27 "github.com/golang/protobuf/proto"
28 "google.golang.org/grpc/encoding"
29)
30
// Name is the name registered for the proto codec (see init below, which
// registers it via encoding.RegisterCodec).
const Name = "proto"
33
// init registers the proto codec with the encoding package as soon as this
// package is imported.
func init() {
	encoding.RegisterCodec(codec{})
}
37
// codec is a Codec implementation with protobuf. It is the default codec for
// gRPC. The type is stateless (empty struct); per-call scratch space comes
// from protoBufferPool.
type codec struct{}
40
// cachedProtoBuffer pairs a proto.Buffer with the size of the last message it
// marshaled, so the next marshal can pre-size its output slice.
type cachedProtoBuffer struct {
	// lastMarshaledSize is the byte length of the previous marshal output,
	// capped at math.MaxInt32 by capToMaxInt32.
	lastMarshaledSize uint32
	proto.Buffer
}
45
46func capToMaxInt32(val int) uint32 {
47 if val > math.MaxInt32 {
48 return uint32(math.MaxInt32)
49 }
50 return uint32(val)
51}
52
53func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
54 protoMsg := v.(proto.Message)
55 newSlice := make([]byte, 0, cb.lastMarshaledSize)
56
57 cb.SetBuf(newSlice)
58 cb.Reset()
59 if err := cb.Marshal(protoMsg); err != nil {
60 return nil, err
61 }
62 out := cb.Bytes()
63 cb.lastMarshaledSize = capToMaxInt32(len(out))
64 return out, nil
65}
66
67func (codec) Marshal(v interface{}) ([]byte, error) {
68 if pm, ok := v.(proto.Marshaler); ok {
69 // object can marshal itself, no need for buffer
70 return pm.Marshal()
71 }
72
73 cb := protoBufferPool.Get().(*cachedProtoBuffer)
74 out, err := marshal(v, cb)
75
76 // put back buffer and lose the ref to the slice
77 cb.SetBuf(nil)
78 protoBufferPool.Put(cb)
79 return out, err
80}
81
82func (codec) Unmarshal(data []byte, v interface{}) error {
83 protoMsg := v.(proto.Message)
84 protoMsg.Reset()
85
86 if pu, ok := protoMsg.(proto.Unmarshaler); ok {
87 // object can unmarshal itself, no need for buffer
88 return pu.Unmarshal(data)
89 }
90
91 cb := protoBufferPool.Get().(*cachedProtoBuffer)
92 cb.SetBuf(data)
93 err := cb.Unmarshal(protoMsg)
94 cb.SetBuf(nil)
95 protoBufferPool.Put(cb)
96 return err
97}
98
// Name returns Name ("proto"), the registered name of this codec.
func (codec) Name() string {
	return Name
}
102
// protoBufferPool recycles cachedProtoBuffers across Marshal/Unmarshal calls
// to cut per-message allocations. New buffers start with a 16-byte size hint.
var protoBufferPool = &sync.Pool{
	New: func() interface{} {
		return &cachedProtoBuffer{
			Buffer:            proto.Buffer{},
			lastMarshaledSize: 16,
		}
	},
}
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
new file mode 100644
index 0000000..f296dcf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/go.mod
@@ -0,0 +1,20 @@
1module google.golang.org/grpc
2
3require (
4 cloud.google.com/go v0.26.0 // indirect
5 github.com/client9/misspell v0.3.4
6 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
7 github.com/golang/mock v1.1.1
8 github.com/golang/protobuf v1.2.0
9 github.com/kisielk/gotool v1.0.0 // indirect
10 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3
11 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d
12 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
13 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
14 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522
15 golang.org/x/text v0.3.0 // indirect
16 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52
17 google.golang.org/appengine v1.1.0 // indirect
18 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
19 honnef.co/go/tools v0.0.0-20180728063816-88497007e858
20)
diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum
new file mode 100644
index 0000000..bfb6bb7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/go.sum
@@ -0,0 +1,32 @@
1cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
2cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
3github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
4github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
5github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
6github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
7github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
8github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
9github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
10github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
11github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
12github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
13golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 h1:x/bBzNauLQAlE3fLku/xy92Y8QwKX5HZymrMz2IiKFc=
14golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
15golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
16golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
17golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
18golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
19golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
20golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
21golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE=
22golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
23golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
24golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
25golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 h1:JG/0uqcGdTNgq7FdU+61l5Pdmb8putNZlXb65bJBROs=
26golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
27google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
28google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
29google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
30google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
31honnef.co/go/tools v0.0.0-20180728063816-88497007e858 h1:wN+eVZ7U+gqdqkec6C6VXR1OFf9a5Ul9ETzeYsYv20g=
32honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go
deleted file mode 100644
index f3dbf21..0000000
--- a/vendor/google.golang.org/grpc/go16.go
+++ /dev/null
@@ -1,98 +0,0 @@
1// +build go1.6,!go1.7
2
3/*
4 *
5 * Copyright 2016 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package grpc
22
23import (
24 "fmt"
25 "io"
26 "net"
27 "net/http"
28 "os"
29
30 "golang.org/x/net/context"
31 "google.golang.org/grpc/codes"
32 "google.golang.org/grpc/status"
33 "google.golang.org/grpc/transport"
34)
35
36// dialContext connects to the address on the named network.
37func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
38 return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
39}
40
41func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
42 req.Cancel = ctx.Done()
43 if err := req.Write(conn); err != nil {
44 return fmt.Errorf("failed to write the HTTP request: %v", err)
45 }
46 return nil
47}
48
49// toRPCErr converts an error into an error from the status package.
50func toRPCErr(err error) error {
51 if _, ok := status.FromError(err); ok {
52 return err
53 }
54 switch e := err.(type) {
55 case transport.StreamError:
56 return status.Error(e.Code, e.Desc)
57 case transport.ConnectionError:
58 return status.Error(codes.Unavailable, e.Desc)
59 default:
60 switch err {
61 case context.DeadlineExceeded:
62 return status.Error(codes.DeadlineExceeded, err.Error())
63 case context.Canceled:
64 return status.Error(codes.Canceled, err.Error())
65 case ErrClientConnClosing:
66 return status.Error(codes.FailedPrecondition, err.Error())
67 }
68 }
69 return status.Error(codes.Unknown, err.Error())
70}
71
72// convertCode converts a standard Go error into its canonical code. Note that
73// this is only used to translate the error returned by the server applications.
74func convertCode(err error) codes.Code {
75 switch err {
76 case nil:
77 return codes.OK
78 case io.EOF:
79 return codes.OutOfRange
80 case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
81 return codes.FailedPrecondition
82 case os.ErrInvalid:
83 return codes.InvalidArgument
84 case context.Canceled:
85 return codes.Canceled
86 case context.DeadlineExceeded:
87 return codes.DeadlineExceeded
88 }
89 switch {
90 case os.IsExist(err):
91 return codes.AlreadyExists
92 case os.IsNotExist(err):
93 return codes.NotFound
94 case os.IsPermission(err):
95 return codes.PermissionDenied
96 }
97 return codes.Unknown
98}
diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go
deleted file mode 100644
index a3421d9..0000000
--- a/vendor/google.golang.org/grpc/go17.go
+++ /dev/null
@@ -1,98 +0,0 @@
1// +build go1.7
2
3/*
4 *
5 * Copyright 2016 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package grpc
22
23import (
24 "context"
25 "io"
26 "net"
27 "net/http"
28 "os"
29
30 netctx "golang.org/x/net/context"
31 "google.golang.org/grpc/codes"
32 "google.golang.org/grpc/status"
33 "google.golang.org/grpc/transport"
34)
35
36// dialContext connects to the address on the named network.
37func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
38 return (&net.Dialer{}).DialContext(ctx, network, address)
39}
40
41func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
42 req = req.WithContext(ctx)
43 if err := req.Write(conn); err != nil {
44 return err
45 }
46 return nil
47}
48
49// toRPCErr converts an error into an error from the status package.
50func toRPCErr(err error) error {
51 if _, ok := status.FromError(err); ok {
52 return err
53 }
54 switch e := err.(type) {
55 case transport.StreamError:
56 return status.Error(e.Code, e.Desc)
57 case transport.ConnectionError:
58 return status.Error(codes.Unavailable, e.Desc)
59 default:
60 switch err {
61 case context.DeadlineExceeded, netctx.DeadlineExceeded:
62 return status.Error(codes.DeadlineExceeded, err.Error())
63 case context.Canceled, netctx.Canceled:
64 return status.Error(codes.Canceled, err.Error())
65 case ErrClientConnClosing:
66 return status.Error(codes.FailedPrecondition, err.Error())
67 }
68 }
69 return status.Error(codes.Unknown, err.Error())
70}
71
72// convertCode converts a standard Go error into its canonical code. Note that
73// this is only used to translate the error returned by the server applications.
74func convertCode(err error) codes.Code {
75 switch err {
76 case nil:
77 return codes.OK
78 case io.EOF:
79 return codes.OutOfRange
80 case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
81 return codes.FailedPrecondition
82 case os.ErrInvalid:
83 return codes.InvalidArgument
84 case context.Canceled, netctx.Canceled:
85 return codes.Canceled
86 case context.DeadlineExceeded, netctx.DeadlineExceeded:
87 return codes.DeadlineExceeded
88 }
89 switch {
90 case os.IsExist(err):
91 return codes.AlreadyExists
92 case os.IsNotExist(err):
93 return codes.NotFound
94 case os.IsPermission(err):
95 return codes.PermissionDenied
96 }
97 return codes.Unknown
98}
diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go
deleted file mode 100644
index f7b6b7d..0000000
--- a/vendor/google.golang.org/grpc/grpclb.go
+++ /dev/null
@@ -1,737 +0,0 @@
1/*
2 *
3 * Copyright 2016 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package grpc
20
21import (
22 "errors"
23 "fmt"
24 "math/rand"
25 "net"
26 "sync"
27 "time"
28
29 "golang.org/x/net/context"
30 "google.golang.org/grpc/codes"
31 lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
32 "google.golang.org/grpc/grpclog"
33 "google.golang.org/grpc/metadata"
34 "google.golang.org/grpc/naming"
35)
36
37// Client API for LoadBalancer service.
38// Mostly copied from generated pb.go file.
39// To avoid circular dependency.
40type loadBalancerClient struct {
41 cc *ClientConn
42}
43
44func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
45 desc := &StreamDesc{
46 StreamName: "BalanceLoad",
47 ServerStreams: true,
48 ClientStreams: true,
49 }
50 stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
51 if err != nil {
52 return nil, err
53 }
54 x := &balanceLoadClientStream{stream}
55 return x, nil
56}
57
58type balanceLoadClientStream struct {
59 ClientStream
60}
61
62func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
63 return x.ClientStream.SendMsg(m)
64}
65
66func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
67 m := new(lbpb.LoadBalanceResponse)
68 if err := x.ClientStream.RecvMsg(m); err != nil {
69 return nil, err
70 }
71 return m, nil
72}
73
74// NewGRPCLBBalancer creates a grpclb load balancer.
75func NewGRPCLBBalancer(r naming.Resolver) Balancer {
76 return &balancer{
77 r: r,
78 }
79}
80
81type remoteBalancerInfo struct {
82 addr string
83 // the server name used for authentication with the remote LB server.
84 name string
85}
86
87// grpclbAddrInfo consists of the information of a backend server.
88type grpclbAddrInfo struct {
89 addr Address
90 connected bool
91 // dropForRateLimiting indicates whether this particular request should be
92 // dropped by the client for rate limiting.
93 dropForRateLimiting bool
94 // dropForLoadBalancing indicates whether this particular request should be
95 // dropped by the client for load balancing.
96 dropForLoadBalancing bool
97}
98
99type balancer struct {
100 r naming.Resolver
101 target string
102 mu sync.Mutex
103 seq int // a sequence number to make sure addrCh does not get stale addresses.
104 w naming.Watcher
105 addrCh chan []Address
106 rbs []remoteBalancerInfo
107 addrs []*grpclbAddrInfo
108 next int
109 waitCh chan struct{}
110 done bool
111 expTimer *time.Timer
112 rand *rand.Rand
113
114 clientStats lbpb.ClientStats
115}
116
117func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
118 updates, err := w.Next()
119 if err != nil {
120 grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
121 return err
122 }
123 b.mu.Lock()
124 defer b.mu.Unlock()
125 if b.done {
126 return ErrClientConnClosing
127 }
128 for _, update := range updates {
129 switch update.Op {
130 case naming.Add:
131 var exist bool
132 for _, v := range b.rbs {
133 // TODO: Is the same addr with different server name a different balancer?
134 if update.Addr == v.addr {
135 exist = true
136 break
137 }
138 }
139 if exist {
140 continue
141 }
142 md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB)
143 if !ok {
144 // TODO: Revisit the handling here and may introduce some fallback mechanism.
145 grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
146 continue
147 }
148 switch md.AddrType {
149 case naming.Backend:
150 // TODO: Revisit the handling here and may introduce some fallback mechanism.
151 grpclog.Errorf("The name resolution does not give grpclb addresses")
152 continue
153 case naming.GRPCLB:
154 b.rbs = append(b.rbs, remoteBalancerInfo{
155 addr: update.Addr,
156 name: md.ServerName,
157 })
158 default:
159 grpclog.Errorf("Received unknow address type %d", md.AddrType)
160 continue
161 }
162 case naming.Delete:
163 for i, v := range b.rbs {
164 if update.Addr == v.addr {
165 copy(b.rbs[i:], b.rbs[i+1:])
166 b.rbs = b.rbs[:len(b.rbs)-1]
167 break
168 }
169 }
170 default:
171 grpclog.Errorf("Unknown update.Op %v", update.Op)
172 }
173 }
174 // TODO: Fall back to the basic round-robin load balancing if the resulting address is
175 // not a load balancer.
176 select {
177 case <-ch:
178 default:
179 }
180 ch <- b.rbs
181 return nil
182}
183
184func (b *balancer) serverListExpire(seq int) {
185 b.mu.Lock()
186 defer b.mu.Unlock()
187 // TODO: gRPC interanls do not clear the connections when the server list is stale.
188 // This means RPCs will keep using the existing server list until b receives new
189 // server list even though the list is expired. Revisit this behavior later.
190 if b.done || seq < b.seq {
191 return
192 }
193 b.next = 0
194 b.addrs = nil
195 // Ask grpc internals to close all the corresponding connections.
196 b.addrCh <- nil
197}
198
199func convertDuration(d *lbpb.Duration) time.Duration {
200 if d == nil {
201 return 0
202 }
203 return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
204}
205
206func (b *balancer) processServerList(l *lbpb.ServerList, seq int) {
207 if l == nil {
208 return
209 }
210 servers := l.GetServers()
211 expiration := convertDuration(l.GetExpirationInterval())
212 var (
213 sl []*grpclbAddrInfo
214 addrs []Address
215 )
216 for _, s := range servers {
217 md := metadata.Pairs("lb-token", s.LoadBalanceToken)
218 ip := net.IP(s.IpAddress)
219 ipStr := ip.String()
220 if ip.To4() == nil {
221 // Add square brackets to ipv6 addresses, otherwise net.Dial() and
222 // net.SplitHostPort() will return too many colons error.
223 ipStr = fmt.Sprintf("[%s]", ipStr)
224 }
225 addr := Address{
226 Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
227 Metadata: &md,
228 }
229 sl = append(sl, &grpclbAddrInfo{
230 addr: addr,
231 dropForRateLimiting: s.DropForRateLimiting,
232 dropForLoadBalancing: s.DropForLoadBalancing,
233 })
234 addrs = append(addrs, addr)
235 }
236 b.mu.Lock()
237 defer b.mu.Unlock()
238 if b.done || seq < b.seq {
239 return
240 }
241 if len(sl) > 0 {
242 // reset b.next to 0 when replacing the server list.
243 b.next = 0
244 b.addrs = sl
245 b.addrCh <- addrs
246 if b.expTimer != nil {
247 b.expTimer.Stop()
248 b.expTimer = nil
249 }
250 if expiration > 0 {
251 b.expTimer = time.AfterFunc(expiration, func() {
252 b.serverListExpire(seq)
253 })
254 }
255 }
256 return
257}
258
259func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
260 ticker := time.NewTicker(interval)
261 defer ticker.Stop()
262 for {
263 select {
264 case <-ticker.C:
265 case <-done:
266 return
267 }
268 b.mu.Lock()
269 stats := b.clientStats
270 b.clientStats = lbpb.ClientStats{} // Clear the stats.
271 b.mu.Unlock()
272 t := time.Now()
273 stats.Timestamp = &lbpb.Timestamp{
274 Seconds: t.Unix(),
275 Nanos: int32(t.Nanosecond()),
276 }
277 if err := s.Send(&lbpb.LoadBalanceRequest{
278 LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
279 ClientStats: &stats,
280 },
281 }); err != nil {
282 grpclog.Errorf("grpclb: failed to send load report: %v", err)
283 return
284 }
285 }
286}
287
288func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
289 ctx, cancel := context.WithCancel(context.Background())
290 defer cancel()
291 stream, err := lbc.BalanceLoad(ctx)
292 if err != nil {
293 grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
294 return
295 }
296 b.mu.Lock()
297 if b.done {
298 b.mu.Unlock()
299 return
300 }
301 b.mu.Unlock()
302 initReq := &lbpb.LoadBalanceRequest{
303 LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
304 InitialRequest: &lbpb.InitialLoadBalanceRequest{
305 Name: b.target,
306 },
307 },
308 }
309 if err := stream.Send(initReq); err != nil {
310 grpclog.Errorf("grpclb: failed to send init request: %v", err)
311 // TODO: backoff on retry?
312 return true
313 }
314 reply, err := stream.Recv()
315 if err != nil {
316 grpclog.Errorf("grpclb: failed to recv init response: %v", err)
317 // TODO: backoff on retry?
318 return true
319 }
320 initResp := reply.GetInitialResponse()
321 if initResp == nil {
322 grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
323 return
324 }
325 // TODO: Support delegation.
326 if initResp.LoadBalancerDelegate != "" {
327 // delegation
328 grpclog.Errorf("TODO: Delegation is not supported yet.")
329 return
330 }
331 streamDone := make(chan struct{})
332 defer close(streamDone)
333 b.mu.Lock()
334 b.clientStats = lbpb.ClientStats{} // Clear client stats.
335 b.mu.Unlock()
336 if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
337 go b.sendLoadReport(stream, d, streamDone)
338 }
339 // Retrieve the server list.
340 for {
341 reply, err := stream.Recv()
342 if err != nil {
343 grpclog.Errorf("grpclb: failed to recv server list: %v", err)
344 break
345 }
346 b.mu.Lock()
347 if b.done || seq < b.seq {
348 b.mu.Unlock()
349 return
350 }
351 b.seq++ // tick when receiving a new list of servers.
352 seq = b.seq
353 b.mu.Unlock()
354 if serverList := reply.GetServerList(); serverList != nil {
355 b.processServerList(serverList, seq)
356 }
357 }
358 return true
359}
360
361func (b *balancer) Start(target string, config BalancerConfig) error {
362 b.rand = rand.New(rand.NewSource(time.Now().Unix()))
363 // TODO: Fall back to the basic direct connection if there is no name resolver.
364 if b.r == nil {
365 return errors.New("there is no name resolver installed")
366 }
367 b.target = target
368 b.mu.Lock()
369 if b.done {
370 b.mu.Unlock()
371 return ErrClientConnClosing
372 }
373 b.addrCh = make(chan []Address)
374 w, err := b.r.Resolve(target)
375 if err != nil {
376 b.mu.Unlock()
377 grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
378 return err
379 }
380 b.w = w
381 b.mu.Unlock()
382 balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
383 // Spawn a goroutine to monitor the name resolution of remote load balancer.
384 go func() {
385 for {
386 if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
387 grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
388 close(balancerAddrsCh)
389 return
390 }
391 }
392 }()
393 // Spawn a goroutine to talk to the remote load balancer.
394 go func() {
395 var (
396 cc *ClientConn
397 // ccError is closed when there is an error in the current cc.
398 // A new rb should be picked from rbs and connected.
399 ccError chan struct{}
400 rb *remoteBalancerInfo
401 rbs []remoteBalancerInfo
402 rbIdx int
403 )
404
405 defer func() {
406 if ccError != nil {
407 select {
408 case <-ccError:
409 default:
410 close(ccError)
411 }
412 }
413 if cc != nil {
414 cc.Close()
415 }
416 }()
417
418 for {
419 var ok bool
420 select {
421 case rbs, ok = <-balancerAddrsCh:
422 if !ok {
423 return
424 }
425 foundIdx := -1
426 if rb != nil {
427 for i, trb := range rbs {
428 if trb == *rb {
429 foundIdx = i
430 break
431 }
432 }
433 }
434 if foundIdx >= 0 {
435 if foundIdx >= 1 {
436 // Move the address in use to the beginning of the list.
437 b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
438 rbIdx = 0
439 }
440 continue // If found, don't dial new cc.
441 } else if len(rbs) > 0 {
442 // Pick a random one from the list, instead of always using the first one.
443 if l := len(rbs); l > 1 && rb != nil {
444 tmpIdx := b.rand.Intn(l - 1)
445 b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
446 }
447 rbIdx = 0
448 rb = &rbs[0]
449 } else {
450 // foundIdx < 0 && len(rbs) <= 0.
451 rb = nil
452 }
453 case <-ccError:
454 ccError = nil
455 if rbIdx < len(rbs)-1 {
456 rbIdx++
457 rb = &rbs[rbIdx]
458 } else {
459 rb = nil
460 }
461 }
462
463 if rb == nil {
464 continue
465 }
466
467 if cc != nil {
468 cc.Close()
469 }
470 // Talk to the remote load balancer to get the server list.
471 var (
472 err error
473 dopts []DialOption
474 )
475 if creds := config.DialCreds; creds != nil {
476 if rb.name != "" {
477 if err := creds.OverrideServerName(rb.name); err != nil {
478 grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
479 continue
480 }
481 }
482 dopts = append(dopts, WithTransportCredentials(creds))
483 } else {
484 dopts = append(dopts, WithInsecure())
485 }
486 if dialer := config.Dialer; dialer != nil {
487 // WithDialer takes a different type of function, so we instead use a special DialOption here.
488 dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
489 }
490 ccError = make(chan struct{})
491 cc, err = Dial(rb.addr, dopts...)
492 if err != nil {
493 grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
494 close(ccError)
495 continue
496 }
497 b.mu.Lock()
498 b.seq++ // tick when getting a new balancer address
499 seq := b.seq
500 b.next = 0
501 b.mu.Unlock()
502 go func(cc *ClientConn, ccError chan struct{}) {
503 lbc := &loadBalancerClient{cc}
504 b.callRemoteBalancer(lbc, seq)
505 cc.Close()
506 select {
507 case <-ccError:
508 default:
509 close(ccError)
510 }
511 }(cc, ccError)
512 }
513 }()
514 return nil
515}
516
517func (b *balancer) down(addr Address, err error) {
518 b.mu.Lock()
519 defer b.mu.Unlock()
520 for _, a := range b.addrs {
521 if addr == a.addr {
522 a.connected = false
523 break
524 }
525 }
526}
527
528func (b *balancer) Up(addr Address) func(error) {
529 b.mu.Lock()
530 defer b.mu.Unlock()
531 if b.done {
532 return nil
533 }
534 var cnt int
535 for _, a := range b.addrs {
536 if a.addr == addr {
537 if a.connected {
538 return nil
539 }
540 a.connected = true
541 }
542 if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
543 cnt++
544 }
545 }
546 // addr is the only one which is connected. Notify the Get() callers who are blocking.
547 if cnt == 1 && b.waitCh != nil {
548 close(b.waitCh)
549 b.waitCh = nil
550 }
551 return func(err error) {
552 b.down(addr, err)
553 }
554}
555
556func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
557 var ch chan struct{}
558 b.mu.Lock()
559 if b.done {
560 b.mu.Unlock()
561 err = ErrClientConnClosing
562 return
563 }
564 seq := b.seq
565
566 defer func() {
567 if err != nil {
568 return
569 }
570 put = func() {
571 s, ok := rpcInfoFromContext(ctx)
572 if !ok {
573 return
574 }
575 b.mu.Lock()
576 defer b.mu.Unlock()
577 if b.done || seq < b.seq {
578 return
579 }
580 b.clientStats.NumCallsFinished++
581 if !s.bytesSent {
582 b.clientStats.NumCallsFinishedWithClientFailedToSend++
583 } else if s.bytesReceived {
584 b.clientStats.NumCallsFinishedKnownReceived++
585 }
586 }
587 }()
588
589 b.clientStats.NumCallsStarted++
590 if len(b.addrs) > 0 {
591 if b.next >= len(b.addrs) {
592 b.next = 0
593 }
594 next := b.next
595 for {
596 a := b.addrs[next]
597 next = (next + 1) % len(b.addrs)
598 if a.connected {
599 if !a.dropForRateLimiting && !a.dropForLoadBalancing {
600 addr = a.addr
601 b.next = next
602 b.mu.Unlock()
603 return
604 }
605 if !opts.BlockingWait {
606 b.next = next
607 if a.dropForLoadBalancing {
608 b.clientStats.NumCallsFinished++
609 b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
610 } else if a.dropForRateLimiting {
611 b.clientStats.NumCallsFinished++
612 b.clientStats.NumCallsFinishedWithDropForRateLimiting++
613 }
614 b.mu.Unlock()
615 err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
616 return
617 }
618 }
619 if next == b.next {
620 // Has iterated all the possible address but none is connected.
621 break
622 }
623 }
624 }
625 if !opts.BlockingWait {
626 if len(b.addrs) == 0 {
627 b.clientStats.NumCallsFinished++
628 b.clientStats.NumCallsFinishedWithClientFailedToSend++
629 b.mu.Unlock()
630 err = Errorf(codes.Unavailable, "there is no address available")
631 return
632 }
633 // Returns the next addr on b.addrs for a failfast RPC.
634 addr = b.addrs[b.next].addr
635 b.next++
636 b.mu.Unlock()
637 return
638 }
639 // Wait on b.waitCh for non-failfast RPCs.
640 if b.waitCh == nil {
641 ch = make(chan struct{})
642 b.waitCh = ch
643 } else {
644 ch = b.waitCh
645 }
646 b.mu.Unlock()
647 for {
648 select {
649 case <-ctx.Done():
650 b.mu.Lock()
651 b.clientStats.NumCallsFinished++
652 b.clientStats.NumCallsFinishedWithClientFailedToSend++
653 b.mu.Unlock()
654 err = ctx.Err()
655 return
656 case <-ch:
657 b.mu.Lock()
658 if b.done {
659 b.clientStats.NumCallsFinished++
660 b.clientStats.NumCallsFinishedWithClientFailedToSend++
661 b.mu.Unlock()
662 err = ErrClientConnClosing
663 return
664 }
665
666 if len(b.addrs) > 0 {
667 if b.next >= len(b.addrs) {
668 b.next = 0
669 }
670 next := b.next
671 for {
672 a := b.addrs[next]
673 next = (next + 1) % len(b.addrs)
674 if a.connected {
675 if !a.dropForRateLimiting && !a.dropForLoadBalancing {
676 addr = a.addr
677 b.next = next
678 b.mu.Unlock()
679 return
680 }
681 if !opts.BlockingWait {
682 b.next = next
683 if a.dropForLoadBalancing {
684 b.clientStats.NumCallsFinished++
685 b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
686 } else if a.dropForRateLimiting {
687 b.clientStats.NumCallsFinished++
688 b.clientStats.NumCallsFinishedWithDropForRateLimiting++
689 }
690 b.mu.Unlock()
691 err = Errorf(codes.Unavailable, "drop requests for the addreess %s", a.addr.Addr)
692 return
693 }
694 }
695 if next == b.next {
696 // Has iterated all the possible address but none is connected.
697 break
698 }
699 }
700 }
701 // The newly added addr got removed by Down() again.
702 if b.waitCh == nil {
703 ch = make(chan struct{})
704 b.waitCh = ch
705 } else {
706 ch = b.waitCh
707 }
708 b.mu.Unlock()
709 }
710 }
711}
712
// Notify returns the channel over which the balancer publishes updated
// backend address lists; the channel is closed by Close.
func (b *balancer) Notify() <-chan []Address {
	return b.addrCh
}
716
717func (b *balancer) Close() error {
718 b.mu.Lock()
719 defer b.mu.Unlock()
720 if b.done {
721 return errBalancerClosed
722 }
723 b.done = true
724 if b.expTimer != nil {
725 b.expTimer.Stop()
726 }
727 if b.waitCh != nil {
728 close(b.waitCh)
729 }
730 if b.addrCh != nil {
731 close(b.addrCh)
732 }
733 if b.w != nil {
734 b.w.Close()
735 }
736 return nil
737}
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go
deleted file mode 100644
index f63941b..0000000
--- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go
+++ /dev/null
@@ -1,629 +0,0 @@
1// Code generated by protoc-gen-go.
2// source: grpclb.proto
3// DO NOT EDIT!
4
5/*
6Package grpc_lb_v1 is a generated protocol buffer package.
7
8It is generated from these files:
9 grpclb.proto
10
11It has these top-level messages:
12 Duration
13 Timestamp
14 LoadBalanceRequest
15 InitialLoadBalanceRequest
16 ClientStats
17 LoadBalanceResponse
18 InitialLoadBalanceResponse
19 ServerList
20 Server
21*/
22package grpc_lb_v1
23
24import proto "github.com/golang/protobuf/proto"
25import fmt "fmt"
26import math "math"
27
28// Reference imports to suppress errors if they are not otherwise used.
29var _ = proto.Marshal
30var _ = fmt.Errorf
31var _ = math.Inf
32
33// This is a compile-time assertion to ensure that this generated file
34// is compatible with the proto package it is being compiled against.
35// A compilation error at this line likely means your copy of the
36// proto package needs to be updated.
37const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
38
// Duration is a signed span of time (seconds + nanos). It mirrors
// google.protobuf.Duration — presumably vendored into grpclb.proto to avoid
// a well-known-types dependency; confirm against the .proto source.
type Duration struct {
	// Signed seconds of the span of time. Must be from -315,576,000,000
	// to +315,576,000,000 inclusive.
	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
	// Signed fractions of a second at nanosecond resolution of the span
	// of time. Durations less than one second are represented with a 0
	// `seconds` field and a positive or negative `nanos` field. For durations
	// of one second or more, a non-zero value for the `nanos` field must be
	// of the same sign as the `seconds` field. Must be from -999,999,999
	// to +999,999,999 inclusive.
	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}

// Standard generated proto plumbing for Duration.
func (m *Duration) Reset()                    { *m = Duration{} }
func (m *Duration) String() string            { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage()               {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

// GetSeconds returns m.Seconds, tolerating a nil receiver (generated style).
func (m *Duration) GetSeconds() int64 {
	if m != nil {
		return m.Seconds
	}
	return 0
}

// GetNanos returns m.Nanos, tolerating a nil receiver.
func (m *Duration) GetNanos() int32 {
	if m != nil {
		return m.Nanos
	}
	return 0
}

// Timestamp is an instant as seconds-since-epoch + nanos; like Duration it
// mirrors the google.protobuf well-known type.
type Timestamp struct {
	// Represents seconds of UTC time since Unix epoch
	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
	// 9999-12-31T23:59:59Z inclusive.
	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
	// Non-negative fractions of a second at nanosecond resolution. Negative
	// second values with fractions must still have non-negative nanos values
	// that count forward in time. Must be from 0 to 999,999,999
	// inclusive.
	Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}

// Standard generated proto plumbing for Timestamp.
func (m *Timestamp) Reset()                    { *m = Timestamp{} }
func (m *Timestamp) String() string            { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage()               {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }

// GetSeconds returns m.Seconds, tolerating a nil receiver.
func (m *Timestamp) GetSeconds() int64 {
	if m != nil {
		return m.Seconds
	}
	return 0
}

// GetNanos returns m.Nanos, tolerating a nil receiver.
func (m *Timestamp) GetNanos() int32 {
	if m != nil {
		return m.Nanos
	}
	return 0
}
101
// LoadBalanceRequest is the client-to-balancer stream message: per the proto,
// the first request carries InitialLoadBalanceRequest and later ones carry
// ClientStats, expressed as a oneof.
type LoadBalanceRequest struct {
	// Types that are valid to be assigned to LoadBalanceRequestType:
	//	*LoadBalanceRequest_InitialRequest
	//	*LoadBalanceRequest_ClientStats
	LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
}

// Standard generated proto plumbing for LoadBalanceRequest.
func (m *LoadBalanceRequest) Reset()                    { *m = LoadBalanceRequest{} }
func (m *LoadBalanceRequest) String() string            { return proto.CompactTextString(m) }
func (*LoadBalanceRequest) ProtoMessage()               {}
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }

// isLoadBalanceRequest_LoadBalanceRequestType is the sealed interface
// implemented by each oneof wrapper below.
type isLoadBalanceRequest_LoadBalanceRequestType interface {
	isLoadBalanceRequest_LoadBalanceRequestType()
}

// Oneof wrapper: field 1 (initial_request).
type LoadBalanceRequest_InitialRequest struct {
	InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
}

// Oneof wrapper: field 2 (client_stats).
type LoadBalanceRequest_ClientStats struct {
	ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
}

func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType()    {}

// GetLoadBalanceRequestType returns the populated oneof wrapper, or nil.
func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
	if m != nil {
		return m.LoadBalanceRequestType
	}
	return nil
}

// GetInitialRequest returns the initial_request field if that oneof arm is set.
func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
	if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
		return x.InitialRequest
	}
	return nil
}

// GetClientStats returns the client_stats field if that oneof arm is set.
func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
	if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
		return x.ClientStats
	}
	return nil
}

// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
		(*LoadBalanceRequest_InitialRequest)(nil),
		(*LoadBalanceRequest_ClientStats)(nil),
	}
}

// _LoadBalanceRequest_OneofMarshaler encodes whichever oneof arm is set,
// tagged as field 1 or 2 with wire type "bytes".
func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*LoadBalanceRequest)
	// load_balance_request_type
	switch x := m.LoadBalanceRequestType.(type) {
	case *LoadBalanceRequest_InitialRequest:
		b.EncodeVarint(1<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.InitialRequest); err != nil {
			return err
		}
	case *LoadBalanceRequest_ClientStats:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.ClientStats); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
	}
	return nil
}

// _LoadBalanceRequest_OneofUnmarshaler decodes field 1 or 2 into the matching
// oneof wrapper; it reports (false, nil) for tags it does not own.
func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*LoadBalanceRequest)
	switch tag {
	case 1: // load_balance_request_type.initial_request
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(InitialLoadBalanceRequest)
		err := b.DecodeMessage(msg)
		m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
		return true, err
	case 2: // load_balance_request_type.client_stats
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(ClientStats)
		err := b.DecodeMessage(msg)
		m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
		return true, err
	default:
		return false, nil
	}
}

// _LoadBalanceRequest_OneofSizer returns the encoded size of the set oneof
// arm: tag varint + length varint + payload.
func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*LoadBalanceRequest)
	// load_balance_request_type
	switch x := m.LoadBalanceRequestType.(type) {
	case *LoadBalanceRequest_InitialRequest:
		s := proto.Size(x.InitialRequest)
		n += proto.SizeVarint(1<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case *LoadBalanceRequest_ClientStats:
		s := proto.Size(x.ClientStats)
		n += proto.SizeVarint(2<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
222
// InitialLoadBalanceRequest identifies the load-balanced service on the
// first message of the stream.
type InitialLoadBalanceRequest struct {
	// Name of load balanced service (IE, balancer.service.com)
	// length should be less than 256 bytes.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}

// Standard generated proto plumbing for InitialLoadBalanceRequest.
func (m *InitialLoadBalanceRequest) Reset()                    { *m = InitialLoadBalanceRequest{} }
func (m *InitialLoadBalanceRequest) String() string            { return proto.CompactTextString(m) }
func (*InitialLoadBalanceRequest) ProtoMessage()               {}
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }

// GetName returns m.Name, tolerating a nil receiver.
func (m *InitialLoadBalanceRequest) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}
240
// Contains client level statistics that are useful to load balancing. Each
// count except the timestamp should be reset to zero after reporting the stats.
type ClientStats struct {
	// The timestamp of generating the report.
	Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
	// The total number of RPCs that started.
	NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
	// The total number of RPCs that finished.
	NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
	// The total number of RPCs that were dropped by the client because of rate
	// limiting.
	NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
	// The total number of RPCs that were dropped by the client because of load
	// balancing.
	NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
	// The total number of RPCs that failed to reach a server except dropped RPCs.
	NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
	// The total number of RPCs that finished and are known to have been received
	// by a server.
	NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
}

// Standard generated proto plumbing for ClientStats.
func (m *ClientStats) Reset()                    { *m = ClientStats{} }
func (m *ClientStats) String() string            { return proto.CompactTextString(m) }
func (*ClientStats) ProtoMessage()               {}
func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }

// Nil-safe generated getters follow.

func (m *ClientStats) GetTimestamp() *Timestamp {
	if m != nil {
		return m.Timestamp
	}
	return nil
}

func (m *ClientStats) GetNumCallsStarted() int64 {
	if m != nil {
		return m.NumCallsStarted
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinished() int64 {
	if m != nil {
		return m.NumCallsFinished
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
	if m != nil {
		return m.NumCallsFinishedWithDropForRateLimiting
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
	if m != nil {
		return m.NumCallsFinishedWithDropForLoadBalancing
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
	if m != nil {
		return m.NumCallsFinishedWithClientFailedToSend
	}
	return 0
}

func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
	if m != nil {
		return m.NumCallsFinishedKnownReceived
	}
	return 0
}
316
// LoadBalanceResponse is the balancer-to-client stream message: a oneof of
// the initial handshake response and subsequent server lists.
type LoadBalanceResponse struct {
	// Types that are valid to be assigned to LoadBalanceResponseType:
	//	*LoadBalanceResponse_InitialResponse
	//	*LoadBalanceResponse_ServerList
	LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
}

// Standard generated proto plumbing for LoadBalanceResponse.
func (m *LoadBalanceResponse) Reset()                    { *m = LoadBalanceResponse{} }
func (m *LoadBalanceResponse) String() string            { return proto.CompactTextString(m) }
func (*LoadBalanceResponse) ProtoMessage()               {}
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }

// isLoadBalanceResponse_LoadBalanceResponseType is the sealed oneof interface.
type isLoadBalanceResponse_LoadBalanceResponseType interface {
	isLoadBalanceResponse_LoadBalanceResponseType()
}

// Oneof wrapper: field 1 (initial_response).
type LoadBalanceResponse_InitialResponse struct {
	InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
}

// Oneof wrapper: field 2 (server_list).
type LoadBalanceResponse_ServerList struct {
	ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
}

func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType()      {}

// GetLoadBalanceResponseType returns the populated oneof wrapper, or nil.
func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
	if m != nil {
		return m.LoadBalanceResponseType
	}
	return nil
}

// GetInitialResponse returns the initial_response field if that arm is set.
func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
	if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
		return x.InitialResponse
	}
	return nil
}

// GetServerList returns the server_list field if that arm is set.
func (m *LoadBalanceResponse) GetServerList() *ServerList {
	if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
		return x.ServerList
	}
	return nil
}

// XXX_OneofFuncs is for the internal use of the proto package.
func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
		(*LoadBalanceResponse_InitialResponse)(nil),
		(*LoadBalanceResponse_ServerList)(nil),
	}
}

// _LoadBalanceResponse_OneofMarshaler encodes whichever oneof arm is set,
// tagged as field 1 or 2 with wire type "bytes".
func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*LoadBalanceResponse)
	// load_balance_response_type
	switch x := m.LoadBalanceResponseType.(type) {
	case *LoadBalanceResponse_InitialResponse:
		b.EncodeVarint(1<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.InitialResponse); err != nil {
			return err
		}
	case *LoadBalanceResponse_ServerList:
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.ServerList); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
	}
	return nil
}

// _LoadBalanceResponse_OneofUnmarshaler decodes field 1 or 2 into the
// matching oneof wrapper; (false, nil) for unknown tags.
func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*LoadBalanceResponse)
	switch tag {
	case 1: // load_balance_response_type.initial_response
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(InitialLoadBalanceResponse)
		err := b.DecodeMessage(msg)
		m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
		return true, err
	case 2: // load_balance_response_type.server_list
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(ServerList)
		err := b.DecodeMessage(msg)
		m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
		return true, err
	default:
		return false, nil
	}
}

// _LoadBalanceResponse_OneofSizer returns the encoded size of the set oneof
// arm: tag varint + length varint + payload.
func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*LoadBalanceResponse)
	// load_balance_response_type
	switch x := m.LoadBalanceResponseType.(type) {
	case *LoadBalanceResponse_InitialResponse:
		s := proto.Size(x.InitialResponse)
		n += proto.SizeVarint(1<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case *LoadBalanceResponse_ServerList:
		s := proto.Size(x.ServerList)
		n += proto.SizeVarint(2<<3 | proto.WireBytes)
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
437
// InitialLoadBalanceResponse is the balancer's handshake reply: an optional
// delegate redirect plus the client-stats reporting interval.
type InitialLoadBalanceResponse struct {
	// This is an application layer redirect that indicates the client should use
	// the specified server for load balancing. When this field is non-empty in
	// the response, the client should open a separate connection to the
	// load_balancer_delegate and call the BalanceLoad method. Its length should
	// be less than 64 bytes.
	LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
	// This interval defines how often the client should send the client stats
	// to the load balancer. Stats should only be reported when the duration is
	// positive.
	ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
}

// Standard generated proto plumbing for InitialLoadBalanceResponse.
func (m *InitialLoadBalanceResponse) Reset()                    { *m = InitialLoadBalanceResponse{} }
func (m *InitialLoadBalanceResponse) String() string            { return proto.CompactTextString(m) }
func (*InitialLoadBalanceResponse) ProtoMessage()               {}
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }

// GetLoadBalancerDelegate returns the delegate address, nil-safe.
func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
	if m != nil {
		return m.LoadBalancerDelegate
	}
	return ""
}

// GetClientStatsReportInterval returns the stats interval, nil-safe.
func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
	if m != nil {
		return m.ClientStatsReportInterval
	}
	return nil
}
469
// ServerList carries the balancer-selected backends and their validity window.
type ServerList struct {
	// Contains a list of servers selected by the load balancer. The list will
	// be updated when server resolutions change or as needed to balance load
	// across more servers. The client should consume the server list in order
	// unless instructed otherwise via the client_config.
	Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
	// Indicates the amount of time that the client should consider this server
	// list as valid. It may be considered stale after waiting this interval of
	// time after receiving the list. If the interval is not positive, the
	// client can assume the list is valid until the next list is received.
	ExpirationInterval *Duration `protobuf:"bytes,3,opt,name=expiration_interval,json=expirationInterval" json:"expiration_interval,omitempty"`
}

// Standard generated proto plumbing for ServerList.
func (m *ServerList) Reset()                    { *m = ServerList{} }
func (m *ServerList) String() string            { return proto.CompactTextString(m) }
func (*ServerList) ProtoMessage()               {}
func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }

// GetServers returns the server slice, nil-safe.
func (m *ServerList) GetServers() []*Server {
	if m != nil {
		return m.Servers
	}
	return nil
}

// GetExpirationInterval returns the validity window, nil-safe.
func (m *ServerList) GetExpirationInterval() *Duration {
	if m != nil {
		return m.ExpirationInterval
	}
	return nil
}
501
// Contains server information. When none of the [drop_for_*] fields are true,
// use the other fields. When drop_for_rate_limiting is true, ignore all other
// fields. Use drop_for_load_balancing only when it is true and
// drop_for_rate_limiting is false.
type Server struct {
	// A resolved address for the server, serialized in network-byte-order. It may
	// either be an IPv4 or IPv6 address.
	IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
	// A resolved port number for the server.
	Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
	// An opaque but printable token given to the frontend for each pick. All
	// frontend requests for that pick must include the token in its initial
	// metadata. The token is used by the backend to verify the request and to
	// allow the backend to report load to the gRPC LB system.
	//
	// Its length is variable but less than 50 bytes.
	LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
	// Indicates whether this particular request should be dropped by the client
	// for rate limiting.
	DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
	// Indicates whether this particular request should be dropped by the client
	// for load balancing.
	DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
}

// Standard generated proto plumbing for Server.
func (m *Server) Reset()                    { *m = Server{} }
func (m *Server) String() string            { return proto.CompactTextString(m) }
func (*Server) ProtoMessage()               {}
func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }

// Nil-safe generated getters follow.

func (m *Server) GetIpAddress() []byte {
	if m != nil {
		return m.IpAddress
	}
	return nil
}

func (m *Server) GetPort() int32 {
	if m != nil {
		return m.Port
	}
	return 0
}

func (m *Server) GetLoadBalanceToken() string {
	if m != nil {
		return m.LoadBalanceToken
	}
	return ""
}

func (m *Server) GetDropForRateLimiting() bool {
	if m != nil {
		return m.DropForRateLimiting
	}
	return false
}

func (m *Server) GetDropForLoadBalancing() bool {
	if m != nil {
		return m.DropForLoadBalancing
	}
	return false
}
566
// Registers every message type of this package with the proto registry under
// its fully-qualified "grpc.lb.v1.*" name.
func init() {
	proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
	proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
	proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
	proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
	proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
	proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
	proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
	proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
	proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
}

// Registers the compressed file descriptor for grpclb.proto.
func init() { proto.RegisterFile("grpclb.proto", fileDescriptor0) }
580
581var fileDescriptor0 = []byte{
582 // 733 bytes of a gzipped FileDescriptorProto
583 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
584 0x14, 0x66, 0x36, 0xfc, 0xe5, 0x24, 0x5a, 0x58, 0x93, 0x85, 0xc0, 0xc2, 0x2e, 0x1b, 0xa9, 0x34,
585 0xaa, 0x68, 0x68, 0x43, 0x7b, 0xd1, 0x9f, 0x9b, 0x02, 0x45, 0x41, 0xe5, 0xa2, 0x72, 0xa8, 0x7a,
586 0x55, 0x59, 0x4e, 0xc6, 0x80, 0xc5, 0xc4, 0x9e, 0xda, 0x4e, 0x68, 0x2f, 0x7b, 0xd9, 0x47, 0xe9,
587 0x63, 0x54, 0x7d, 0x86, 0xbe, 0x4f, 0x65, 0x7b, 0x26, 0x33, 0x90, 0x1f, 0xd4, 0xbb, 0xf1, 0xf1,
588 0x77, 0xbe, 0xf3, 0xf9, 0xd8, 0xdf, 0x19, 0x28, 0x5f, 0xa8, 0xb8, 0x1b, 0x75, 0x1a, 0xb1, 0x92,
589 0x46, 0x22, 0xb0, 0xab, 0x46, 0xd4, 0x69, 0x0c, 0x1e, 0xd7, 0x9e, 0xc3, 0xe2, 0x51, 0x5f, 0x51,
590 0xc3, 0xa5, 0x40, 0x55, 0x58, 0xd0, 0xac, 0x2b, 0x45, 0xa8, 0xab, 0xc1, 0x76, 0x50, 0x2f, 0xe0,
591 0x74, 0x89, 0x2a, 0x30, 0x27, 0xa8, 0x90, 0xba, 0xfa, 0xc7, 0x76, 0x50, 0x9f, 0xc3, 0x7e, 0x51,
592 0x7b, 0x01, 0xc5, 0x33, 0xde, 0x63, 0xda, 0xd0, 0x5e, 0xfc, 0xdb, 0xc9, 0xdf, 0x03, 0x40, 0xa7,
593 0x92, 0x86, 0x07, 0x34, 0xa2, 0xa2, 0xcb, 0x30, 0xfb, 0xd8, 0x67, 0xda, 0xa0, 0xb7, 0xb0, 0xc4,
594 0x05, 0x37, 0x9c, 0x46, 0x44, 0xf9, 0x90, 0xa3, 0x2b, 0x35, 0xef, 0x35, 0x32, 0xd5, 0x8d, 0x13,
595 0x0f, 0x19, 0xcd, 0x6f, 0xcd, 0xe0, 0x3f, 0x93, 0xfc, 0x94, 0xf1, 0x25, 0x94, 0xbb, 0x11, 0x67,
596 0xc2, 0x10, 0x6d, 0xa8, 0xf1, 0x2a, 0x4a, 0xcd, 0xb5, 0x3c, 0xdd, 0xa1, 0xdb, 0x6f, 0xdb, 0xed,
597 0xd6, 0x0c, 0x2e, 0x75, 0xb3, 0xe5, 0xc1, 0x3f, 0xb0, 0x1e, 0x49, 0x1a, 0x92, 0x8e, 0x2f, 0x93,
598 0x8a, 0x22, 0xe6, 0x73, 0xcc, 0x6a, 0x7b, 0xb0, 0x3e, 0x51, 0x09, 0x42, 0x30, 0x2b, 0x68, 0x8f,
599 0x39, 0xf9, 0x45, 0xec, 0xbe, 0x6b, 0x5f, 0x67, 0xa1, 0x94, 0x2b, 0x86, 0xf6, 0xa1, 0x68, 0xd2,
600 0x0e, 0x26, 0xe7, 0xfc, 0x3b, 0x2f, 0x6c, 0xd8, 0x5e, 0x9c, 0xe1, 0xd0, 0x03, 0xf8, 0x4b, 0xf4,
601 0x7b, 0xa4, 0x4b, 0xa3, 0x48, 0xdb, 0x33, 0x29, 0xc3, 0x42, 0x77, 0xaa, 0x02, 0x5e, 0x12, 0xfd,
602 0xde, 0xa1, 0x8d, 0xb7, 0x7d, 0x18, 0xed, 0x02, 0xca, 0xb0, 0xe7, 0x5c, 0x70, 0x7d, 0xc9, 0xc2,
603 0x6a, 0xc1, 0x81, 0x97, 0x53, 0xf0, 0x71, 0x12, 0x47, 0x04, 0x1a, 0xa3, 0x68, 0x72, 0xcd, 0xcd,
604 0x25, 0x09, 0x95, 0x8c, 0xc9, 0xb9, 0x54, 0x44, 0x51, 0xc3, 0x48, 0xc4, 0x7b, 0xdc, 0x70, 0x71,
605 0x51, 0x9d, 0x75, 0x4c, 0xf7, 0x6f, 0x33, 0xbd, 0xe7, 0xe6, 0xf2, 0x48, 0xc9, 0xf8, 0x58, 0x2a,
606 0x4c, 0x0d, 0x3b, 0x4d, 0xe0, 0x88, 0xc2, 0xde, 0x9d, 0x05, 0x72, 0xed, 0xb6, 0x15, 0xe6, 0x5c,
607 0x85, 0xfa, 0x94, 0x0a, 0x59, 0xef, 0x6d, 0x89, 0x0f, 0xf0, 0x70, 0x52, 0x89, 0xe4, 0x19, 0x9c,
608 0x53, 0x1e, 0xb1, 0x90, 0x18, 0x49, 0x34, 0x13, 0x61, 0x75, 0xde, 0x15, 0xd8, 0x19, 0x57, 0xc0,
609 0x5f, 0xd5, 0xb1, 0xc3, 0x9f, 0xc9, 0x36, 0x13, 0x21, 0x6a, 0xc1, 0xff, 0x63, 0xe8, 0xaf, 0x84,
610 0xbc, 0x16, 0x44, 0xb1, 0x2e, 0xe3, 0x03, 0x16, 0x56, 0x17, 0x1c, 0xe5, 0xd6, 0x6d, 0xca, 0x37,
611 0x16, 0x85, 0x13, 0x50, 0xed, 0x47, 0x00, 0x2b, 0x37, 0x9e, 0x8d, 0x8e, 0xa5, 0xd0, 0x0c, 0xb5,
612 0x61, 0x39, 0x73, 0x80, 0x8f, 0x25, 0x4f, 0x63, 0xe7, 0x2e, 0x0b, 0x78, 0x74, 0x6b, 0x06, 0x2f,
613 0x0d, 0x3d, 0x90, 0x90, 0x3e, 0x83, 0x92, 0x66, 0x6a, 0xc0, 0x14, 0x89, 0xb8, 0x36, 0x89, 0x07,
614 0x56, 0xf3, 0x7c, 0x6d, 0xb7, 0x7d, 0xca, 0x9d, 0x87, 0x40, 0x0f, 0x57, 0x07, 0x9b, 0xb0, 0x71,
615 0xcb, 0x01, 0x9e, 0xd3, 0x5b, 0xe0, 0x5b, 0x00, 0x1b, 0x93, 0xa5, 0xa0, 0x27, 0xb0, 0x9a, 0x4f,
616 0x56, 0x24, 0x64, 0x11, 0xbb, 0xa0, 0x26, 0xb5, 0x45, 0x25, 0xca, 0x92, 0xd4, 0x51, 0xb2, 0x87,
617 0xde, 0xc1, 0x66, 0xde, 0xb2, 0x44, 0xb1, 0x58, 0x2a, 0x43, 0xb8, 0x30, 0x4c, 0x0d, 0x68, 0x94,
618 0xc8, 0xaf, 0xe4, 0xe5, 0xa7, 0x43, 0x0c, 0xaf, 0xe7, 0xdc, 0x8b, 0x5d, 0xde, 0x49, 0x92, 0x56,
619 0xfb, 0x12, 0x00, 0x64, 0xc7, 0x44, 0xbb, 0x76, 0x62, 0xd9, 0x95, 0x9d, 0x58, 0x85, 0x7a, 0xa9,
620 0x89, 0x46, 0xfb, 0x81, 0x53, 0x08, 0x7a, 0x0d, 0x2b, 0xec, 0x53, 0xcc, 0x7d, 0x95, 0x4c, 0x4a,
621 0x61, 0x8a, 0x14, 0x94, 0x25, 0x0c, 0x35, 0xfc, 0x0c, 0x60, 0xde, 0x53, 0xa3, 0x2d, 0x00, 0x1e,
622 0x13, 0x1a, 0x86, 0x8a, 0x69, 0x3f, 0x34, 0xcb, 0xb8, 0xc8, 0xe3, 0x57, 0x3e, 0x60, 0xe7, 0x87,
623 0x55, 0x9f, 0x4c, 0x4d, 0xf7, 0x6d, 0xed, 0x7c, 0xe3, 0x2e, 0x8c, 0xbc, 0x62, 0xc2, 0x69, 0x28,
624 0xe2, 0xe5, 0x5c, 0x2b, 0xcf, 0x6c, 0x1c, 0xed, 0xc3, 0xea, 0x14, 0xdb, 0x2e, 0xe2, 0x95, 0x70,
625 0x8c, 0x45, 0x9f, 0xc2, 0xda, 0x34, 0x2b, 0x2e, 0xe2, 0x4a, 0x38, 0xc6, 0x76, 0xcd, 0x0e, 0x94,
626 0x73, 0xf7, 0xaf, 0x10, 0x86, 0x52, 0xf2, 0x6d, 0xc3, 0xe8, 0xdf, 0x7c, 0x83, 0x46, 0x87, 0xe5,
627 0xc6, 0x7f, 0x13, 0xf7, 0xfd, 0x43, 0xaa, 0x07, 0x8f, 0x82, 0xce, 0xbc, 0xfb, 0x7d, 0xed, 0xff,
628 0x0a, 0x00, 0x00, 0xff, 0xff, 0x64, 0xbf, 0xda, 0x5e, 0xce, 0x06, 0x00, 0x00,
629}
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto
deleted file mode 100644
index b13b343..0000000
--- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.proto
+++ /dev/null
@@ -1,164 +0,0 @@
1// Copyright 2016 gRPC authors.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15syntax = "proto3";
16
17package grpc.lb.v1;
18
19message Duration {
20 // Signed seconds of the span of time. Must be from -315,576,000,000
21 // to +315,576,000,000 inclusive.
22 int64 seconds = 1;
23
24 // Signed fractions of a second at nanosecond resolution of the span
25 // of time. Durations less than one second are represented with a 0
26 // `seconds` field and a positive or negative `nanos` field. For durations
27 // of one second or more, a non-zero value for the `nanos` field must be
28 // of the same sign as the `seconds` field. Must be from -999,999,999
29 // to +999,999,999 inclusive.
30 int32 nanos = 2;
31}
32
33message Timestamp {
34
35 // Represents seconds of UTC time since Unix epoch
36 // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
37 // 9999-12-31T23:59:59Z inclusive.
38 int64 seconds = 1;
39
40 // Non-negative fractions of a second at nanosecond resolution. Negative
41 // second values with fractions must still have non-negative nanos values
42 // that count forward in time. Must be from 0 to 999,999,999
43 // inclusive.
44 int32 nanos = 2;
45}
46
47service LoadBalancer {
48 // Bidirectional rpc to get a list of servers.
49 rpc BalanceLoad(stream LoadBalanceRequest)
50 returns (stream LoadBalanceResponse);
51}
52
53message LoadBalanceRequest {
54 oneof load_balance_request_type {
55 // This message should be sent on the first request to the load balancer.
56 InitialLoadBalanceRequest initial_request = 1;
57
58 // The client stats should be periodically reported to the load balancer
59 // based on the duration defined in the InitialLoadBalanceResponse.
60 ClientStats client_stats = 2;
61 }
62}
63
64message InitialLoadBalanceRequest {
65 // Name of load balanced service (IE, balancer.service.com)
66 // length should be less than 256 bytes.
67 string name = 1;
68}
69
70// Contains client level statistics that are useful to load balancing. Each
71// count except the timestamp should be reset to zero after reporting the stats.
72message ClientStats {
73 // The timestamp of generating the report.
74 Timestamp timestamp = 1;
75
76 // The total number of RPCs that started.
77 int64 num_calls_started = 2;
78
79 // The total number of RPCs that finished.
80 int64 num_calls_finished = 3;
81
82 // The total number of RPCs that were dropped by the client because of rate
83 // limiting.
84 int64 num_calls_finished_with_drop_for_rate_limiting = 4;
85
86 // The total number of RPCs that were dropped by the client because of load
87 // balancing.
88 int64 num_calls_finished_with_drop_for_load_balancing = 5;
89
90 // The total number of RPCs that failed to reach a server except dropped RPCs.
91 int64 num_calls_finished_with_client_failed_to_send = 6;
92
93 // The total number of RPCs that finished and are known to have been received
94 // by a server.
95 int64 num_calls_finished_known_received = 7;
96}
97
98message LoadBalanceResponse {
99 oneof load_balance_response_type {
100 // This message should be sent on the first response to the client.
101 InitialLoadBalanceResponse initial_response = 1;
102
103 // Contains the list of servers selected by the load balancer. The client
104 // should send requests to these servers in the specified order.
105 ServerList server_list = 2;
106 }
107}
108
109message InitialLoadBalanceResponse {
110 // This is an application layer redirect that indicates the client should use
111 // the specified server for load balancing. When this field is non-empty in
112 // the response, the client should open a separate connection to the
113 // load_balancer_delegate and call the BalanceLoad method. Its length should
114 // be less than 64 bytes.
115 string load_balancer_delegate = 1;
116
117 // This interval defines how often the client should send the client stats
118 // to the load balancer. Stats should only be reported when the duration is
119 // positive.
120 Duration client_stats_report_interval = 2;
121}
122
123message ServerList {
124 // Contains a list of servers selected by the load balancer. The list will
125 // be updated when server resolutions change or as needed to balance load
126 // across more servers. The client should consume the server list in order
127 // unless instructed otherwise via the client_config.
128 repeated Server servers = 1;
129
130 // Indicates the amount of time that the client should consider this server
131 // list as valid. It may be considered stale after waiting this interval of
132 // time after receiving the list. If the interval is not positive, the
133 // client can assume the list is valid until the next list is received.
134 Duration expiration_interval = 3;
135}
136
137// Contains server information. When none of the [drop_for_*] fields are true,
138// use the other fields. When drop_for_rate_limiting is true, ignore all other
139// fields. Use drop_for_load_balancing only when it is true and
140// drop_for_rate_limiting is false.
141message Server {
142 // A resolved address for the server, serialized in network-byte-order. It may
143 // either be an IPv4 or IPv6 address.
144 bytes ip_address = 1;
145
146 // A resolved port number for the server.
147 int32 port = 2;
148
149 // An opaque but printable token given to the frontend for each pick. All
150 // frontend requests for that pick must include the token in its initial
151 // metadata. The token is used by the backend to verify the request and to
152 // allow the backend to report load to the gRPC LB system.
153 //
154 // Its length is variable but less than 50 bytes.
155 string load_balance_token = 3;
156
157 // Indicates whether this particular request should be dropped by the client
158 // for rate limiting.
159 bool drop_for_rate_limiting = 4;
160
161 // Indicates whether this particular request should be dropped by the client
162 // for load balancing.
163 bool drop_for_load_balancing = 5;
164}
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 16a7d88..1fabb11 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -105,18 +105,21 @@ func Fatalln(args ...interface{}) {
105} 105}
106 106
107// Print prints to the logger. Arguments are handled in the manner of fmt.Print. 107// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
108//
108// Deprecated: use Info. 109// Deprecated: use Info.
109func Print(args ...interface{}) { 110func Print(args ...interface{}) {
110 logger.Info(args...) 111 logger.Info(args...)
111} 112}
112 113
113// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. 114// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
115//
114// Deprecated: use Infof. 116// Deprecated: use Infof.
115func Printf(format string, args ...interface{}) { 117func Printf(format string, args ...interface{}) {
116 logger.Infof(format, args...) 118 logger.Infof(format, args...)
117} 119}
118 120
119// Println prints to the logger. Arguments are handled in the manner of fmt.Println. 121// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
122//
120// Deprecated: use Infoln. 123// Deprecated: use Infoln.
121func Println(args ...interface{}) { 124func Println(args ...interface{}) {
122 logger.Infoln(args...) 125 logger.Infoln(args...)
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index d03b239..097494f 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -19,6 +19,7 @@
19package grpclog 19package grpclog
20 20
21// Logger mimics golang's standard Logger as an interface. 21// Logger mimics golang's standard Logger as an interface.
22//
22// Deprecated: use LoggerV2. 23// Deprecated: use LoggerV2.
23type Logger interface { 24type Logger interface {
24 Fatal(args ...interface{}) 25 Fatal(args ...interface{})
@@ -31,6 +32,7 @@ type Logger interface {
31 32
32// SetLogger sets the logger that is used in grpc. Call only from 33// SetLogger sets the logger that is used in grpc. Call only from
33// init() functions. 34// init() functions.
35//
34// Deprecated: use SetLoggerV2. 36// Deprecated: use SetLoggerV2.
35func SetLogger(l Logger) { 37func SetLogger(l Logger) {
36 logger = &loggerWrapper{Logger: l} 38 logger = &loggerWrapper{Logger: l}
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go
new file mode 100644
index 0000000..e15f04c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/client.go
@@ -0,0 +1,107 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package health
20
21import (
22 "context"
23 "fmt"
24 "io"
25 "time"
26
27 "google.golang.org/grpc"
28 "google.golang.org/grpc/codes"
29 healthpb "google.golang.org/grpc/health/grpc_health_v1"
30 "google.golang.org/grpc/internal"
31 "google.golang.org/grpc/internal/backoff"
32 "google.golang.org/grpc/status"
33)
34
35const maxDelay = 120 * time.Second
36
37var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
38var backoffFunc = func(ctx context.Context, retries int) bool {
39 d := backoffStrategy.Backoff(retries)
40 timer := time.NewTimer(d)
41 select {
42 case <-timer.C:
43 return true
44 case <-ctx.Done():
45 timer.Stop()
46 return false
47 }
48}
49
50func init() {
51 internal.HealthCheckFunc = clientHealthCheck
52}
53
54func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error {
55 tryCnt := 0
56
57retryConnection:
58 for {
59 // Backs off if the connection has failed in some way without receiving a message in the previous retry.
60 if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) {
61 return nil
62 }
63 tryCnt++
64
65 if ctx.Err() != nil {
66 return nil
67 }
68 rawS, err := newStream()
69 if err != nil {
70 continue retryConnection
71 }
72
73 s, ok := rawS.(grpc.ClientStream)
74 // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
75 if !ok {
76 reportHealth(true)
77 return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
78 }
79
80 if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF {
81 // Stream should have been closed, so we can safely continue to create a new stream.
82 continue retryConnection
83 }
84 s.CloseSend()
85
86 resp := new(healthpb.HealthCheckResponse)
87 for {
88 err = s.RecvMsg(resp)
89
90 // Reports healthy for the LBing purposes if health check is not implemented in the server.
91 if status.Code(err) == codes.Unimplemented {
92 reportHealth(true)
93 return err
94 }
95
96 // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
97 if err != nil {
98 reportHealth(false)
99 continue retryConnection
100 }
101
102 // As a message has been received, removes the need for backoff for the next retry by reseting the try count.
103 tryCnt = 0
104 reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING)
105 }
106 }
107}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 89c4d45..c2f2c77 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -1,18 +1,7 @@
1// Code generated by protoc-gen-go. 1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: health.proto 2// source: grpc/health/v1/health.proto
3// DO NOT EDIT!
4 3
5/* 4package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
6Package grpc_health_v1 is a generated protocol buffer package.
7
8It is generated from these files:
9 health.proto
10
11It has these top-level messages:
12 HealthCheckRequest
13 HealthCheckResponse
14*/
15package grpc_health_v1
16 5
17import proto "github.com/golang/protobuf/proto" 6import proto "github.com/golang/protobuf/proto"
18import fmt "fmt" 7import fmt "fmt"
@@ -37,46 +26,107 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
37type HealthCheckResponse_ServingStatus int32 26type HealthCheckResponse_ServingStatus int32
38 27
39const ( 28const (
40 HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 29 HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
41 HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 30 HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
42 HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 31 HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
32 HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3
43) 33)
44 34
45var HealthCheckResponse_ServingStatus_name = map[int32]string{ 35var HealthCheckResponse_ServingStatus_name = map[int32]string{
46 0: "UNKNOWN", 36 0: "UNKNOWN",
47 1: "SERVING", 37 1: "SERVING",
48 2: "NOT_SERVING", 38 2: "NOT_SERVING",
39 3: "SERVICE_UNKNOWN",
49} 40}
50var HealthCheckResponse_ServingStatus_value = map[string]int32{ 41var HealthCheckResponse_ServingStatus_value = map[string]int32{
51 "UNKNOWN": 0, 42 "UNKNOWN": 0,
52 "SERVING": 1, 43 "SERVING": 1,
53 "NOT_SERVING": 2, 44 "NOT_SERVING": 2,
45 "SERVICE_UNKNOWN": 3,
54} 46}
55 47
56func (x HealthCheckResponse_ServingStatus) String() string { 48func (x HealthCheckResponse_ServingStatus) String() string {
57 return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) 49 return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
58} 50}
59func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { 51func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
60 return fileDescriptor0, []int{1, 0} 52 return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
61} 53}
62 54
63type HealthCheckRequest struct { 55type HealthCheckRequest struct {
64 Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` 56 Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
57 XXX_NoUnkeyedLiteral struct{} `json:"-"`
58 XXX_unrecognized []byte `json:"-"`
59 XXX_sizecache int32 `json:"-"`
60}
61
62func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
63func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
64func (*HealthCheckRequest) ProtoMessage() {}
65func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
66 return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
67}
68func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
69 return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
70}
71func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
72 return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
73}
74func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
75 xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
76}
77func (m *HealthCheckRequest) XXX_Size() int {
78 return xxx_messageInfo_HealthCheckRequest.Size(m)
79}
80func (m *HealthCheckRequest) XXX_DiscardUnknown() {
81 xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
65} 82}
66 83
67func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } 84var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo
68func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } 85
69func (*HealthCheckRequest) ProtoMessage() {} 86func (m *HealthCheckRequest) GetService() string {
70func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 87 if m != nil {
88 return m.Service
89 }
90 return ""
91}
71 92
72type HealthCheckResponse struct { 93type HealthCheckResponse struct {
73 Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` 94 Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
95 XXX_NoUnkeyedLiteral struct{} `json:"-"`
96 XXX_unrecognized []byte `json:"-"`
97 XXX_sizecache int32 `json:"-"`
74} 98}
75 99
76func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } 100func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
77func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } 101func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
78func (*HealthCheckResponse) ProtoMessage() {} 102func (*HealthCheckResponse) ProtoMessage() {}
79func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } 103func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
104 return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
105}
106func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
107 return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
108}
109func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
110 return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
111}
112func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
113 xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
114}
115func (m *HealthCheckResponse) XXX_Size() int {
116 return xxx_messageInfo_HealthCheckResponse.Size(m)
117}
118func (m *HealthCheckResponse) XXX_DiscardUnknown() {
119 xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
120}
121
122var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo
123
124func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
125 if m != nil {
126 return m.Status
127 }
128 return HealthCheckResponse_UNKNOWN
129}
80 130
81func init() { 131func init() {
82 proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") 132 proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
@@ -92,10 +142,29 @@ var _ grpc.ClientConn
92// is compatible with the grpc package it is being compiled against. 142// is compatible with the grpc package it is being compiled against.
93const _ = grpc.SupportPackageIsVersion4 143const _ = grpc.SupportPackageIsVersion4
94 144
95// Client API for Health service 145// HealthClient is the client API for Health service.
96 146//
147// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
97type HealthClient interface { 148type HealthClient interface {
149 // If the requested service is unknown, the call will fail with status
150 // NOT_FOUND.
98 Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) 151 Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
152 // Performs a watch for the serving status of the requested service.
153 // The server will immediately send back a message indicating the current
154 // serving status. It will then subsequently send a new message whenever
155 // the service's serving status changes.
156 //
157 // If the requested service is unknown when the call is received, the
158 // server will send a message setting the serving status to
159 // SERVICE_UNKNOWN but will *not* terminate the call. If at some
160 // future point, the serving status of the service becomes known, the
161 // server will send a new message with the service's serving status.
162 //
163 // If the call terminates with status UNIMPLEMENTED, then clients
164 // should assume this method is not supported and should not retry the
165 // call. If the call terminates with any other status (including OK),
166 // clients should retry the call with appropriate exponential backoff.
167 Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
99} 168}
100 169
101type healthClient struct { 170type healthClient struct {
@@ -108,17 +177,66 @@ func NewHealthClient(cc *grpc.ClientConn) HealthClient {
108 177
109func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { 178func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
110 out := new(HealthCheckResponse) 179 out := new(HealthCheckResponse)
111 err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) 180 err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
112 if err != nil { 181 if err != nil {
113 return nil, err 182 return nil, err
114 } 183 }
115 return out, nil 184 return out, nil
116} 185}
117 186
118// Server API for Health service 187func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
188 stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
189 if err != nil {
190 return nil, err
191 }
192 x := &healthWatchClient{stream}
193 if err := x.ClientStream.SendMsg(in); err != nil {
194 return nil, err
195 }
196 if err := x.ClientStream.CloseSend(); err != nil {
197 return nil, err
198 }
199 return x, nil
200}
201
202type Health_WatchClient interface {
203 Recv() (*HealthCheckResponse, error)
204 grpc.ClientStream
205}
206
207type healthWatchClient struct {
208 grpc.ClientStream
209}
119 210
211func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
212 m := new(HealthCheckResponse)
213 if err := x.ClientStream.RecvMsg(m); err != nil {
214 return nil, err
215 }
216 return m, nil
217}
218
219// HealthServer is the server API for Health service.
120type HealthServer interface { 220type HealthServer interface {
221 // If the requested service is unknown, the call will fail with status
222 // NOT_FOUND.
121 Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) 223 Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
224 // Performs a watch for the serving status of the requested service.
225 // The server will immediately send back a message indicating the current
226 // serving status. It will then subsequently send a new message whenever
227 // the service's serving status changes.
228 //
229 // If the requested service is unknown when the call is received, the
230 // server will send a message setting the serving status to
231 // SERVICE_UNKNOWN but will *not* terminate the call. If at some
232 // future point, the serving status of the service becomes known, the
233 // server will send a new message with the service's serving status.
234 //
235 // If the call terminates with status UNIMPLEMENTED, then clients
236 // should assume this method is not supported and should not retry the
237 // call. If the call terminates with any other status (including OK),
238 // clients should retry the call with appropriate exponential backoff.
239 Watch(*HealthCheckRequest, Health_WatchServer) error
122} 240}
123 241
124func RegisterHealthServer(s *grpc.Server, srv HealthServer) { 242func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
@@ -143,6 +261,27 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf
143 return interceptor(ctx, in, info, handler) 261 return interceptor(ctx, in, info, handler)
144} 262}
145 263
264func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
265 m := new(HealthCheckRequest)
266 if err := stream.RecvMsg(m); err != nil {
267 return err
268 }
269 return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
270}
271
272type Health_WatchServer interface {
273 Send(*HealthCheckResponse) error
274 grpc.ServerStream
275}
276
277type healthWatchServer struct {
278 grpc.ServerStream
279}
280
281func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
282 return x.ServerStream.SendMsg(m)
283}
284
146var _Health_serviceDesc = grpc.ServiceDesc{ 285var _Health_serviceDesc = grpc.ServiceDesc{
147 ServiceName: "grpc.health.v1.Health", 286 ServiceName: "grpc.health.v1.Health",
148 HandlerType: (*HealthServer)(nil), 287 HandlerType: (*HealthServer)(nil),
@@ -152,25 +291,37 @@ var _Health_serviceDesc = grpc.ServiceDesc{
152 Handler: _Health_Check_Handler, 291 Handler: _Health_Check_Handler,
153 }, 292 },
154 }, 293 },
155 Streams: []grpc.StreamDesc{}, 294 Streams: []grpc.StreamDesc{
156 Metadata: "health.proto", 295 {
157} 296 StreamName: "Watch",
158 297 Handler: _Health_Watch_Handler,
159func init() { proto.RegisterFile("health.proto", fileDescriptor0) } 298 ServerStreams: true,
160 299 },
161var fileDescriptor0 = []byte{ 300 },
162 // 204 bytes of a gzipped FileDescriptorProto 301 Metadata: "grpc/health/v1/health.proto",
163 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, 302}
164 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83, 303
165 0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41, 304func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
166 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, 305
167 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14, 306var fileDescriptor_health_6b1a06aa67f91efd = []byte{
168 0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5, 307 // 297 bytes of a gzipped FileDescriptorProto
169 0xc5, 0x60, 0x0d, 0x7c, 0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32, 308 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
170 0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10, 309 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
171 0x37, 0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a, 310 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
172 0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13, 311 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
173 0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09, 312 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
174 0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04, 313 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
175 0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00, 314 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
315 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
316 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
317 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
318 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
319 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
320 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
321 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
322 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
323 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
324 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
325 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
326 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
176} 327}
diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go
deleted file mode 100644
index 4dccbc7..0000000
--- a/vendor/google.golang.org/grpc/health/health.go
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package health provides some utility functions to health-check a server. The implementation
20// is based on protobuf. Users need to write their own implementations if other IDLs are used.
21package health
22
23import (
24 "sync"
25
26 "golang.org/x/net/context"
27 "google.golang.org/grpc"
28 "google.golang.org/grpc/codes"
29 healthpb "google.golang.org/grpc/health/grpc_health_v1"
30)
31
32// Server implements `service Health`.
33type Server struct {
34 mu sync.Mutex
35 // statusMap stores the serving status of the services this Server monitors.
36 statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
37}
38
39// NewServer returns a new Server.
40func NewServer() *Server {
41 return &Server{
42 statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus),
43 }
44}
45
46// Check implements `service Health`.
47func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
48 s.mu.Lock()
49 defer s.mu.Unlock()
50 if in.Service == "" {
51 // check the server overall health status.
52 return &healthpb.HealthCheckResponse{
53 Status: healthpb.HealthCheckResponse_SERVING,
54 }, nil
55 }
56 if status, ok := s.statusMap[in.Service]; ok {
57 return &healthpb.HealthCheckResponse{
58 Status: status,
59 }, nil
60 }
61 return nil, grpc.Errorf(codes.NotFound, "unknown service")
62}
63
64// SetServingStatus is called when need to reset the serving status of a service
65// or insert a new service entry into the statusMap.
66func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
67 s.mu.Lock()
68 s.statusMap[service] = status
69 s.mu.Unlock()
70}
diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh
new file mode 100644
index 0000000..b11eccb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/regenerate.sh
@@ -0,0 +1,33 @@
#!/bin/bash
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -eux -o pipefail

# Work in a throwaway directory that is removed even when the script fails.
TMP=$(mktemp -d)

function finish {
  rm -rf "$TMP"
}
trap finish EXIT

pushd "$TMP"
mkdir -p grpc/health/v1
# --fail makes curl exit non-zero on an HTTP error (e.g. 404) instead of
# writing the error page into health.proto; set -e then aborts the script.
curl --fail https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto

protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto
popd
rm -f grpc_health_v1/*.pb.go
cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/
diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go
new file mode 100644
index 0000000..c79f9d2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/server.go
@@ -0,0 +1,165 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19//go:generate ./regenerate.sh
20
21// Package health provides a service that exposes server's health and it must be
22// imported to enable support for client-side health checks.
23package health
24
25import (
26 "context"
27 "sync"
28
29 "google.golang.org/grpc/codes"
30 "google.golang.org/grpc/grpclog"
31 healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
32 healthpb "google.golang.org/grpc/health/grpc_health_v1"
33 "google.golang.org/grpc/status"
34)
35
// Server implements `service Health`.
type Server struct {
	// mu guards every field below.
	mu sync.Mutex
	// If shutdown is true, it's expected all serving status is NOT_SERVING, and
	// will stay in NOT_SERVING.
	shutdown bool
	// statusMap stores the serving status of the services this Server monitors.
	statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
	// updates maps a service name to the per-Watch-stream channels used to push
	// status changes to the active watchers of that service.
	updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus
}
46
47// NewServer returns a new Server.
48func NewServer() *Server {
49 return &Server{
50 statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
51 updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
52 }
53}
54
55// Check implements `service Health`.
56func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
57 s.mu.Lock()
58 defer s.mu.Unlock()
59 if servingStatus, ok := s.statusMap[in.Service]; ok {
60 return &healthpb.HealthCheckResponse{
61 Status: servingStatus,
62 }, nil
63 }
64 return nil, status.Error(codes.NotFound, "unknown service")
65}
66
// Watch implements `service Health`. It streams the current serving status of
// the requested service and every subsequent change until the client's context
// ends.
func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
	service := in.Service
	// update channel is used for getting service status updates.
	// Capacity 1 lets setServingStatusLocked replace a pending unread status
	// (drain + send) without blocking while it holds s.mu.
	update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
	s.mu.Lock()
	// Puts the initial status to the channel.
	if servingStatus, ok := s.statusMap[service]; ok {
		update <- servingStatus
	} else {
		update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
	}

	// Registers the update channel to the correct place in the updates map.
	if _, ok := s.updates[service]; !ok {
		s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus)
	}
	s.updates[service][stream] = update
	// Deregister this stream when the RPC ends so setServingStatusLocked stops
	// writing to its channel. Note the (possibly empty) per-service map stays.
	defer func() {
		s.mu.Lock()
		delete(s.updates[service], stream)
		s.mu.Unlock()
	}()
	s.mu.Unlock()

	// -1 is not a valid ServingStatus, so the first status received is always sent.
	var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1
	for {
		select {
		// Status updated. Sends the up-to-date status to the client.
		case servingStatus := <-update:
			if lastSentStatus == servingStatus {
				continue
			}
			lastSentStatus = servingStatus
			err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus})
			if err != nil {
				return status.Error(codes.Canceled, "Stream has ended.")
			}
		// Context done. Removes the update channel from the updates map.
		case <-stream.Context().Done():
			return status.Error(codes.Canceled, "Stream has ended.")
		}
	}
}
111
112// SetServingStatus is called when need to reset the serving status of a service
113// or insert a new service entry into the statusMap.
114func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
115 s.mu.Lock()
116 defer s.mu.Unlock()
117 if s.shutdown {
118 grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
119 return
120 }
121
122 s.setServingStatusLocked(service, servingStatus)
123}
124
// setServingStatusLocked records the new status and fans it out to every
// active watcher of the service. Callers must hold s.mu.
func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
	s.statusMap[service] = servingStatus
	for _, update := range s.updates[service] {
		// Clears previous updates, that are not sent to the client, from the channel.
		// This can happen if the client is not reading and the server gets flow control limited.
		select {
		case <-update:
		default:
		}
		// Puts the most recent update to the channel. The drain above guarantees
		// room in the capacity-1 channel, so this never blocks under s.mu.
		update <- servingStatus
	}
}
138
// Shutdown sets all serving status to NOT_SERVING, and configures the server to
// ignore all future status changes.
//
// This changes serving status for all services. To set status for a particular
// service, call SetServingStatus().
func (s *Server) Shutdown() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.shutdown = true
	for service := range s.statusMap {
		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING)
	}
}
152
// Resume sets all serving status to SERVING, and configures the server to
// accept all future status changes.
//
// This changes serving status for all services. To set status for a particular
// service, call SetServingStatus().
func (s *Server) Resume() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.shutdown = false
	for service := range s.statusMap {
		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING)
	}
}
diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh
new file mode 100644
index 0000000..7c7bcad
--- /dev/null
+++ b/vendor/google.golang.org/grpc/install_gae.sh
@@ -0,0 +1,6 @@
#!/bin/bash

# Downloads and unpacks the Go App Engine SDK and puts it on PATH.
# --fail makes curl abort on an HTTP error instead of downloading the error
# page; "$TMP" is quoted everywhere as a defensive shell-scripting practice.
TMP=$(mktemp -d /tmp/sdk.XXX) \
&& curl --fail -o "$TMP.zip" "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
&& unzip -q "$TMP.zip" -d "$TMP" \
&& export PATH="$PATH:$TMP/go_appengine"
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
index 06dc825..8b73500 100644
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -19,7 +19,7 @@
19package grpc 19package grpc
20 20
21import ( 21import (
22 "golang.org/x/net/context" 22 "context"
23) 23)
24 24
25// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. 25// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
@@ -48,7 +48,9 @@ type UnaryServerInfo struct {
48} 48}
49 49
50// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal 50// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
51// execution of a unary RPC. 51// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
52// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
53// the status message of the RPC.
52type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) 54type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
53 55
54// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info 56// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 0000000..1bd0cce
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,78 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
// Package backoff implements the backoff strategy for gRPC.
20//
21// This is kept in internal until the gRPC project decides whether or not to
22// allow alternative backoff strategies.
23package backoff
24
25import (
26 "time"
27
28 "google.golang.org/grpc/internal/grpcrand"
29)
30
// Strategy defines the methodology for backing off after a grpc connection
// failure.
type Strategy interface {
	// Backoff returns the amount of time to wait before the next retry given
	// the number of consecutive failures.
	Backoff(retries int) time.Duration
}
39
40const (
41 // baseDelay is the amount of time to wait before retrying after the first
42 // failure.
43 baseDelay = 1.0 * time.Second
44 // factor is applied to the backoff after each retry.
45 factor = 1.6
46 // jitter provides a range to randomize backoff delays.
47 jitter = 0.2
48)
49
50// Exponential implements exponential backoff algorithm as defined in
51// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
52type Exponential struct {
53 // MaxDelay is the upper bound of backoff delay.
54 MaxDelay time.Duration
55}
56
57// Backoff returns the amount of time to wait before the next retry given the
58// number of retries.
59func (bc Exponential) Backoff(retries int) time.Duration {
60 if retries == 0 {
61 return baseDelay
62 }
63 backoff, max := float64(baseDelay), float64(bc.MaxDelay)
64 for backoff < max && retries > 0 {
65 backoff *= factor
66 retries--
67 }
68 if backoff > max {
69 backoff = max
70 }
71 // Randomize backoff delays so that if a cluster of requests start at
72 // the same time, they won't operate in lockstep.
73 backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
74 if backoff < 0 {
75 return 0
76 }
77 return time.Duration(backoff)
78}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 0000000..fee6aec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,167 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
// Package binarylog implements binary logging as defined in
20// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
21package binarylog
22
23import (
24 "fmt"
25 "os"
26
27 "google.golang.org/grpc/grpclog"
28)
29
// Logger is the global binary logger. It can be used to get binary logger for
// each method.
type Logger interface {
	getMethodLogger(methodName string) *MethodLogger
}

// binLogger is the global binary logger for the binary. One of this should be
// built at init time from the configuration (environment variable or flags).
//
// It is used to get a methodLogger for each individual method.
var binLogger Logger

// SetLogger sets the binary logger.
//
// Only call this at init time.
func SetLogger(l Logger) {
	binLogger = l
}
48
49// GetMethodLogger returns the methodLogger for the given methodName.
50//
51// methodName should be in the format of "/service/method".
52//
53// Each methodLogger returned by this method is a new instance. This is to
54// generate sequence id within the call.
55func GetMethodLogger(methodName string) *MethodLogger {
56 if binLogger == nil {
57 return nil
58 }
59 return binLogger.getMethodLogger(methodName)
60}
61
// init seeds binLogger from the GRPC_BINARY_LOG_FILTER environment variable so
// binary logging is configured before any RPCs run.
func init() {
	const envStr = "GRPC_BINARY_LOG_FILTER"
	configStr := os.Getenv(envStr)
	binLogger = NewLoggerFromConfigString(configStr)
}
67
// methodLoggerConfig holds the per-rule logging limits.
type methodLoggerConfig struct {
	// Max length of header and message.
	hdr, msg uint64
}

// logger is the concrete Logger built from a config string. Lookup precedence
// (see getMethodLogger): exact method, then blacklist, then service wildcard,
// then the global "*" rule.
type logger struct {
	all      *methodLoggerConfig
	services map[string]*methodLoggerConfig
	methods  map[string]*methodLoggerConfig

	blacklist map[string]struct{}
}
80
81// newEmptyLogger creates an empty logger. The map fields need to be filled in
82// using the set* functions.
83func newEmptyLogger() *logger {
84 return &logger{}
85}
86
87// Set method logger for "*".
88func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
89 if l.all != nil {
90 return fmt.Errorf("conflicting global rules found")
91 }
92 l.all = ml
93 return nil
94}
95
96// Set method logger for "service/*".
97//
98// New methodLogger with same service overrides the old one.
99func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
100 if _, ok := l.services[service]; ok {
101 return fmt.Errorf("conflicting rules for service %v found", service)
102 }
103 if l.services == nil {
104 l.services = make(map[string]*methodLoggerConfig)
105 }
106 l.services[service] = ml
107 return nil
108}
109
110// Set method logger for "service/method".
111//
112// New methodLogger with same method overrides the old one.
113func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
114 if _, ok := l.blacklist[method]; ok {
115 return fmt.Errorf("conflicting rules for method %v found", method)
116 }
117 if _, ok := l.methods[method]; ok {
118 return fmt.Errorf("conflicting rules for method %v found", method)
119 }
120 if l.methods == nil {
121 l.methods = make(map[string]*methodLoggerConfig)
122 }
123 l.methods[method] = ml
124 return nil
125}
126
127// Set blacklist method for "-service/method".
128func (l *logger) setBlacklist(method string) error {
129 if _, ok := l.blacklist[method]; ok {
130 return fmt.Errorf("conflicting rules for method %v found", method)
131 }
132 if _, ok := l.methods[method]; ok {
133 return fmt.Errorf("conflicting rules for method %v found", method)
134 }
135 if l.blacklist == nil {
136 l.blacklist = make(map[string]struct{})
137 }
138 l.blacklist[method] = struct{}{}
139 return nil
140}
141
142// getMethodLogger returns the methodLogger for the given methodName.
143//
144// methodName should be in the format of "/service/method".
145//
146// Each methodLogger returned by this method is a new instance. This is to
147// generate sequence id within the call.
148func (l *logger) getMethodLogger(methodName string) *MethodLogger {
149 s, m, err := parseMethodName(methodName)
150 if err != nil {
151 grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
152 return nil
153 }
154 if ml, ok := l.methods[s+"/"+m]; ok {
155 return newMethodLogger(ml.hdr, ml.msg)
156 }
157 if _, ok := l.blacklist[s+"/"+m]; ok {
158 return nil
159 }
160 if ml, ok := l.services[s]; ok {
161 return newMethodLogger(ml.hdr, ml.msg)
162 }
163 if l.all == nil {
164 return nil
165 }
166 return newMethodLogger(l.all.hdr, l.all.msg)
167}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 0000000..1ee00a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// This file contains exported variables/functions that are exported for testing
20// only.
21//
22// An ideal way for this would be to put those in a *_test.go but in binarylog
23// package. But this doesn't work with staticcheck with go module. Error was:
24// "MdToMetadataProto not declared by package binarylog". This could be caused
25// by the way staticcheck looks for files for a certain package, which doesn't
26// support *_test.go files.
27//
28// Move those to binary_test.go when staticcheck is fixed.
29
30package binarylog
31
// Test-only aliases for unexported helpers; see the file comment above for why
// these live here rather than in a _test.go file.
var (
	// AllLogger is a logger that logs all headers/messages for all RPCs. It's
	// for testing only.
	AllLogger = NewLoggerFromConfigString("*")
	// MdToMetadataProto converts metadata to a binary logging proto message.
	// It's for testing only.
	MdToMetadataProto = mdToMetadataProto
	// AddrToProto converts an address to a binary logging proto message. It's
	// for testing only.
	AddrToProto = addrToProto
)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 0000000..eb188ea
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,210 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package binarylog
20
21import (
22 "errors"
23 "fmt"
24 "regexp"
25 "strconv"
26 "strings"
27
28 "google.golang.org/grpc/grpclog"
29)
30
// NewLoggerFromConfigString reads the string and builds a logger. It can be
// used to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
//  - "" Nothing will be logged
//  - "*" All headers and messages will be fully logged.
//  - "*{h}" Only headers will be logged.
//  - "*{m:256}" Only the first 256 bytes of each message will be logged.
//  - "Foo/*" Logs every method in service Foo
//  - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
//  - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
//    /Foo/Bar, logs all headers and messages in every other method in service
//    Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.
//
// An invalid rule anywhere in the string disables logging entirely (returns
// nil) after emitting a warning.
func NewLoggerFromConfigString(s string) Logger {
	if s == "" {
		return nil
	}
	l := newEmptyLogger()
	methods := strings.Split(s, ",")
	for _, method := range methods {
		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
			grpclog.Warningf("failed to parse binary log config: %v", err)
			return nil
		}
	}
	return l
}
61
62// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
63// it to the right map in the logger.
64func (l *logger) fillMethodLoggerWithConfigString(config string) error {
65 // "" is invalid.
66 if config == "" {
67 return errors.New("empty string is not a valid method binary logging config")
68 }
69
70 // "-service/method", blacklist, no * or {} allowed.
71 if config[0] == '-' {
72 s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
73 if err != nil {
74 return fmt.Errorf("invalid config: %q, %v", config, err)
75 }
76 if m == "*" {
77 return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config")
78 }
79 if suffix != "" {
80 return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
81 }
82 if err := l.setBlacklist(s + "/" + m); err != nil {
83 return fmt.Errorf("invalid config: %v", err)
84 }
85 return nil
86 }
87
88 // "*{h:256;m:256}"
89 if config[0] == '*' {
90 hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
91 if err != nil {
92 return fmt.Errorf("invalid config: %q, %v", config, err)
93 }
94 if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
95 return fmt.Errorf("invalid config: %v", err)
96 }
97 return nil
98 }
99
100 s, m, suffix, err := parseMethodConfigAndSuffix(config)
101 if err != nil {
102 return fmt.Errorf("invalid config: %q, %v", config, err)
103 }
104 hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
105 if err != nil {
106 return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
107 }
108 if m == "*" {
109 if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
110 return fmt.Errorf("invalid config: %v", err)
111 }
112 } else {
113 if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
114 return fmt.Errorf("invalid config: %v", err)
115 }
116 }
117 return nil
118}
119
const (
	// TODO: this const is only used by env_config now. But could be useful for
	// other config. Move to binarylog.go if necessary.
	maxUInt = ^uint64(0)

	// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
	// expected output.
	longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`

	// For suffix from above, "{h:123,m:123}". See test for expected output.
	optionalLengthRegexpStr      = `(?::(\d+))?` // Optional ":123".
	headerConfigRegexpStr        = `^{h` + optionalLengthRegexpStr + `}$`
	messageConfigRegexpStr       = `^{m` + optionalLengthRegexpStr + `}$`
	headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
)

var (
	longMethodConfigRegexp    = regexp.MustCompile(longMethodConfigRegexpStr)
	headerConfigRegexp        = regexp.MustCompile(headerConfigRegexpStr)
	messageConfigRegexp       = regexp.MustCompile(messageConfigRegexpStr)
	headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
)

// parseMethodConfigAndSuffix turns "service/method{h;m}" into "service",
// "method", "{h;m}". The suffix may be empty; invalid input yields an error.
func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
	// Regexp result:
	//
	// in:  "p.s/m{h:123,m:123}",
	// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
	match := longMethodConfigRegexp.FindStringSubmatch(c)
	if match == nil {
		return "", "", "", fmt.Errorf("%q contains invalid substring", c)
	}
	service = match[1]
	method = match[2]
	suffix = match[3]
	return
}

// parseHeaderMessageLengthConfig turns "{h:123;m:345}" into 123, 345.
//
// Returns maxUInt if a length is unspecified. (Result names were hdrLenStr/
// msgLenStr, misleading for uint64 values.)
func parseHeaderMessageLengthConfig(c string) (hdrLen, msgLen uint64, err error) {
	if c == "" {
		return maxUInt, maxUInt, nil
	}
	// Header config only.
	if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
		if s := match[1]; s != "" {
			hdrLen, err = strconv.ParseUint(s, 10, 64)
			if err != nil {
				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
			}
			return hdrLen, 0, nil
		}
		return maxUInt, 0, nil
	}

	// Message config only.
	if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
		if s := match[1]; s != "" {
			msgLen, err = strconv.ParseUint(s, 10, 64)
			if err != nil {
				// Error strings are lowercase per Go convention, consistent
				// with the header branch above.
				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
			}
			return 0, msgLen, nil
		}
		return 0, maxUInt, nil
	}

	// Header and message config both.
	if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
		// Both hdr and msg are specified, but one or two of them might be empty.
		hdrLen = maxUInt
		msgLen = maxUInt
		if s := match[1]; s != "" {
			hdrLen, err = strconv.ParseUint(s, 10, 64)
			if err != nil {
				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
			}
		}
		if s := match[2]; s != "" {
			msgLen, err = strconv.ParseUint(s, 10, 64)
			if err != nil {
				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
			}
		}
		return hdrLen, msgLen, nil
	}
	return 0, 0, fmt.Errorf("%q contains invalid substring", c)
}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
new file mode 100644
index 0000000..b06cdd4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -0,0 +1,426 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package binarylog
20
21import (
22 "net"
23 "strings"
24 "sync/atomic"
25 "time"
26
27 "github.com/golang/protobuf/proto"
28 "github.com/golang/protobuf/ptypes"
29 pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
30 "google.golang.org/grpc/grpclog"
31 "google.golang.org/grpc/metadata"
32 "google.golang.org/grpc/status"
33)
34
// callIDGenerator hands out sequential uint64 IDs starting at 1.
type callIDGenerator struct {
	id uint64
}

// next returns the next ID; safe for concurrent use.
func (g *callIDGenerator) next() uint64 {
	return atomic.AddUint64(&g.id, 1)
}

// reset is for testing only, and doesn't need to be thread safe.
func (g *callIDGenerator) reset() {
	g.id = 0
}

// idGen generates the call IDs shared by all method loggers in this process.
var idGen callIDGenerator
50
// MethodLogger is the sub-logger for each method.
type MethodLogger struct {
	// Byte caps applied to logged headers and messages; maxUInt = unlimited.
	headerMaxLen, messageMaxLen uint64

	// callID identifies the RPC; idWithinCallGen numbers entries inside it.
	callID          uint64
	idWithinCallGen *callIDGenerator

	sink Sink // TODO(blog): make this plugable.
}
60
61func newMethodLogger(h, m uint64) *MethodLogger {
62 return &MethodLogger{
63 headerMaxLen: h,
64 messageMaxLen: m,
65
66 callID: idGen.next(),
67 idWithinCallGen: &callIDGenerator{},
68
69 sink: defaultSink, // TODO(blog): make it plugable.
70 }
71}
72
// Log creates a proto binary log entry, and logs it to the sink.
func (ml *MethodLogger) Log(c LogEntryConfig) {
	m := c.toProto()
	// Error ignored: time.Now() is always representable as a proto timestamp.
	timestamp, _ := ptypes.TimestampProto(time.Now())
	m.Timestamp = timestamp
	m.CallId = ml.callID
	m.SequenceIdWithinCall = ml.idWithinCallGen.next()

	// Truncate potentially large payloads to this logger's configured caps,
	// recording whether anything was dropped.
	switch pay := m.Payload.(type) {
	case *pb.GrpcLogEntry_ClientHeader:
		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
	case *pb.GrpcLogEntry_ServerHeader:
		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
	case *pb.GrpcLogEntry_Message:
		m.PayloadTruncated = ml.truncateMessage(pay.Message)
	}

	ml.sink.Write(m)
}
92
// truncateMetadata drops metadata entries from the tail of mdPb.Entry so the
// total value size fits ml.headerMaxLen, reporting whether anything was
// dropped. A limit of maxUInt means "unlimited".
func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
	if ml.headerMaxLen == maxUInt {
		return false
	}
	var (
		bytesLimit = ml.headerMaxLen
		index      int
	)
	// At the end of the loop, index will be the first entry where the total
	// size is greater than the limit:
	//
	// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
	for ; index < len(mdPb.Entry); index++ {
		entry := mdPb.Entry[index]
		if entry.Key == "grpc-trace-bin" {
			// "grpc-trace-bin" is a special key. It's kept in the log entry,
			// but not counted towards the size limit.
			continue
		}
		currentEntryLen := uint64(len(entry.Value))
		if currentEntryLen > bytesLimit {
			break
		}
		bytesLimit -= currentEntryLen
	}
	truncated = index < len(mdPb.Entry)
	// NOTE(review): a "grpc-trace-bin" entry located after the cut point is
	// dropped here despite the exemption above — confirm that's intended.
	mdPb.Entry = mdPb.Entry[:index]
	return truncated
}
122
123func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
124 if ml.messageMaxLen == maxUInt {
125 return false
126 }
127 if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
128 return false
129 }
130 msgPb.Data = msgPb.Data[:ml.messageMaxLen]
131 return true
132}
133
// LogEntryConfig represents the configuration for binary log entry.
type LogEntryConfig interface {
	// toProto converts the config into a GrpcLogEntry proto, leaving
	// call-level bookkeeping fields for MethodLogger.Log to fill in.
	toProto() *pb.GrpcLogEntry
}
138
// ClientHeader configs the binary log entry to be a ClientHeader entry.
type ClientHeader struct {
	// OnClientSide selects LOGGER_CLIENT (true) or LOGGER_SERVER (false).
	OnClientSide bool
	Header       metadata.MD
	MethodName   string
	Authority    string
	// Timeout is only logged when positive.
	Timeout time.Duration
	// PeerAddr is required only when it's on server side.
	PeerAddr net.Addr
}

func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
	// This function doesn't need to set all the fields (e.g. seq ID). The Log
	// function will set the fields when necessary.
	clientHeader := &pb.ClientHeader{
		Metadata:   mdToMetadataProto(c.Header),
		MethodName: c.MethodName,
		Authority:  c.Authority,
	}
	if c.Timeout > 0 {
		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Payload: &pb.GrpcLogEntry_ClientHeader{
			ClientHeader: clientHeader,
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
	}
	return ret
}
177
178// ServerHeader configs the binary log entry to be a ServerHeader entry.
179type ServerHeader struct {
180 OnClientSide bool
181 Header metadata.MD
182 // PeerAddr is required only when it's on client side.
183 PeerAddr net.Addr
184}
185
186func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
187 ret := &pb.GrpcLogEntry{
188 Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
189 Payload: &pb.GrpcLogEntry_ServerHeader{
190 ServerHeader: &pb.ServerHeader{
191 Metadata: mdToMetadataProto(c.Header),
192 },
193 },
194 }
195 if c.OnClientSide {
196 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
197 } else {
198 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
199 }
200 if c.PeerAddr != nil {
201 ret.Peer = addrToProto(c.PeerAddr)
202 }
203 return ret
204}
205
// ClientMessage configs the binary log entry to be a ClientMessage entry.
type ClientMessage struct {
	// OnClientSide selects LOGGER_CLIENT when true, LOGGER_SERVER otherwise.
	OnClientSide bool
	// Message can be a proto.Message or []byte. Other messages formats are not
	// supported.
	Message interface{}
}
213
214func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
215 var (
216 data []byte
217 err error
218 )
219 if m, ok := c.Message.(proto.Message); ok {
220 data, err = proto.Marshal(m)
221 if err != nil {
222 grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
223 }
224 } else if b, ok := c.Message.([]byte); ok {
225 data = b
226 } else {
227 grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
228 }
229 ret := &pb.GrpcLogEntry{
230 Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
231 Payload: &pb.GrpcLogEntry_Message{
232 Message: &pb.Message{
233 Length: uint32(len(data)),
234 Data: data,
235 },
236 },
237 }
238 if c.OnClientSide {
239 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
240 } else {
241 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
242 }
243 return ret
244}
245
// ServerMessage configs the binary log entry to be a ServerMessage entry.
type ServerMessage struct {
	// OnClientSide selects LOGGER_CLIENT when true, LOGGER_SERVER otherwise.
	OnClientSide bool
	// Message can be a proto.Message or []byte. Other messages formats are not
	// supported.
	Message interface{}
}
253
254func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
255 var (
256 data []byte
257 err error
258 )
259 if m, ok := c.Message.(proto.Message); ok {
260 data, err = proto.Marshal(m)
261 if err != nil {
262 grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
263 }
264 } else if b, ok := c.Message.([]byte); ok {
265 data = b
266 } else {
267 grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
268 }
269 ret := &pb.GrpcLogEntry{
270 Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
271 Payload: &pb.GrpcLogEntry_Message{
272 Message: &pb.Message{
273 Length: uint32(len(data)),
274 Data: data,
275 },
276 },
277 }
278 if c.OnClientSide {
279 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
280 } else {
281 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
282 }
283 return ret
284}
285
// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
type ClientHalfClose struct {
	// OnClientSide selects LOGGER_CLIENT when true, LOGGER_SERVER otherwise.
	OnClientSide bool
}
290
291func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
292 ret := &pb.GrpcLogEntry{
293 Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
294 Payload: nil, // No payload here.
295 }
296 if c.OnClientSide {
297 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
298 } else {
299 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
300 }
301 return ret
302}
303
// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
type ServerTrailer struct {
	// OnClientSide selects LOGGER_CLIENT when true, LOGGER_SERVER otherwise.
	OnClientSide bool
	Trailer      metadata.MD
	// Err is the status error.
	Err error
	// PeerAddr is required only when it's on client side and the RPC is trailer
	// only.
	PeerAddr net.Addr
}
314
315func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
316 st, ok := status.FromError(c.Err)
317 if !ok {
318 grpclog.Info("binarylogging: error in trailer is not a status error")
319 }
320 var (
321 detailsBytes []byte
322 err error
323 )
324 stProto := st.Proto()
325 if stProto != nil && len(stProto.Details) != 0 {
326 detailsBytes, err = proto.Marshal(stProto)
327 if err != nil {
328 grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
329 }
330 }
331 ret := &pb.GrpcLogEntry{
332 Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
333 Payload: &pb.GrpcLogEntry_Trailer{
334 Trailer: &pb.Trailer{
335 Metadata: mdToMetadataProto(c.Trailer),
336 StatusCode: uint32(st.Code()),
337 StatusMessage: st.Message(),
338 StatusDetails: detailsBytes,
339 },
340 },
341 }
342 if c.OnClientSide {
343 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
344 } else {
345 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
346 }
347 if c.PeerAddr != nil {
348 ret.Peer = addrToProto(c.PeerAddr)
349 }
350 return ret
351}
352
// Cancel configs the binary log entry to be a Cancel entry.
type Cancel struct {
	// OnClientSide selects LOGGER_CLIENT when true, LOGGER_SERVER otherwise.
	OnClientSide bool
}
357
358func (c *Cancel) toProto() *pb.GrpcLogEntry {
359 ret := &pb.GrpcLogEntry{
360 Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
361 Payload: nil,
362 }
363 if c.OnClientSide {
364 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
365 } else {
366 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
367 }
368 return ret
369}
370
// metadataKeyOmit returns whether the metadata entry with this key should be
// omitted from the binary log.
func metadataKeyOmit(key string) bool {
	switch key {
	case "grpc-trace-bin":
		// grpc-trace-bin is special because it's visible to users.
		return false
	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
		return true
	}
	// All other grpc-internal headers are omitted.
	return strings.HasPrefix(key, "grpc-")
}
385
386func mdToMetadataProto(md metadata.MD) *pb.Metadata {
387 ret := &pb.Metadata{}
388 for k, vv := range md {
389 if metadataKeyOmit(k) {
390 continue
391 }
392 for _, v := range vv {
393 ret.Entry = append(ret.Entry,
394 &pb.MetadataEntry{
395 Key: k,
396 Value: []byte(v),
397 },
398 )
399 }
400 }
401 return ret
402}
403
404func addrToProto(addr net.Addr) *pb.Address {
405 ret := &pb.Address{}
406 switch a := addr.(type) {
407 case *net.TCPAddr:
408 if a.IP.To4() != nil {
409 ret.Type = pb.Address_TYPE_IPV4
410 } else if a.IP.To16() != nil {
411 ret.Type = pb.Address_TYPE_IPV6
412 } else {
413 ret.Type = pb.Address_TYPE_UNKNOWN
414 // Do not set address and port fields.
415 break
416 }
417 ret.Address = a.IP.String()
418 ret.IpPort = uint32(a.Port)
419 case *net.UnixAddr:
420 ret.Type = pb.Address_TYPE_UNIX
421 ret.Address = a.String()
422 default:
423 ret.Type = pb.Address_TYPE_UNKNOWN
424 }
425 return ret
426}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
new file mode 100644
index 0000000..113d40c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
@@ -0,0 +1,33 @@
1#!/bin/bash
2# Copyright 2018 gRPC authors.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
# Abort on any error or unset variable, echo commands, and let a failing
# command inside a pipeline fail the script.
set -eux -o pipefail

# Work in a throwaway directory, removed on exit.
TMP=$(mktemp -d)

function finish {
  rm -rf "$TMP"
}
trap finish EXIT

pushd "$TMP"
mkdir -p grpc/binarylog/grpc_binarylog_v1
# NOTE(review): curl is not passed --fail, so an HTTP error page would be
# saved as the .proto file and fail later at the protoc step — confirm this
# is acceptable for a manual regeneration script.
curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto

# Generate the Go bindings next to the fetched proto, then replace the
# checked-in generated files with the fresh ones.
protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
popd
rm -f ./grpc_binarylog_v1/*.pb.go
cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
33
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 0000000..20d044f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,162 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package binarylog
20
21import (
22 "bufio"
23 "encoding/binary"
24 "fmt"
25 "io"
26 "io/ioutil"
27 "sync"
28 "time"
29
30 "github.com/golang/protobuf/proto"
31 pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
32 "google.golang.org/grpc/grpclog"
33)
34
var (
	// defaultSink receives all binary log entries; it starts as a no-op sink
	// (entries are discarded) until replaced via SetDefaultSink.
	defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
)
38
39// SetDefaultSink sets the sink where binary logs will be written to.
40//
41// Not thread safe. Only set during initialization.
42func SetDefaultSink(s Sink) {
43 if defaultSink != nil {
44 defaultSink.Close()
45 }
46 defaultSink = s
47}
48
// Sink writes log entry into the binary log sink.
type Sink interface {
	// Write will be called to write the log entry into the sink.
	//
	// It should be thread-safe so it can be called in parallel.
	Write(*pb.GrpcLogEntry) error
	// Close will be called when the Sink is replaced by a new Sink.
	Close() error
}
58
// noopSink discards every entry; it is the initial default sink.
type noopSink struct{}

func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error                 { return nil }
63
// newWriterSink creates a binary log sink with the given writer.
//
// Write() marshals the proto message and writes it to the given writer. Each
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// No buffering is done, and Close() doesn't try to close the writer.
func newWriterSink(w io.Writer) *writerSink {
	return &writerSink{out: w}
}
73
// writerSink writes length-prefixed marshaled entries directly to out.
type writerSink struct {
	out io.Writer
}
77
78func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
79 b, err := proto.Marshal(e)
80 if err != nil {
81 grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
82 }
83 hdr := make([]byte, 4)
84 binary.BigEndian.PutUint32(hdr, uint32(len(b)))
85 if _, err := ws.out.Write(hdr); err != nil {
86 return err
87 }
88 if _, err := ws.out.Write(b); err != nil {
89 return err
90 }
91 return nil
92}
93
// Close is a no-op: writerSink does not own its writer.
func (ws *writerSink) Close() error { return nil }
95
// bufWriteCloserSink wraps a writerSink built on a bufio.Writer, flushing the
// buffer periodically from a background goroutine started on first Write.
type bufWriteCloserSink struct {
	mu     sync.Mutex
	closer io.Closer
	out    *writerSink   // out is built on buf.
	buf    *bufio.Writer // buf is kept for flush.

	// writeStartOnce guards the lazy start of the flush goroutine.
	writeStartOnce sync.Once
	writeTicker    *time.Ticker
}
105
106func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
107 // Start the write loop when Write is called.
108 fs.writeStartOnce.Do(fs.startFlushGoroutine)
109 fs.mu.Lock()
110 if err := fs.out.Write(e); err != nil {
111 fs.mu.Unlock()
112 return err
113 }
114 fs.mu.Unlock()
115 return nil
116}
117
const (
	// bufFlushDuration is how often the background goroutine flushes the
	// buffered writer.
	bufFlushDuration = 60 * time.Second
)
121
// startFlushGoroutine spawns a goroutine that flushes the buffer every
// bufFlushDuration. It is invoked at most once, via writeStartOnce.
//
// NOTE(review): Ticker.Stop does not close the channel, so after Close the
// goroutine blocks forever on the receive — a small goroutine leak. Fixing it
// needs a done channel in the struct; confirm whether that matters here.
func (fs *bufWriteCloserSink) startFlushGoroutine() {
	fs.writeTicker = time.NewTicker(bufFlushDuration)
	go func() {
		for range fs.writeTicker.C {
			fs.mu.Lock()
			fs.buf.Flush()
			fs.mu.Unlock()
		}
	}()
}
132
133func (fs *bufWriteCloserSink) Close() error {
134 if fs.writeTicker != nil {
135 fs.writeTicker.Stop()
136 }
137 fs.mu.Lock()
138 fs.buf.Flush()
139 fs.closer.Close()
140 fs.out.Close()
141 fs.mu.Unlock()
142 return nil
143}
144
145func newBufWriteCloserSink(o io.WriteCloser) Sink {
146 bufW := bufio.NewWriter(o)
147 return &bufWriteCloserSink{
148 closer: o,
149 out: newWriterSink(bufW),
150 buf: bufW,
151 }
152}
153
154// NewTempFileSink creates a temp file and returns a Sink that writes to this
155// file.
156func NewTempFileSink() (Sink, error) {
157 tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
158 if err != nil {
159 return nil, fmt.Errorf("failed to create temp file: %v", err)
160 }
161 return newBufWriteCloserSink(tempFile), nil
162}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
new file mode 100644
index 0000000..15dc780
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/util.go
@@ -0,0 +1,41 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package binarylog
20
21import (
22 "errors"
23 "strings"
24)
25
// parseMethodName splits service and method from the input. It expects format
// "/service/method".
//
// TODO: move to internal/grpcutil.
func parseMethodName(methodName string) (service, method string, _ error) {
	if !strings.HasPrefix(methodName, "/") {
		return "", "", errors.New("invalid method name: should start with /")
	}
	// Drop the leading slash; the method is everything after the last
	// remaining slash, the service is everything before it.
	rest := methodName[1:]
	sep := strings.LastIndex(rest, "/")
	if sep < 0 {
		return "", "", errors.New("invalid method name: suffix /method is missing")
	}
	return rest[:sep], rest[sep+1:], nil
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 0000000..041520d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,699 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package channelz defines APIs for enabling channelz service, entry
20// registration/deletion, and accessing channelz data. It also defines channelz
21// metric struct formats.
22//
23// All APIs in this package are experimental.
24package channelz
25
26import (
27 "sort"
28 "sync"
29 "sync/atomic"
30 "time"
31
32 "google.golang.org/grpc/grpclog"
33)
34
const (
	// defaultMaxTraceEntry is the default cap on trace events retained per
	// entity (channel/subchannel).
	defaultMaxTraceEntry int32 = 30
)
38
var (
	// db holds the active channelz storage; always access through
	// db.get()/db.set().
	db dbWrapper
	// idGen issues unique channelz ids; reset by NewChannelzStorage.
	idGen idGenerator
	// EntryPerPage defines the number of channelz entries to be shown on a web page.
	EntryPerPage = int64(50)
	// curState is 1 when collection is on, 0 otherwise; accessed atomically.
	curState int32
	// maxTraceEntry is the current per-entity trace cap; accessed atomically.
	maxTraceEntry = defaultMaxTraceEntry
)
47
48// TurnOn turns on channelz data collection.
49func TurnOn() {
50 if !IsOn() {
51 NewChannelzStorage()
52 atomic.StoreInt32(&curState, 1)
53 }
54}
55
56// IsOn returns whether channelz data collection is on.
57func IsOn() bool {
58 return atomic.CompareAndSwapInt32(&curState, 1, 1)
59}
60
// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
// Setting it to 0 will disable channel tracing.
// The value is stored atomically and read back by getMaxTraceEntry.
func SetMaxTraceEntry(i int32) {
	atomic.StoreInt32(&maxTraceEntry, i)
}
66
// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default.
func ResetMaxTraceEntryToDefault() {
	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
}
71
72func getMaxTraceEntry() int {
73 i := atomic.LoadInt32(&maxTraceEntry)
74 return int(i)
75}
76
// dbWrapper wraps around a reference to internal channelz data storage, and
// provides synchronized functionality to set and get the reference.
type dbWrapper struct {
	mu sync.RWMutex
	DB *channelMap
}
83
84func (d *dbWrapper) set(db *channelMap) {
85 d.mu.Lock()
86 d.DB = db
87 d.mu.Unlock()
88}
89
90func (d *dbWrapper) get() *channelMap {
91 d.mu.RLock()
92 defer d.mu.RUnlock()
93 return d.DB
94}
95
96// NewChannelzStorage initializes channelz data storage and id generator.
97//
98// Note: This function is exported for testing purpose only. User should not call
99// it in most cases.
100func NewChannelzStorage() {
101 db.set(&channelMap{
102 topLevelChannels: make(map[int64]struct{}),
103 channels: make(map[int64]*channel),
104 listenSockets: make(map[int64]*listenSocket),
105 normalSockets: make(map[int64]*normalSocket),
106 servers: make(map[int64]*server),
107 subChannels: make(map[int64]*subChannel),
108 })
109 idGen.reset()
110}
111
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
// boolean indicating whether there's more top channels to be queried for.
//
// The arg id specifies that only top channel with id at or above it will be included
// in the result. The returned slice is up to a length of the arg maxResults or
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
// It delegates to the currently installed channelMap.
func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
	return db.get().GetTopChannels(id, maxResults)
}
121
// GetServers returns a slice of server's ServerMetric, along with a
// boolean indicating whether there's more servers to be queried for.
//
// The arg id specifies that only server with id at or above it will be included
// in the result. The returned slice is up to a length of the arg maxResults or
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
// It delegates to the currently installed channelMap.
func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
	return db.get().GetServers(id, maxResults)
}
131
// GetServerSockets returns a slice of server's (identified by id) normal socket's
// SocketMetric, along with a boolean indicating whether there's more sockets to
// be queried for.
//
// The arg startID specifies that only sockets with id at or above it will be
// included in the result. The returned slice is up to a length of the arg maxResults
// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
// It delegates to the currently installed channelMap.
func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
	return db.get().GetServerSockets(id, startID, maxResults)
}
142
// GetChannel returns the ChannelMetric for the channel (identified by id).
// It returns nil if no channel with that id exists.
func GetChannel(id int64) *ChannelMetric {
	return db.get().GetChannel(id)
}
147
// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
// It returns nil if no subchannel with that id exists.
func GetSubChannel(id int64) *SubChannelMetric {
	return db.get().GetSubChannel(id)
}
152
// GetSocket returns the SocketInternalMetric for the socket (identified by id).
// It returns nil if no socket with that id exists.
func GetSocket(id int64) *SocketMetric {
	return db.get().GetSocket(id)
}
157
// GetServer returns the ServerMetric for the server (identified by id).
// It returns nil if no server with that id exists.
func GetServer(id int64) *ServerMetric {
	return db.get().GetServer(id)
}
162
163// RegisterChannel registers the given channel c in channelz database with ref
164// as its reference name, and add it to the child list of its parent (identified
165// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
166// assigned to this channel.
167func RegisterChannel(c Channel, pid int64, ref string) int64 {
168 id := idGen.genID()
169 cn := &channel{
170 refName: ref,
171 c: c,
172 subChans: make(map[int64]string),
173 nestedChans: make(map[int64]string),
174 id: id,
175 pid: pid,
176 trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
177 }
178 if pid == 0 {
179 db.get().addChannel(id, cn, true, pid, ref)
180 } else {
181 db.get().addChannel(id, cn, false, pid, ref)
182 }
183 return id
184}
185
186// RegisterSubChannel registers the given channel c in channelz database with ref
187// as its reference name, and add it to the child list of its parent (identified
188// by pid). It returns the unique channelz tracking id assigned to this subchannel.
189func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
190 if pid == 0 {
191 grpclog.Error("a SubChannel's parent id cannot be 0")
192 return 0
193 }
194 id := idGen.genID()
195 sc := &subChannel{
196 refName: ref,
197 c: c,
198 sockets: make(map[int64]string),
199 id: id,
200 pid: pid,
201 trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
202 }
203 db.get().addSubChannel(id, sc, pid, ref)
204 return id
205}
206
207// RegisterServer registers the given server s in channelz database. It returns
208// the unique channelz tracking id assigned to this server.
209func RegisterServer(s Server, ref string) int64 {
210 id := idGen.genID()
211 svr := &server{
212 refName: ref,
213 s: s,
214 sockets: make(map[int64]string),
215 listenSockets: make(map[int64]string),
216 id: id,
217 }
218 db.get().addServer(id, svr)
219 return id
220}
221
222// RegisterListenSocket registers the given listen socket s in channelz database
223// with ref as its reference name, and add it to the child list of its parent
224// (identified by pid). It returns the unique channelz tracking id assigned to
225// this listen socket.
226func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
227 if pid == 0 {
228 grpclog.Error("a ListenSocket's parent id cannot be 0")
229 return 0
230 }
231 id := idGen.genID()
232 ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
233 db.get().addListenSocket(id, ls, pid, ref)
234 return id
235}
236
237// RegisterNormalSocket registers the given normal socket s in channelz database
238// with ref as its reference name, and add it to the child list of its parent
239// (identified by pid). It returns the unique channelz tracking id assigned to
240// this normal socket.
241func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
242 if pid == 0 {
243 grpclog.Error("a NormalSocket's parent id cannot be 0")
244 return 0
245 }
246 id := idGen.genID()
247 ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
248 db.get().addNormalSocket(id, ns, pid, ref)
249 return id
250}
251
// RemoveEntry removes an entry with unique channelz tracking id to be id from
// channelz database. Deletion may be deferred until the entry's children are
// gone (see channelMap.removeEntry).
func RemoveEntry(id int64) {
	db.get().removeEntry(id)
}
257
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
// to the channel trace.
// The Parent field is optional. It is used for event that will be recorded in the entity's parent
// trace also.
type TraceEventDesc struct {
	Desc     string
	Severity Severity
	Parent   *TraceEventDesc
}
267
268// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
269func AddTraceEvent(id int64, desc *TraceEventDesc) {
270 if getMaxTraceEntry() == 0 {
271 return
272 }
273 db.get().traceEvent(id, desc)
274}
275
// channelMap is the storage data structure for channelz.
// Methods of channelMap can be divided in two categories with respect to locking.
// 1. Methods that acquire the global lock.
// 2. Methods that can only be called when the global lock is held.
// A second type of method must always be called inside a first type of method.
type channelMap struct {
	mu               sync.RWMutex
	topLevelChannels map[int64]struct{}
	servers          map[int64]*server
	channels         map[int64]*channel
	subChannels      map[int64]*subChannel
	listenSockets    map[int64]*listenSocket
	normalSockets    map[int64]*normalSocket
}
290
291func (c *channelMap) addServer(id int64, s *server) {
292 c.mu.Lock()
293 s.cm = c
294 c.servers[id] = s
295 c.mu.Unlock()
296}
297
298func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
299 c.mu.Lock()
300 cn.cm = c
301 cn.trace.cm = c
302 c.channels[id] = cn
303 if isTopChannel {
304 c.topLevelChannels[id] = struct{}{}
305 } else {
306 c.findEntry(pid).addChild(id, cn)
307 }
308 c.mu.Unlock()
309}
310
311func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
312 c.mu.Lock()
313 sc.cm = c
314 sc.trace.cm = c
315 c.subChannels[id] = sc
316 c.findEntry(pid).addChild(id, sc)
317 c.mu.Unlock()
318}
319
320func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
321 c.mu.Lock()
322 ls.cm = c
323 c.listenSockets[id] = ls
324 c.findEntry(pid).addChild(id, ls)
325 c.mu.Unlock()
326}
327
328func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
329 c.mu.Lock()
330 ns.cm = c
331 c.normalSockets[id] = ns
332 c.findEntry(pid).addChild(id, ns)
333 c.mu.Unlock()
334}
335
// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
// wait on the deletion of its children and until no other entity's channel trace references it.
// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
// shutting down server will lead to the server being also deleted.
func (c *channelMap) removeEntry(id int64) {
	c.mu.Lock()
	c.findEntry(id).triggerDelete()
	c.mu.Unlock()
}
345
// c.mu must be held by the caller.
// decrTraceRefCount decrements the trace reference count of the entity with
// the given id, then deletes the entity if it has become ready for deletion.
// Entities without a trace (non-traced kinds, or not found) are ignored.
func (c *channelMap) decrTraceRefCount(id int64) {
	e := c.findEntry(id)
	if v, ok := e.(tracedChannel); ok {
		v.decrTraceRefCount()
		e.deleteSelfIfReady()
	}
}
354
355// c.mu must be held by the caller.
356func (c *channelMap) findEntry(id int64) entry {
357 var v entry
358 var ok bool
359 if v, ok = c.channels[id]; ok {
360 return v
361 }
362 if v, ok = c.subChannels[id]; ok {
363 return v
364 }
365 if v, ok = c.servers[id]; ok {
366 return v
367 }
368 if v, ok = c.listenSockets[id]; ok {
369 return v
370 }
371 if v, ok = c.normalSockets[id]; ok {
372 return v
373 }
374 return &dummyEntry{idNotFound: id}
375}
376
377// c.mu must be held by the caller
378// deleteEntry simply deletes an entry from the channelMap. Before calling this
379// method, caller must check this entry is ready to be deleted, i.e removeEntry()
380// has been called on it, and no children still exist.
381// Conditionals are ordered by the expected frequency of deletion of each entity
382// type, in order to optimize performance.
383func (c *channelMap) deleteEntry(id int64) {
384 var ok bool
385 if _, ok = c.normalSockets[id]; ok {
386 delete(c.normalSockets, id)
387 return
388 }
389 if _, ok = c.subChannels[id]; ok {
390 delete(c.subChannels, id)
391 return
392 }
393 if _, ok = c.channels[id]; ok {
394 delete(c.channels, id)
395 delete(c.topLevelChannels, id)
396 return
397 }
398 if _, ok = c.listenSockets[id]; ok {
399 delete(c.listenSockets, id)
400 return
401 }
402 if _, ok = c.servers[id]; ok {
403 delete(c.servers, id)
404 return
405 }
406}
407
// traceEvent appends a trace event to the entity with the given id. If
// desc.Parent is set and the entity's parent is also traced, a referencing
// event is additionally recorded on the parent, and the child's trace
// reference count is incremented (balanced later via decrTraceRefCount).
func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
	c.mu.Lock()
	child := c.findEntry(id)
	childTC, ok := child.(tracedChannel)
	if !ok {
		// The entity does not exist or does not carry a trace.
		c.mu.Unlock()
		return
	}
	childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
	if desc.Parent != nil {
		parent := c.findEntry(child.getParentID())
		// Only channels and subchannels are recorded as typed references;
		// other kinds leave chanType at its zero value.
		var chanType RefChannelType
		switch child.(type) {
		case *channel:
			chanType = RefChannel
		case *subChannel:
			chanType = RefSubChannel
		}
		if parentTC, ok := parent.(tracedChannel); ok {
			parentTC.getChannelTrace().append(&TraceEvent{
				Desc:      desc.Parent.Desc,
				Severity:  desc.Parent.Severity,
				Timestamp: time.Now(),
				RefID:     id,
				RefName:   childTC.getRefName(),
				RefType:   chanType,
			})
			childTC.incrTraceRefCount()
		}
	}
	c.mu.Unlock()
}
440
// int64Slice implements sort.Interface for ascending []int64 sorting.
type int64Slice []int64

func (s int64Slice) Len() int           { return len(s) }
func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
446
// copyMap returns a shallow copy of m, independent of the original.
func copyMap(m map[int64]string) map[int64]string {
	out := make(map[int64]string, len(m))
	for id, ref := range m {
		out[id] = ref
	}
	return out
}
454
// min returns the smaller of a and b.
func min(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
461
// GetTopChannels collects up to maxResults (default EntryPerPage) top-level
// channels with id >= the given id, in ascending id order. The bool result
// reports whether the end of the listing was reached.
func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
	if maxResults <= 0 {
		maxResults = EntryPerPage
	}
	c.mu.RLock()
	l := int64(len(c.topLevelChannels))
	ids := make([]int64, 0, l)
	cns := make([]*channel, 0, min(l, maxResults))

	for k := range c.topLevelChannels {
		ids = append(ids, k)
	}
	sort.Sort(int64Slice(ids))
	// Binary-search for the first id at or above the requested start id.
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
	count := int64(0)
	var end bool
	var t []*ChannelMetric
	for i, v := range ids[idx:] {
		if count == maxResults {
			break
		}
		if cn, ok := c.channels[v]; ok {
			cns = append(cns, cn)
			t = append(t, &ChannelMetric{
				NestedChans: copyMap(cn.nestedChans),
				SubChans:    copyMap(cn.subChans),
			})
			count++
		}
		if i == len(ids[idx:])-1 {
			end = true
			break
		}
	}
	c.mu.RUnlock()
	if count == 0 {
		end = true
	}

	// Per-channel data is gathered after releasing the lock.
	// NOTE(review): cn.c/cn.trace are read without the lock here; presumably
	// ChannelzMetric()/dumpData() are safe to call concurrently — confirm.
	for i, cn := range cns {
		t[i].ChannelData = cn.c.ChannelzMetric()
		t[i].ID = cn.id
		t[i].RefName = cn.refName
		t[i].Trace = cn.trace.dumpData()
	}
	return t, end
}
509
// GetServers collects up to maxResults (default EntryPerPage) servers with
// id >= the given id, in ascending id order. The bool result reports whether
// the end of the listing was reached.
func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
	if maxResults <= 0 {
		maxResults = EntryPerPage
	}
	c.mu.RLock()
	l := int64(len(c.servers))
	ids := make([]int64, 0, l)
	ss := make([]*server, 0, min(l, maxResults))
	for k := range c.servers {
		ids = append(ids, k)
	}
	sort.Sort(int64Slice(ids))
	// Binary-search for the first id at or above the requested start id.
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
	count := int64(0)
	var end bool
	var s []*ServerMetric
	for i, v := range ids[idx:] {
		if count == maxResults {
			break
		}
		if svr, ok := c.servers[v]; ok {
			ss = append(ss, svr)
			s = append(s, &ServerMetric{
				ListenSockets: copyMap(svr.listenSockets),
			})
			count++
		}
		if i == len(ids[idx:])-1 {
			end = true
			break
		}
	}
	c.mu.RUnlock()
	if count == 0 {
		end = true
	}

	// Per-server data is gathered after releasing the lock.
	for i, svr := range ss {
		s[i].ServerData = svr.s.ChannelzMetric()
		s[i].ID = svr.id
		s[i].RefName = svr.refName
	}
	return s, end
}
554
// GetServerSockets collects up to maxResults (default EntryPerPage) normal
// sockets of the server identified by id, starting from startID, in ascending
// id order. If the server does not exist, it returns (nil, true).
func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
	if maxResults <= 0 {
		maxResults = EntryPerPage
	}
	var svr *server
	var ok bool
	c.mu.RLock()
	if svr, ok = c.servers[id]; !ok {
		// server with id doesn't exist.
		c.mu.RUnlock()
		return nil, true
	}
	svrskts := svr.sockets
	l := int64(len(svrskts))
	ids := make([]int64, 0, l)
	sks := make([]*normalSocket, 0, min(l, maxResults))
	for k := range svrskts {
		ids = append(ids, k)
	}
	sort.Sort(int64Slice(ids))
	// Binary-search for the first socket id at or above startID.
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
	count := int64(0)
	var end bool
	for i, v := range ids[idx:] {
		if count == maxResults {
			break
		}
		if ns, ok := c.normalSockets[v]; ok {
			sks = append(sks, ns)
			count++
		}
		if i == len(ids[idx:])-1 {
			end = true
			break
		}
	}
	c.mu.RUnlock()
	if count == 0 {
		end = true
	}
	// Per-socket data is gathered after releasing the lock.
	var s []*SocketMetric
	for _, ns := range sks {
		sm := &SocketMetric{}
		sm.SocketData = ns.s.ChannelzMetric()
		sm.ID = ns.id
		sm.RefName = ns.refName
		s = append(s, sm)
	}
	return s, end
}
605
// GetChannel builds the ChannelMetric for the channel identified by id, or
// returns nil if it does not exist. Child maps are copied under the read lock;
// the ChannelzMetric callback runs after the lock is released.
func (c *channelMap) GetChannel(id int64) *ChannelMetric {
	cm := &ChannelMetric{}
	var cn *channel
	var ok bool
	c.mu.RLock()
	if cn, ok = c.channels[id]; !ok {
		// channel with id doesn't exist.
		c.mu.RUnlock()
		return nil
	}
	cm.NestedChans = copyMap(cn.nestedChans)
	cm.SubChans = copyMap(cn.subChans)
	// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
	// holding the lock to prevent potential data race.
	chanCopy := cn.c
	c.mu.RUnlock()
	cm.ChannelData = chanCopy.ChannelzMetric()
	cm.ID = cn.id
	cm.RefName = cn.refName
	cm.Trace = cn.trace.dumpData()
	return cm
}
628
// GetSubChannel builds the SubChannelMetric for the subchannel identified by
// id, or returns nil if it does not exist. The socket map is copied under the
// read lock; the ChannelzMetric callback runs after the lock is released.
func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
	cm := &SubChannelMetric{}
	var sc *subChannel
	var ok bool
	c.mu.RLock()
	if sc, ok = c.subChannels[id]; !ok {
		// subchannel with id doesn't exist.
		c.mu.RUnlock()
		return nil
	}
	cm.Sockets = copyMap(sc.sockets)
	// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
	// holding the lock to prevent potential data race.
	chanCopy := sc.c
	c.mu.RUnlock()
	cm.ChannelData = chanCopy.ChannelzMetric()
	cm.ID = sc.id
	cm.RefName = sc.refName
	cm.Trace = sc.trace.dumpData()
	return cm
}
650
// GetSocket builds the SocketMetric for the listen or normal socket identified
// by id, or returns nil if neither exists. The read lock is released before
// calling ChannelzMetric on the found socket.
func (c *channelMap) GetSocket(id int64) *SocketMetric {
	sm := &SocketMetric{}
	c.mu.RLock()
	if ls, ok := c.listenSockets[id]; ok {
		c.mu.RUnlock()
		sm.SocketData = ls.s.ChannelzMetric()
		sm.ID = ls.id
		sm.RefName = ls.refName
		return sm
	}
	if ns, ok := c.normalSockets[id]; ok {
		c.mu.RUnlock()
		sm.SocketData = ns.s.ChannelzMetric()
		sm.ID = ns.id
		sm.RefName = ns.refName
		return sm
	}
	c.mu.RUnlock()
	return nil
}
671
// GetServer builds the ServerMetric for the server identified by id, or
// returns nil if it does not exist.
//
// NOTE(review): unlike GetChannel/GetSubChannel, svr.s is read after RUnlock
// without taking a copy under the lock — confirm whether the same
// "save a copy while holding the lock" pattern should apply here.
func (c *channelMap) GetServer(id int64) *ServerMetric {
	sm := &ServerMetric{}
	var svr *server
	var ok bool
	c.mu.RLock()
	if svr, ok = c.servers[id]; !ok {
		c.mu.RUnlock()
		return nil
	}
	sm.ListenSockets = copyMap(svr.listenSockets)
	c.mu.RUnlock()
	sm.ID = svr.id
	sm.RefName = svr.refName
	sm.ServerData = svr.s.ChannelzMetric()
	return sm
}
688
// idGenerator issues unique, monotonically increasing channelz ids.
type idGenerator struct {
	id int64
}

// reset sets the counter back to zero; used when channelz storage is recreated.
func (i *idGenerator) reset() {
	atomic.StoreInt64(&i.id, 0)
}

// genID returns the next id. Safe for concurrent use.
func (i *idGenerator) genID() int64 {
	return atomic.AddInt64(&i.id, 1)
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
new file mode 100644
index 0000000..17c2274
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -0,0 +1,702 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package channelz
20
21import (
22 "net"
23 "sync"
24 "sync/atomic"
25 "time"
26
27 "google.golang.org/grpc/connectivity"
28 "google.golang.org/grpc/credentials"
29 "google.golang.org/grpc/grpclog"
30)
31
// entry represents a node in the channelz database.
type entry interface {
	// addChild adds a child e, whose channelz id is id, to the child list.
	addChild(id int64, e entry)
	// deleteChild deletes the child with channelz id id from the child list.
	deleteChild(id int64)
	// triggerDelete tries to delete self from the channelz database. However, if the
	// child list is not empty, then deletion from the database is on hold until the
	// last child is deleted from the database.
	triggerDelete()
	// deleteSelfIfReady checks whether triggerDelete() has been called before, and
	// whether the child list is now empty. If both conditions are met, then delete
	// self from the database.
	deleteSelfIfReady()
	// getParentID returns the parent ID of the entry. A 0 parent ID means no parent.
	getParentID() int64
}

// dummyEntry is a fake entry to handle the entry-not-found case.
type dummyEntry struct {
	// idNotFound is the id that was looked up but absent from the database.
	idNotFound int64
}

func (d *dummyEntry) addChild(id int64, e entry) {
	// Note: It is possible for a normal program to reach here under race condition.
	// For example, there could be a race between ClientConn.Close() info being propagated
	// to addrConn and http2Client. ClientConn.Close() cancel the context and result
	// in http2Client to error. The error info is then caught by transport monitor
	// and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
	// the addrConn will create a new transport. And when registering the new transport in
	// channelz, its parent addrConn could have already been torn down and deleted
	// from channelz tracking, and thus reach the code here.
	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
}

func (d *dummyEntry) deleteChild(id int64) {
	// It is possible for a normal program to reach here under race condition.
	// Refer to the example described in addChild().
	grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
}

func (d *dummyEntry) triggerDelete() {
	grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
}

func (*dummyEntry) deleteSelfIfReady() {
	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
}

// getParentID reports no parent (0): a dummy entry has no real place in the tree.
func (*dummyEntry) getParentID() int64 {
	return 0
}
83
// ChannelMetric defines the info channelz provides for a specific Channel, which
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
// child list, etc.
type ChannelMetric struct {
	// ID is the channelz id of this channel.
	ID int64
	// RefName is the human readable reference string of this channel.
	RefName string
	// ChannelData contains channel internal metric reported by the channel through
	// ChannelzMetric().
	ChannelData *ChannelInternalMetric
	// NestedChans tracks the nested channel type children of this channel in the format of
	// a map from nested channel channelz id to corresponding reference string.
	NestedChans map[int64]string
	// SubChans tracks the subchannel type children of this channel in the format of a
	// map from subchannel channelz id to corresponding reference string.
	SubChans map[int64]string
	// Sockets tracks the socket type children of this channel in the format of a map
	// from socket channelz id to corresponding reference string.
	// Note current grpc implementation doesn't allow channel having sockets directly,
	// therefore, this field is unused.
	Sockets map[int64]string
	// Trace contains the most recent traced events.
	Trace *ChannelTrace
}

// SubChannelMetric defines the info channelz provides for a specific SubChannel,
// which includes ChannelInternalMetric and channelz-specific data, such as
// channelz id, child list, etc.
type SubChannelMetric struct {
	// ID is the channelz id of this subchannel.
	ID int64
	// RefName is the human readable reference string of this subchannel.
	RefName string
	// ChannelData contains subchannel internal metric reported by the subchannel
	// through ChannelzMetric().
	ChannelData *ChannelInternalMetric
	// NestedChans tracks the nested channel type children of this subchannel in the format of
	// a map from nested channel channelz id to corresponding reference string.
	// Note current grpc implementation doesn't allow subchannel to have nested channels
	// as children, therefore, this field is unused.
	NestedChans map[int64]string
	// SubChans tracks the subchannel type children of this subchannel in the format of a
	// map from subchannel channelz id to corresponding reference string.
	// Note current grpc implementation doesn't allow subchannel to have subchannels
	// as children, therefore, this field is unused.
	SubChans map[int64]string
	// Sockets tracks the socket type children of this subchannel in the format of a map
	// from socket channelz id to corresponding reference string.
	Sockets map[int64]string
	// Trace contains the most recent traced events.
	Trace *ChannelTrace
}

// ChannelInternalMetric defines the struct that the implementor of Channel interface
// should return from ChannelzMetric().
type ChannelInternalMetric struct {
	// State is the current connectivity state of the channel.
	State connectivity.State
	// Target is the target this channel originally tried to connect to. May be absent.
	Target string
	// CallsStarted is the number of calls started on the channel.
	CallsStarted int64
	// CallsSucceeded is the number of calls that have completed with an OK status.
	CallsSucceeded int64
	// CallsFailed is the number of calls that have completed with a non-OK status.
	CallsFailed int64
	// LastCallStartedTimestamp is the last time a call was started on the channel.
	LastCallStartedTimestamp time.Time
}

// ChannelTrace stores traced events on a channel/subchannel and related info.
type ChannelTrace struct {
	// EventNum is the number of events that ever got traced (i.e. including those
	// that have been deleted).
	EventNum int64
	// CreationTime is the creation time of the trace.
	CreationTime time.Time
	// Events stores the most recent trace events (up to $maxTraceEntry, newer event
	// will overwrite the oldest one).
	Events []*TraceEvent
}

// TraceEvent represents a single trace event.
type TraceEvent struct {
	// Desc is a simple description of the trace event.
	Desc string
	// Severity states the severity of this trace event.
	Severity Severity
	// Timestamp is the event time.
	Timestamp time.Time
	// RefID is the id of the entity that gets referenced in the event. RefID is 0
	// if no other entity is involved in this event.
	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
	RefID int64
	// RefName is the reference name for the entity that gets referenced in the event.
	RefName string
	// RefType indicates the referenced entity type, i.e Channel or SubChannel.
	RefType RefChannelType
}
183
// Channel is the interface that should be satisfied in order to be tracked by
// channelz as Channel or SubChannel.
type Channel interface {
	ChannelzMetric() *ChannelInternalMetric
}

// dummyChannel is a placeholder Channel implementation. It is swapped in for the
// real grpc object when the latter must be released while the channelz entry
// stays alive (see deleteSelfFromMap).
type dummyChannel struct{}

// ChannelzMetric returns an empty metric struct for the placeholder channel.
func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
	return &ChannelInternalMetric{}
}

// channel is channelz's internal representation of a (possibly nested) channel.
type channel struct {
	// refName is the human readable reference string of this channel.
	refName string
	// c is the grpc object backing this entry; reset to &dummyChannel{} once the
	// grpc object is released (see deleteSelfFromMap).
	c Channel
	// closeCalled records whether triggerDelete has been invoked.
	closeCalled bool
	// nestedChans maps nested-channel child id -> reference string.
	nestedChans map[int64]string
	// subChans maps subchannel child id -> reference string.
	subChans map[int64]string
	// id is this channel's own channelz id.
	id int64
	// pid is the parent's channelz id; 0 means no parent (top channel).
	pid int64
	// cm is the channelz database this entry belongs to.
	cm *channelMap
	// trace holds the traced events for this channel.
	trace *channelTrace
	// traceRefCount is the number of trace events that reference this channel.
	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
	traceRefCount int32
}
210
211func (c *channel) addChild(id int64, e entry) {
212 switch v := e.(type) {
213 case *subChannel:
214 c.subChans[id] = v.refName
215 case *channel:
216 c.nestedChans[id] = v.refName
217 default:
218 grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
219 }
220}
221
// deleteChild removes the child with channelz id id from both child maps and
// then attempts self-deletion, since the removed child may have been the last one.
func (c *channel) deleteChild(id int64) {
	delete(c.subChans, id)
	delete(c.nestedChans, id)
	c.deleteSelfIfReady()
}

// triggerDelete marks the channel as closed and attempts self-deletion; actual
// deletion is deferred until all children are gone (see deleteSelfIfReady).
func (c *channel) triggerDelete() {
	c.closeCalled = true
	c.deleteSelfIfReady()
}

// getParentID returns the parent's channelz id; 0 means this is a top channel.
func (c *channel) getParentID() int64 {
	return c.pid
}
236
237// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
238// deleting the channel reference from its parent's child list.
239//
240// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the
241// corresponding grpc object has been invoked, and the channel does not have any children left.
242//
243// The returned boolean value indicates whether the channel has been successfully deleted from tree.
244func (c *channel) deleteSelfFromTree() (deleted bool) {
245 if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
246 return false
247 }
248 // not top channel
249 if c.pid != 0 {
250 c.cm.findEntry(c.pid).deleteChild(c.id)
251 }
252 return true
253}
254
255// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
256// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
257// channel, and its memory will be garbage collected.
258//
259// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
260// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
261// the trace of the referenced entity must not be deleted. In order to release the resource allocated
262// by grpc, the reference to the grpc object is reset to a dummy object.
263//
264// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
265//
266// It returns a bool to indicate whether the channel can be safely deleted from map.
267func (c *channel) deleteSelfFromMap() (delete bool) {
268 if c.getTraceRefCount() != 0 {
269 c.c = &dummyChannel{}
270 return false
271 }
272 return true
273}
274
275// deleteSelfIfReady tries to delete the channel itself from the channelz database.
276// The delete process includes two steps:
277// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
278// parent's child list.
279// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
280// will return entry not found error.
281func (c *channel) deleteSelfIfReady() {
282 if !c.deleteSelfFromTree() {
283 return
284 }
285 if !c.deleteSelfFromMap() {
286 return
287 }
288 c.cm.deleteEntry(c.id)
289 c.trace.clear()
290}
291
// getChannelTrace returns the trace attached to this channel.
func (c *channel) getChannelTrace() *channelTrace {
	return c.trace
}

// incrTraceRefCount atomically increments the number of trace events referencing this channel.
func (c *channel) incrTraceRefCount() {
	atomic.AddInt32(&c.traceRefCount, 1)
}

// decrTraceRefCount atomically decrements the number of trace events referencing this channel.
func (c *channel) decrTraceRefCount() {
	atomic.AddInt32(&c.traceRefCount, -1)
}

// getTraceRefCount atomically loads the current trace reference count.
func (c *channel) getTraceRefCount() int {
	i := atomic.LoadInt32(&c.traceRefCount)
	return int(i)
}

// getRefName returns the human readable reference string of this channel.
func (c *channel) getRefName() string {
	return c.refName
}

// subChannel is channelz's internal representation of a subchannel.
type subChannel struct {
	// refName is the human readable reference string of this subchannel.
	refName string
	// c is the grpc object backing this entry; reset to &dummyChannel{} once the
	// grpc object is released (see deleteSelfFromMap).
	c Channel
	// closeCalled records whether triggerDelete has been invoked.
	closeCalled bool
	// sockets maps socket child id -> reference string.
	sockets map[int64]string
	// id is this subchannel's own channelz id.
	id int64
	// pid is the parent's channelz id.
	pid int64
	// cm is the channelz database this entry belongs to.
	cm *channelMap
	// trace holds the traced events for this subchannel.
	trace *channelTrace
	// traceRefCount is the number of trace events that reference this subchannel;
	// non-zero means the trace cannot be deleted.
	traceRefCount int32
}
324
325func (sc *subChannel) addChild(id int64, e entry) {
326 if v, ok := e.(*normalSocket); ok {
327 sc.sockets[id] = v.refName
328 } else {
329 grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
330 }
331}
332
// deleteChild removes the socket child with channelz id id and then attempts
// self-deletion, since the removed child may have been the last one.
func (sc *subChannel) deleteChild(id int64) {
	delete(sc.sockets, id)
	sc.deleteSelfIfReady()
}

// triggerDelete marks the subchannel as closed and attempts self-deletion;
// actual deletion is deferred until all sockets are gone.
func (sc *subChannel) triggerDelete() {
	sc.closeCalled = true
	sc.deleteSelfIfReady()
}

// getParentID returns the parent's channelz id.
func (sc *subChannel) getParentID() int64 {
	return sc.pid
}
346
347// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
348// means deleting the subchannel reference from its parent's child list.
349//
350// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
351// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
352//
353// The returned boolean value indicates whether the channel has been successfully deleted from tree.
354func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
355 if !sc.closeCalled || len(sc.sockets) != 0 {
356 return false
357 }
358 sc.cm.findEntry(sc.pid).deleteChild(sc.id)
359 return true
360}
361
362// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
363// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
364// the subchannel, and its memory will be garbage collected.
365//
366// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
367// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
368// the trace of the referenced entity must not be deleted. In order to release the resource allocated
369// by grpc, the reference to the grpc object is reset to a dummy object.
370//
371// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
372//
373// It returns a bool to indicate whether the channel can be safely deleted from map.
374func (sc *subChannel) deleteSelfFromMap() (delete bool) {
375 if sc.getTraceRefCount() != 0 {
376 // free the grpc struct (i.e. addrConn)
377 sc.c = &dummyChannel{}
378 return false
379 }
380 return true
381}
382
383// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
384// The delete process includes two steps:
385// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
386// its parent's child list.
387// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
388// by id will return entry not found error.
389func (sc *subChannel) deleteSelfIfReady() {
390 if !sc.deleteSelfFromTree() {
391 return
392 }
393 if !sc.deleteSelfFromMap() {
394 return
395 }
396 sc.cm.deleteEntry(sc.id)
397 sc.trace.clear()
398}
399
// getChannelTrace returns the trace attached to this subchannel.
func (sc *subChannel) getChannelTrace() *channelTrace {
	return sc.trace
}

// incrTraceRefCount atomically increments the number of trace events referencing this subchannel.
func (sc *subChannel) incrTraceRefCount() {
	atomic.AddInt32(&sc.traceRefCount, 1)
}

// decrTraceRefCount atomically decrements the number of trace events referencing this subchannel.
func (sc *subChannel) decrTraceRefCount() {
	atomic.AddInt32(&sc.traceRefCount, -1)
}

// getTraceRefCount atomically loads the current trace reference count.
func (sc *subChannel) getTraceRefCount() int {
	i := atomic.LoadInt32(&sc.traceRefCount)
	return int(i)
}

// getRefName returns the human readable reference string of this subchannel.
func (sc *subChannel) getRefName() string {
	return sc.refName
}

// SocketMetric defines the info channelz provides for a specific Socket, which
// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
type SocketMetric struct {
	// ID is the channelz id of this socket.
	ID int64
	// RefName is the human readable reference string of this socket.
	RefName string
	// SocketData contains socket internal metric reported by the socket through
	// ChannelzMetric().
	SocketData *SocketInternalMetric
}

// SocketInternalMetric defines the struct that the implementor of Socket interface
// should return from ChannelzMetric().
type SocketInternalMetric struct {
	// StreamsStarted is the number of streams that have been started.
	StreamsStarted int64
	// StreamsSucceeded is the number of streams that have ended successfully:
	// On client side, receiving frame with eos bit set.
	// On server side, sending frame with eos bit set.
	StreamsSucceeded int64
	// StreamsFailed is the number of streams that have ended unsuccessfully:
	// On client side, termination without receiving frame with eos bit set.
	// On server side, termination without sending frame with eos bit set.
	StreamsFailed int64
	// MessagesSent is the number of messages successfully sent on this socket.
	MessagesSent     int64
	MessagesReceived int64
	// KeepAlivesSent is the number of keep alives sent. This is typically
	// implemented with HTTP/2 ping messages.
	KeepAlivesSent int64
	// LastLocalStreamCreatedTimestamp is the last time a stream was created by
	// this endpoint. Usually unset for servers.
	LastLocalStreamCreatedTimestamp time.Time
	// LastRemoteStreamCreatedTimestamp is the last time a stream was created by
	// the remote endpoint. Usually unset for clients.
	LastRemoteStreamCreatedTimestamp time.Time
	// LastMessageSentTimestamp is the last time a message was sent by this endpoint.
	LastMessageSentTimestamp time.Time
	// LastMessageReceivedTimestamp is the last time a message was received by this endpoint.
	LastMessageReceivedTimestamp time.Time
	// LocalFlowControlWindow is the amount of window, granted to the local
	// endpoint by the remote endpoint. This may be slightly out of date due to
	// network latency. This does NOT include stream level or TCP level flow
	// control info.
	LocalFlowControlWindow int64
	// RemoteFlowControlWindow is the amount of window, granted to the remote
	// endpoint by the local endpoint. This may be slightly out of date due to
	// network latency. This does NOT include stream level or TCP level flow
	// control info.
	RemoteFlowControlWindow int64
	// LocalAddr is the locally bound address.
	LocalAddr net.Addr
	// RemoteAddr is the remote bound address. May be absent.
	RemoteAddr net.Addr
	// RemoteName is optional and represents the name of the remote endpoint, if
	// different than the original target name.
	RemoteName    string
	SocketOptions *SocketOptionData
	Security      credentials.ChannelzSecurityValue
}

// Socket is the interface that should be satisfied in order to be tracked by
// channelz as Socket.
type Socket interface {
	ChannelzMetric() *SocketInternalMetric
}
486
// listenSocket is channelz's internal representation of a listening socket.
// Sockets have no children, so the child-related entry methods only log errors.
type listenSocket struct {
	// refName is the human readable reference string of this socket.
	refName string
	// s is the grpc object backing this entry.
	s Socket
	// id is this socket's own channelz id.
	id int64
	// pid is the parent's channelz id.
	pid int64
	// cm is the channelz database this entry belongs to.
	cm *channelMap
}

// addChild logs an error: a listen socket cannot have children.
func (ls *listenSocket) addChild(id int64, e entry) {
	grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
}

// deleteChild logs an error: a listen socket has no children to delete.
func (ls *listenSocket) deleteChild(id int64) {
	grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
}

// triggerDelete removes the socket from the database immediately (sockets have
// no children, so deletion is never deferred) and detaches it from its parent.
func (ls *listenSocket) triggerDelete() {
	ls.cm.deleteEntry(ls.id)
	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
}

// deleteSelfIfReady logs an error: it should never be called on a listen socket.
func (ls *listenSocket) deleteSelfIfReady() {
	grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
}

// getParentID returns the parent's channelz id.
func (ls *listenSocket) getParentID() int64 {
	return ls.pid
}

// normalSocket is channelz's internal representation of a connected socket.
// Sockets have no children, so the child-related entry methods only log errors.
type normalSocket struct {
	// refName is the human readable reference string of this socket.
	refName string
	// s is the grpc object backing this entry.
	s Socket
	// id is this socket's own channelz id.
	id int64
	// pid is the parent's channelz id.
	pid int64
	// cm is the channelz database this entry belongs to.
	cm *channelMap
}

// addChild logs an error: a normal socket cannot have children.
func (ns *normalSocket) addChild(id int64, e entry) {
	grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
}

// deleteChild logs an error: a normal socket has no children to delete.
func (ns *normalSocket) deleteChild(id int64) {
	grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
}

// triggerDelete removes the socket from the database immediately and detaches
// it from its parent.
func (ns *normalSocket) triggerDelete() {
	ns.cm.deleteEntry(ns.id)
	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
}

// deleteSelfIfReady logs an error: it should never be called on a normal socket.
func (ns *normalSocket) deleteSelfIfReady() {
	grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
}

// getParentID returns the parent's channelz id.
func (ns *normalSocket) getParentID() int64 {
	return ns.pid
}
544
// ServerMetric defines the info channelz provides for a specific Server, which
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
// child list, etc.
type ServerMetric struct {
	// ID is the channelz id of this server.
	ID int64
	// RefName is the human readable reference string of this server.
	RefName string
	// ServerData contains server internal metric reported by the server through
	// ChannelzMetric().
	ServerData *ServerInternalMetric
	// ListenSockets tracks the listener socket type children of this server in the
	// format of a map from socket channelz id to corresponding reference string.
	ListenSockets map[int64]string
}

// ServerInternalMetric defines the struct that the implementor of Server interface
// should return from ChannelzMetric().
type ServerInternalMetric struct {
	// CallsStarted is the number of incoming calls started on the server.
	CallsStarted int64
	// CallsSucceeded is the number of incoming calls that have completed with an OK status.
	CallsSucceeded int64
	// CallsFailed is the number of incoming calls that have completed with a non-OK status.
	CallsFailed int64
	// LastCallStartedTimestamp is the last time a call was started on the server.
	LastCallStartedTimestamp time.Time
}

// Server is the interface to be satisfied in order to be tracked by channelz as
// Server.
type Server interface {
	ChannelzMetric() *ServerInternalMetric
}

// server is channelz's internal representation of a server.
type server struct {
	// refName is the human readable reference string of this server.
	refName string
	// s is the grpc object backing this entry.
	s Server
	// closeCalled records whether triggerDelete has been invoked.
	closeCalled bool
	// sockets maps normal-socket child id -> reference string.
	sockets map[int64]string
	// listenSockets maps listen-socket child id -> reference string.
	listenSockets map[int64]string
	// id is this server's own channelz id.
	id int64
	// cm is the channelz database this entry belongs to.
	cm *channelMap
}
589
590func (s *server) addChild(id int64, e entry) {
591 switch v := e.(type) {
592 case *normalSocket:
593 s.sockets[id] = v.refName
594 case *listenSocket:
595 s.listenSockets[id] = v.refName
596 default:
597 grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
598 }
599}
600
// deleteChild removes the child with channelz id id from both socket maps and
// then attempts self-deletion, since the removed child may have been the last one.
func (s *server) deleteChild(id int64) {
	delete(s.sockets, id)
	delete(s.listenSockets, id)
	s.deleteSelfIfReady()
}

// triggerDelete marks the server as closed and attempts self-deletion; actual
// deletion is deferred until all socket children are gone.
func (s *server) triggerDelete() {
	s.closeCalled = true
	s.deleteSelfIfReady()
}

// deleteSelfIfReady removes the server from the database once triggerDelete has
// been called and no socket children remain.
func (s *server) deleteSelfIfReady() {
	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
		return
	}
	s.cm.deleteEntry(s.id)
}

// getParentID returns 0: servers are always top-level entries with no parent.
func (s *server) getParentID() int64 {
	return 0
}
622
// tracedChannel represents an entry (channel or subchannel) that carries a trace.
type tracedChannel interface {
	getChannelTrace() *channelTrace
	incrTraceRefCount()
	decrTraceRefCount()
	getRefName() string
}

// channelTrace is the mutable event store behind a channel/subchannel trace.
type channelTrace struct {
	// cm is the channelz database, needed to adjust trace ref counts of
	// entities referenced by evicted/cleared events.
	cm *channelMap
	// createdTime is when the trace was created.
	createdTime time.Time
	// eventCount counts every event ever appended, including evicted ones.
	eventCount int64
	// mu guards events and eventCount.
	mu sync.Mutex
	// events holds at most getMaxTraceEntry() most recent events.
	events []*TraceEvent
}
637
// append adds e to the trace, stamping it with the current time. When the trace
// is full (getMaxTraceEntry() entries), the oldest event is evicted first; if
// the evicted event referenced another entity, that entity's trace ref count is
// decremented asynchronously.
func (c *channelTrace) append(e *TraceEvent) {
	c.mu.Lock()
	if len(c.events) == getMaxTraceEntry() {
		del := c.events[0]
		c.events = c.events[1:]
		if del.RefID != 0 {
			// start recursive cleanup in a goroutine to not block the call originated from grpc.
			go func() {
				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
				c.cm.mu.Lock()
				c.cm.decrTraceRefCount(del.RefID)
				c.cm.mu.Unlock()
			}()
		}
	}
	e.Timestamp = time.Now()
	c.events = append(c.events, e)
	c.eventCount++
	c.mu.Unlock()
}

// clear drops this trace's references to other entities by decrementing their
// trace ref counts. The caller must already hold the c.cm.mu lock (see the
// deleteSelfIfReady call sites).
func (c *channelTrace) clear() {
	c.mu.Lock()
	for _, e := range c.events {
		if e.RefID != 0 {
			// caller should have already held the c.cm.mu lock.
			c.cm.decrTraceRefCount(e.RefID)
		}
	}
	c.mu.Unlock()
}
669
// Severity is the severity level of a trace event.
// The canonical enumeration of all valid values is here:
// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
type Severity int

const (
	// CtUNKNOWN indicates unknown severity of a trace event.
	CtUNKNOWN Severity = iota
	// CtINFO indicates info level severity of a trace event.
	CtINFO
	// CtWarning indicates warning level severity of a trace event.
	// NOTE(review): CtWarning/CtError break the all-caps pattern of
	// CtUNKNOWN/CtINFO; they are exported, so renaming would be a breaking change.
	CtWarning
	// CtError indicates error level severity of a trace event.
	CtError
)

// RefChannelType is the type of the entity being referenced in a trace event.
type RefChannelType int

const (
	// RefChannel indicates the referenced entity is a Channel.
	RefChannel RefChannelType = iota
	// RefSubChannel indicates the referenced entity is a SubChannel.
	RefSubChannel
)
695
696func (c *channelTrace) dumpData() *ChannelTrace {
697 c.mu.Lock()
698 ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
699 ct.Events = c.events[:len(c.events)]
700 c.mu.Unlock()
701 return ct
702}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
new file mode 100644
index 0000000..692dd61
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
@@ -0,0 +1,53 @@
1// +build !appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package channelz
22
23import (
24 "syscall"
25
26 "golang.org/x/sys/unix"
27)
28
// SocketOptionData defines the struct to hold socket option data, and related
// getter function to obtain info from fd.
type SocketOptionData struct {
	// Linger is the SO_LINGER setting of the socket.
	Linger *unix.Linger
	// RecvTimeout is the SO_RCVTIMEO setting of the socket.
	RecvTimeout *unix.Timeval
	// SendTimeout is the SO_SNDTIMEO setting of the socket.
	SendTimeout *unix.Timeval
	// TCPInfo is the TCP_INFO data of the socket.
	TCPInfo *unix.TCPInfo
}

// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control(). Each option is fetched on a
// best-effort basis: a failed getsockopt simply leaves the corresponding field nil.
func (s *SocketOptionData) Getsockopt(fd uintptr) {
	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
		s.Linger = v
	}
	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
		s.RecvTimeout = v
	}
	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
		s.SendTimeout = v
	}
	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
		s.TCPInfo = v
	}
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
new file mode 100644
index 0000000..79edbef
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
@@ -0,0 +1,44 @@
1// +build !linux appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package channelz
22
23import (
24 "sync"
25
26 "google.golang.org/grpc/grpclog"
27)
28
// once guards the one-time "not supported" warning emitted by Getsockopt.
var once sync.Once

// SocketOptionData defines the struct to hold socket option data, and related
// getter function to obtain info from fd.
// Socket options are not supported on non-linux OSes and appengine (this file's
// build tags), so the struct is intentionally empty on those platforms.
type SocketOptionData struct {
}

// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// On non-linux OSes and appengine it is a no-op that logs a warning once.
func (s *SocketOptionData) Getsockopt(fd uintptr) {
	once.Do(func() {
		grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
	})
}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
index 93f0e1d..fdf409d 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -1,8 +1,8 @@
1// +build go1.8 1// +build linux,!appengine
2 2
3/* 3/*
4 * 4 *
5 * Copyright 2017 gRPC authors. 5 * Copyright 2018 gRPC authors.
6 * 6 *
7 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License. 8 * you may not use this file except in compliance with the License.
@@ -18,21 +18,22 @@
18 * 18 *
19 */ 19 */
20 20
21package credentials 21package channelz
22 22
23import ( 23import (
24 "crypto/tls" 24 "syscall"
25) 25)
26 26
27// cloneTLSConfig returns a shallow clone of the exported 27// GetSocketOption gets the socket option info of the conn.
28// fields of cfg, ignoring the unexported sync.Once, which 28func GetSocketOption(socket interface{}) *SocketOptionData {
29// contains a mutex and must not be copied. 29 c, ok := socket.(syscall.Conn)
30// 30 if !ok {
31// If cfg is nil, a new zero tls.Config is returned. 31 return nil
32func cloneTLSConfig(cfg *tls.Config) *tls.Config {
33 if cfg == nil {
34 return &tls.Config{}
35 } 32 }
36 33 data := &SocketOptionData{}
37 return cfg.Clone() 34 if rawConn, err := c.SyscallConn(); err == nil {
35 rawConn.Control(data.Getsockopt)
36 return data
37 }
38 return nil
38} 39}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
new file mode 100644
index 0000000..8864a08
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -0,0 +1,26 @@
1// +build !linux appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package channelz
22
// GetSocketOption gets the socket option info of the conn.
// Socket options are not supported on non-linux OSes and appengine (this file's
// build tags), so this implementation always returns nil.
func GetSocketOption(c interface{}) *SocketOptionData {
	return nil
}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000..d2193b3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -0,0 +1,70 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package envconfig contains grpc settings configured by environment variables.
20package envconfig
21
22import (
23 "os"
24 "strings"
25)
26
27const (
28 prefix = "GRPC_GO_"
29 retryStr = prefix + "RETRY"
30 requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
31)
32
33// RequireHandshakeSetting describes the settings for handshaking.
34type RequireHandshakeSetting int
35
36const (
37 // RequireHandshakeHybrid (default, deprecated) indicates to not wait for
38 // handshake before considering a connection ready, but wait before
39 // considering successful.
40 RequireHandshakeHybrid RequireHandshakeSetting = iota
41 // RequireHandshakeOn (default after the 1.17 release) indicates to wait
42 // for handshake before considering a connection ready/successful.
43 RequireHandshakeOn
44 // RequireHandshakeOff indicates to not wait for handshake before
45 // considering a connection ready/successful.
46 RequireHandshakeOff
47)
48
49var (
50 // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
51 Retry = strings.EqualFold(os.Getenv(retryStr), "on")
52 // RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
53 // environment variable.
54 //
55 // Will be removed after the 1.18 release.
56 RequireHandshake RequireHandshakeSetting
57)
58
59func init() {
60 switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
61 case "on":
62 default:
63 RequireHandshake = RequireHandshakeOn
64 case "off":
65 RequireHandshake = RequireHandshakeOff
66 case "hybrid":
67 // Will be removed after the 1.17 release.
68 RequireHandshake = RequireHandshakeHybrid
69 }
70}
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
new file mode 100644
index 0000000..200b115
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -0,0 +1,56 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package grpcrand implements math/rand functions in a concurrent-safe way
20// with a global random source, independent of math/rand's global source.
21package grpcrand
22
23import (
24 "math/rand"
25 "sync"
26 "time"
27)
28
29var (
30 r = rand.New(rand.NewSource(time.Now().UnixNano()))
31 mu sync.Mutex
32)
33
34// Int63n implements rand.Int63n on the grpcrand global source.
35func Int63n(n int64) int64 {
36 mu.Lock()
37 res := r.Int63n(n)
38 mu.Unlock()
39 return res
40}
41
42// Intn implements rand.Intn on the grpcrand global source.
43func Intn(n int) int {
44 mu.Lock()
45 res := r.Intn(n)
46 mu.Unlock()
47 return res
48}
49
50// Float64 implements rand.Float64 on the grpcrand global source.
51func Float64() float64 {
52 mu.Lock()
53 res := r.Float64()
54 mu.Unlock()
55 return res
56}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
new file mode 100644
index 0000000..fbe697c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -0,0 +1,61 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package grpcsync implements additional synchronization primitives built upon
20// the sync package.
21package grpcsync
22
23import (
24 "sync"
25 "sync/atomic"
26)
27
28// Event represents a one-time event that may occur in the future.
29type Event struct {
30 fired int32
31 c chan struct{}
32 o sync.Once
33}
34
35// Fire causes e to complete. It is safe to call multiple times, and
36// concurrently. It returns true iff this call to Fire caused the signaling
37// channel returned by Done to close.
38func (e *Event) Fire() bool {
39 ret := false
40 e.o.Do(func() {
41 atomic.StoreInt32(&e.fired, 1)
42 close(e.c)
43 ret = true
44 })
45 return ret
46}
47
48// Done returns a channel that will be closed when Fire is called.
49func (e *Event) Done() <-chan struct{} {
50 return e.c
51}
52
53// HasFired returns true if Fire has been called.
54func (e *Event) HasFired() bool {
55 return atomic.LoadInt32(&e.fired) == 1
56}
57
58// NewEvent returns a new, ready-to-use Event.
59func NewEvent() *Event {
60 return &Event{c: make(chan struct{})}
61}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 0708383..eaa54d4 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -15,20 +15,36 @@
15 * 15 *
16 */ 16 */
17 17
18// Package internal contains gRPC-internal code for testing, to avoid polluting 18// Package internal contains gRPC-internal code, to avoid polluting
19// the godoc of the top-level grpc package. 19// the godoc of the top-level grpc package. It must not import any grpc
20// symbols to avoid circular dependencies.
20package internal 21package internal
21 22
22// TestingCloseConns closes all existing transports but keeps 23import "context"
23// grpcServer.lis accepting new connections.
24//
25// The provided grpcServer must be of type *grpc.Server. It is untyped
26// for circular dependency reasons.
27var TestingCloseConns func(grpcServer interface{})
28 24
29// TestingUseHandlerImpl enables the http.Handler-based server implementation. 25var (
30// It must be called before Serve and requires TLS credentials. 26 // WithContextDialer is exported by dialoptions.go
31// 27 WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
32// The provided grpcServer must be of type *grpc.Server. It is untyped 28 // WithResolverBuilder is exported by dialoptions.go
33// for circular dependency reasons. 29 WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
34var TestingUseHandlerImpl func(grpcServer interface{}) 30 // WithHealthCheckFunc is not exported by dialoptions.go
31 WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
32 // HealthCheckFunc is used to provide client-side LB channel health checking
33 HealthCheckFunc HealthChecker
34 // BalancerUnregister is exported by package balancer to unregister a balancer.
35 BalancerUnregister func(name string)
36)
37
38// HealthChecker defines the signature of the client-side LB channel health checking function.
39type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
40
41const (
42 // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
43 CredsBundleModeFallback = "fallback"
44 // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
45 // mode.
46 CredsBundleModeBalancer = "balancer"
47 // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
48 // that supports backend returned by grpclb balancer.
49 CredsBundleModeBackendFromBalancer = "backend-from-balancer"
50)
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 0000000..43281a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,114 @@
1// +build !appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21// Package syscall provides functionalities that grpc uses to get low-level operating system
22// stats/info.
23package syscall
24
25import (
26 "fmt"
27 "net"
28 "syscall"
29 "time"
30
31 "golang.org/x/sys/unix"
32 "google.golang.org/grpc/grpclog"
33)
34
35// GetCPUTime returns the how much CPU time has passed since the start of this process.
36func GetCPUTime() int64 {
37 var ts unix.Timespec
38 if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
39 grpclog.Fatal(err)
40 }
41 return ts.Nano()
42}
43
44// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
45type Rusage syscall.Rusage
46
47// GetRusage returns the resource usage of current process.
48func GetRusage() (rusage *Rusage) {
49 rusage = new(Rusage)
50 syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
51 return
52}
53
54// CPUTimeDiff returns the differences of user CPU time and system CPU time used
55// between two Rusage structs.
56func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
57 f := (*syscall.Rusage)(first)
58 l := (*syscall.Rusage)(latest)
59 var (
60 utimeDiffs = l.Utime.Sec - f.Utime.Sec
61 utimeDiffus = l.Utime.Usec - f.Utime.Usec
62 stimeDiffs = l.Stime.Sec - f.Stime.Sec
63 stimeDiffus = l.Stime.Usec - f.Stime.Usec
64 )
65
66 uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
67 sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
68
69 return uTimeElapsed, sTimeElapsed
70}
71
72// SetTCPUserTimeout sets the TCP user timeout on a connection's socket
73func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
74 tcpconn, ok := conn.(*net.TCPConn)
75 if !ok {
76 // not a TCP connection. exit early
77 return nil
78 }
79 rawConn, err := tcpconn.SyscallConn()
80 if err != nil {
81 return fmt.Errorf("error getting raw connection: %v", err)
82 }
83 err = rawConn.Control(func(fd uintptr) {
84 err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
85 })
86 if err != nil {
87 return fmt.Errorf("error setting option on socket: %v", err)
88 }
89
90 return nil
91}
92
93// GetTCPUserTimeout gets the TCP user timeout on a connection's socket
94func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
95 tcpconn, ok := conn.(*net.TCPConn)
96 if !ok {
97 err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
98 return
99 }
100 rawConn, err := tcpconn.SyscallConn()
101 if err != nil {
102 err = fmt.Errorf("error getting raw connection: %v", err)
103 return
104 }
105 err = rawConn.Control(func(fd uintptr) {
106 opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
107 })
108 if err != nil {
109 err = fmt.Errorf("error getting option on socket: %v", err)
110 return
111 }
112
113 return
114}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 0000000..61678fe
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,63 @@
1// +build !linux appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package syscall
22
23import (
24 "net"
25 "time"
26
27 "google.golang.org/grpc/grpclog"
28)
29
30func init() {
31 grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
32}
33
34// GetCPUTime returns the how much CPU time has passed since the start of this process.
35// It always returns 0 under non-linux or appengine environment.
36func GetCPUTime() int64 {
37 return 0
38}
39
40// Rusage is an empty struct under non-linux or appengine environment.
41type Rusage struct{}
42
43// GetRusage is a no-op function under non-linux or appengine environment.
44func GetRusage() (rusage *Rusage) {
45 return nil
46}
47
48// CPUTimeDiff returns the differences of user CPU time and system CPU time used
49// between two Rusage structs. It a no-op function for non-linux or appengine environment.
50func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
51 return 0, 0
52}
53
54// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
55func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
56 return nil
57}
58
59// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
60// a negative return value indicates the operation is not supported
61func GetTCPUserTimeout(conn net.Conn) (int, error) {
62 return -1, nil
63}
diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
index 667edb8..070680e 100644
--- a/vendor/google.golang.org/grpc/transport/bdp_estimator.go
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -24,9 +24,10 @@ import (
24) 24)
25 25
26const ( 26const (
27 // bdpLimit is the maximum value the flow control windows 27 // bdpLimit is the maximum value the flow control windows will be increased
28 // will be increased to. 28 // to. TCP typically limits this to 4MB, but some systems go up to 16MB.
29 bdpLimit = (1 << 20) * 4 29 // Since this is only a limit, it is safe to make it optimistic.
30 bdpLimit = (1 << 20) * 16
30 // alpha is a constant factor used to keep a moving average 31 // alpha is a constant factor used to keep a moving average
31 // of RTTs. 32 // of RTTs.
32 alpha = 0.9 33 alpha = 0.9
@@ -41,12 +42,9 @@ const (
41 gamma = 2 42 gamma = 2
42) 43)
43 44
44var ( 45// Adding arbitrary data to ping so that its ack can be identified.
45 // Adding arbitrary data to ping so that its ack can be 46// Easter-egg: what does the ping message say?
46 // identified. 47var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
47 // Easter-egg: what does the ping message say?
48 bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
49)
50 48
51type bdpEstimator struct { 49type bdpEstimator struct {
52 // sentAt is the time when the ping was sent. 50 // sentAt is the time when the ping was sent.
@@ -59,7 +57,7 @@ type bdpEstimator struct {
59 sample uint32 57 sample uint32
60 // bwMax is the maximum bandwidth noted so far (bytes/sec). 58 // bwMax is the maximum bandwidth noted so far (bytes/sec).
61 bwMax float64 59 bwMax float64
62 // bool to keep track of the begining of a new measurement cycle. 60 // bool to keep track of the beginning of a new measurement cycle.
63 isSent bool 61 isSent bool
64 // Callback to update the window sizes. 62 // Callback to update the window sizes.
65 updateFlowControl func(n uint32) 63 updateFlowControl func(n uint32)
@@ -70,7 +68,7 @@ type bdpEstimator struct {
70} 68}
71 69
72// timesnap registers the time bdp ping was sent out so that 70// timesnap registers the time bdp ping was sent out so that
73// network rtt can be calculated when its ack is recieved. 71// network rtt can be calculated when its ack is received.
74// It is called (by controller) when the bdpPing is 72// It is called (by controller) when the bdpPing is
75// being written on the wire. 73// being written on the wire.
76func (b *bdpEstimator) timesnap(d [8]byte) { 74func (b *bdpEstimator) timesnap(d [8]byte) {
@@ -119,7 +117,7 @@ func (b *bdpEstimator) calculate(d [8]byte) {
119 b.rtt += (rttSample - b.rtt) * float64(alpha) 117 b.rtt += (rttSample - b.rtt) * float64(alpha)
120 } 118 }
121 b.isSent = false 119 b.isSent = false
122 // The number of bytes accumalated so far in the sample is smaller 120 // The number of bytes accumulated so far in the sample is smaller
123 // than or equal to 1.5 times the real BDP on a saturated connection. 121 // than or equal to 1.5 times the real BDP on a saturated connection.
124 bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) 122 bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
125 if bwCurrent > b.bwMax { 123 if bwCurrent > b.bwMax {
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
new file mode 100644
index 0000000..204ba15
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -0,0 +1,852 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "bytes"
23 "fmt"
24 "runtime"
25 "sync"
26
27 "golang.org/x/net/http2"
28 "golang.org/x/net/http2/hpack"
29)
30
31var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
32 e.SetMaxDynamicTableSizeLimit(v)
33}
34
35type itemNode struct {
36 it interface{}
37 next *itemNode
38}
39
40type itemList struct {
41 head *itemNode
42 tail *itemNode
43}
44
45func (il *itemList) enqueue(i interface{}) {
46 n := &itemNode{it: i}
47 if il.tail == nil {
48 il.head, il.tail = n, n
49 return
50 }
51 il.tail.next = n
52 il.tail = n
53}
54
55// peek returns the first item in the list without removing it from the
56// list.
57func (il *itemList) peek() interface{} {
58 return il.head.it
59}
60
61func (il *itemList) dequeue() interface{} {
62 if il.head == nil {
63 return nil
64 }
65 i := il.head.it
66 il.head = il.head.next
67 if il.head == nil {
68 il.tail = nil
69 }
70 return i
71}
72
73func (il *itemList) dequeueAll() *itemNode {
74 h := il.head
75 il.head, il.tail = nil, nil
76 return h
77}
78
79func (il *itemList) isEmpty() bool {
80 return il.head == nil
81}
82
83// The following defines various control items which could flow through
84// the control buffer of transport. They represent different aspects of
85// control tasks, e.g., flow control, settings, streaming resetting, etc.
86
87// registerStream is used to register an incoming stream with loopy writer.
88type registerStream struct {
89 streamID uint32
90 wq *writeQuota
91}
92
93// headerFrame is also used to register stream on the client-side.
94type headerFrame struct {
95 streamID uint32
96 hf []hpack.HeaderField
97 endStream bool // Valid on server side.
98 initStream func(uint32) (bool, error) // Used only on the client side.
99 onWrite func()
100 wq *writeQuota // write quota for the stream created.
101 cleanup *cleanupStream // Valid on the server side.
102 onOrphaned func(error) // Valid on client-side
103}
104
105type cleanupStream struct {
106 streamID uint32
107 rst bool
108 rstCode http2.ErrCode
109 onWrite func()
110}
111
112type dataFrame struct {
113 streamID uint32
114 endStream bool
115 h []byte
116 d []byte
117 // onEachWrite is called every time
118 // a part of d is written out.
119 onEachWrite func()
120}
121
122type incomingWindowUpdate struct {
123 streamID uint32
124 increment uint32
125}
126
127type outgoingWindowUpdate struct {
128 streamID uint32
129 increment uint32
130}
131
132type incomingSettings struct {
133 ss []http2.Setting
134}
135
136type outgoingSettings struct {
137 ss []http2.Setting
138}
139
140type incomingGoAway struct {
141}
142
143type goAway struct {
144 code http2.ErrCode
145 debugData []byte
146 headsUp bool
147 closeConn bool
148}
149
150type ping struct {
151 ack bool
152 data [8]byte
153}
154
155type outFlowControlSizeRequest struct {
156 resp chan uint32
157}
158
159type outStreamState int
160
161const (
162 active outStreamState = iota
163 empty
164 waitingOnStreamQuota
165)
166
167type outStream struct {
168 id uint32
169 state outStreamState
170 itl *itemList
171 bytesOutStanding int
172 wq *writeQuota
173
174 next *outStream
175 prev *outStream
176}
177
178func (s *outStream) deleteSelf() {
179 if s.prev != nil {
180 s.prev.next = s.next
181 }
182 if s.next != nil {
183 s.next.prev = s.prev
184 }
185 s.next, s.prev = nil, nil
186}
187
188type outStreamList struct {
189 // Following are sentinel objects that mark the
190 // beginning and end of the list. They do not
191 // contain any item lists. All valid objects are
192 // inserted in between them.
193 // This is needed so that an outStream object can
194 // deleteSelf() in O(1) time without knowing which
195 // list it belongs to.
196 head *outStream
197 tail *outStream
198}
199
200func newOutStreamList() *outStreamList {
201 head, tail := new(outStream), new(outStream)
202 head.next = tail
203 tail.prev = head
204 return &outStreamList{
205 head: head,
206 tail: tail,
207 }
208}
209
210func (l *outStreamList) enqueue(s *outStream) {
211 e := l.tail.prev
212 e.next = s
213 s.prev = e
214 s.next = l.tail
215 l.tail.prev = s
216}
217
218// remove from the beginning of the list.
219func (l *outStreamList) dequeue() *outStream {
220 b := l.head.next
221 if b == l.tail {
222 return nil
223 }
224 b.deleteSelf()
225 return b
226}
227
228// controlBuffer is a way to pass information to loopy.
229// Information is passed as specific struct types called control frames.
230// A control frame not only represents data, messages or headers to be sent out
231// but can also be used to instruct loopy to update its internal state.
232// It shouldn't be confused with an HTTP2 frame, although some of the control frames
233// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
234type controlBuffer struct {
235 ch chan struct{}
236 done <-chan struct{}
237 mu sync.Mutex
238 consumerWaiting bool
239 list *itemList
240 err error
241}
242
243func newControlBuffer(done <-chan struct{}) *controlBuffer {
244 return &controlBuffer{
245 ch: make(chan struct{}, 1),
246 list: &itemList{},
247 done: done,
248 }
249}
250
251func (c *controlBuffer) put(it interface{}) error {
252 _, err := c.executeAndPut(nil, it)
253 return err
254}
255
256func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
257 var wakeUp bool
258 c.mu.Lock()
259 if c.err != nil {
260 c.mu.Unlock()
261 return false, c.err
262 }
263 if f != nil {
264 if !f(it) { // f wasn't successful
265 c.mu.Unlock()
266 return false, nil
267 }
268 }
269 if c.consumerWaiting {
270 wakeUp = true
271 c.consumerWaiting = false
272 }
273 c.list.enqueue(it)
274 c.mu.Unlock()
275 if wakeUp {
276 select {
277 case c.ch <- struct{}{}:
278 default:
279 }
280 }
281 return true, nil
282}
283
284// Note argument f should never be nil.
285func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
286 c.mu.Lock()
287 if c.err != nil {
288 c.mu.Unlock()
289 return false, c.err
290 }
291 if !f(it) { // f wasn't successful
292 c.mu.Unlock()
293 return false, nil
294 }
295 c.mu.Unlock()
296 return true, nil
297}
298
299func (c *controlBuffer) get(block bool) (interface{}, error) {
300 for {
301 c.mu.Lock()
302 if c.err != nil {
303 c.mu.Unlock()
304 return nil, c.err
305 }
306 if !c.list.isEmpty() {
307 h := c.list.dequeue()
308 c.mu.Unlock()
309 return h, nil
310 }
311 if !block {
312 c.mu.Unlock()
313 return nil, nil
314 }
315 c.consumerWaiting = true
316 c.mu.Unlock()
317 select {
318 case <-c.ch:
319 case <-c.done:
320 c.finish()
321 return nil, ErrConnClosing
322 }
323 }
324}
325
326func (c *controlBuffer) finish() {
327 c.mu.Lock()
328 if c.err != nil {
329 c.mu.Unlock()
330 return
331 }
332 c.err = ErrConnClosing
333 // There may be headers for streams in the control buffer.
334 // These streams need to be cleaned out since the transport
335 // is still not aware of these yet.
336 for head := c.list.dequeueAll(); head != nil; head = head.next {
337 hdr, ok := head.it.(*headerFrame)
338 if !ok {
339 continue
340 }
341 if hdr.onOrphaned != nil { // It will be nil on the server-side.
342 hdr.onOrphaned(ErrConnClosing)
343 }
344 }
345 c.mu.Unlock()
346}
347
348type side int
349
350const (
351 clientSide side = iota
352 serverSide
353)
354
355// Loopy receives frames from the control buffer.
356// Each frame is handled individually; most of the work done by loopy goes
357// into handling data frames. Loopy maintains a queue of active streams, and each
358// stream maintains a queue of data frames; as loopy receives data frames
359// it gets added to the queue of the relevant stream.
360// Loopy goes over this list of active streams by processing one node every iteration,
361// thereby closely resemebling to a round-robin scheduling over all streams. While
362// processing a stream, loopy writes out data bytes from this stream capped by the min
363// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
364type loopyWriter struct {
365 side side
366 cbuf *controlBuffer
367 sendQuota uint32
368 oiws uint32 // outbound initial window size.
369 // estdStreams is map of all established streams that are not cleaned-up yet.
370 // On client-side, this is all streams whose headers were sent out.
371 // On server-side, this is all streams whose headers were received.
372 estdStreams map[uint32]*outStream // Established streams.
373 // activeStreams is a linked-list of all streams that have data to send and some
374 // stream-level flow control quota.
375 // Each of these streams internally have a list of data items(and perhaps trailers
376 // on the server-side) to be sent out.
377 activeStreams *outStreamList
378 framer *framer
379 hBuf *bytes.Buffer // The buffer for HPACK encoding.
380 hEnc *hpack.Encoder // HPACK encoder.
381 bdpEst *bdpEstimator
382 draining bool
383
384 // Side-specific handlers
385 ssGoAwayHandler func(*goAway) (bool, error)
386}
387
388func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
389 var buf bytes.Buffer
390 l := &loopyWriter{
391 side: s,
392 cbuf: cbuf,
393 sendQuota: defaultWindowSize,
394 oiws: defaultWindowSize,
395 estdStreams: make(map[uint32]*outStream),
396 activeStreams: newOutStreamList(),
397 framer: fr,
398 hBuf: &buf,
399 hEnc: hpack.NewEncoder(&buf),
400 bdpEst: bdpEst,
401 }
402 return l
403}
404
405const minBatchSize = 1000
406
407// run should be run in a separate goroutine.
408// It reads control frames from controlBuf and processes them by:
409// 1. Updating loopy's internal state, or/and
410// 2. Writing out HTTP2 frames on the wire.
411//
412// Loopy keeps all active streams with data to send in a linked-list.
413// All streams in the activeStreams linked-list must have both:
414// 1. Data to send, and
415// 2. Stream level flow control quota available.
416//
417// In each iteration of run loop, other than processing the incoming control
418// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
419// This results in writing of HTTP2 frames into an underlying write buffer.
420// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
421// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
422// if the batch size is too low to give stream goroutines a chance to fill it up.
423func (l *loopyWriter) run() (err error) {
424 defer func() {
425 if err == ErrConnClosing {
426 // Don't log ErrConnClosing as error since it happens
427 // 1. When the connection is closed by some other known issue.
428 // 2. User closed the connection.
429 // 3. A graceful close of connection.
430 infof("transport: loopyWriter.run returning. %v", err)
431 err = nil
432 }
433 }()
434 for {
435 it, err := l.cbuf.get(true)
436 if err != nil {
437 return err
438 }
439 if err = l.handle(it); err != nil {
440 return err
441 }
442 if _, err = l.processData(); err != nil {
443 return err
444 }
445 gosched := true
446 hasdata:
447 for {
448 it, err := l.cbuf.get(false)
449 if err != nil {
450 return err
451 }
452 if it != nil {
453 if err = l.handle(it); err != nil {
454 return err
455 }
456 if _, err = l.processData(); err != nil {
457 return err
458 }
459 continue hasdata
460 }
461 isEmpty, err := l.processData()
462 if err != nil {
463 return err
464 }
465 if !isEmpty {
466 continue hasdata
467 }
468 if gosched {
469 gosched = false
470 if l.framer.writer.offset < minBatchSize {
471 runtime.Gosched()
472 continue hasdata
473 }
474 }
475 l.framer.writer.Flush()
476 break hasdata
477
478 }
479 }
480}
481
482func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
483 return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
484}
485
486func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
487 // Otherwise update the quota.
488 if w.streamID == 0 {
489 l.sendQuota += w.increment
490 return nil
491 }
492 // Find the stream and update it.
493 if str, ok := l.estdStreams[w.streamID]; ok {
494 str.bytesOutStanding -= int(w.increment)
495 if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
496 str.state = active
497 l.activeStreams.enqueue(str)
498 return nil
499 }
500 }
501 return nil
502}
503
504func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
505 return l.framer.fr.WriteSettings(s.ss...)
506}
507
508func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
509 if err := l.applySettings(s.ss); err != nil {
510 return err
511 }
512 return l.framer.fr.WriteSettingsAck()
513}
514
515func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
516 str := &outStream{
517 id: h.streamID,
518 state: empty,
519 itl: &itemList{},
520 wq: h.wq,
521 }
522 l.estdStreams[h.streamID] = str
523 return nil
524}
525
// headerHandler processes a headerFrame control item.
//
// Server side:
//   - Case 1.A: endStream is false — the server is responding with headers,
//     which are written out immediately.
//   - Case 1.B: endStream is true — the server wants to close the stream.
//     If the stream still has queued items, the trailers are queued behind
//     them; otherwise they are written out and the stream is cleaned up.
//
// Client side (Case 2): the frame originates a brand-new stream.
//
// A headerFrame for a stream the server no longer recognizes is logged and
// dropped.
func (l *loopyWriter) headerHandler(h *headerFrame) error {
	if l.side == serverSide {
		str, ok := l.estdStreams[h.streamID]
		if !ok {
			warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
			return nil
		}
		// Case 1.A: Server is responding back with headers.
		if !h.endStream {
			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
		}
		// else: Case 1.B: Server wants to close stream.

		if str.state != empty { // either active or waiting on stream quota.
			// Add the trailers to str's item list so they are written only
			// after all pending data items.
			str.itl.enqueue(h)
			return nil
		}
		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
			return err
		}
		return l.cleanupStreamHandler(h.cleanup)
	}
	// Case 2: Client wants to originate stream.
	str := &outStream{
		id:    h.streamID,
		state: empty,
		itl:   &itemList{},
		wq:    h.wq,
	}
	str.itl.enqueue(h)
	return l.originateStream(str)
}
559
560func (l *loopyWriter) originateStream(str *outStream) error {
561 hdr := str.itl.dequeue().(*headerFrame)
562 sendPing, err := hdr.initStream(str.id)
563 if err != nil {
564 if err == ErrConnClosing {
565 return err
566 }
567 // Other errors(errStreamDrain) need not close transport.
568 return nil
569 }
570 if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
571 return err
572 }
573 l.estdStreams[str.id] = str
574 if sendPing {
575 return l.pingHandler(&ping{data: [8]byte{}})
576 }
577 return nil
578}
579
580func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
581 if onWrite != nil {
582 onWrite()
583 }
584 l.hBuf.Reset()
585 for _, f := range hf {
586 if err := l.hEnc.WriteField(f); err != nil {
587 warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
588 }
589 }
590 var (
591 err error
592 endHeaders, first bool
593 )
594 first = true
595 for !endHeaders {
596 size := l.hBuf.Len()
597 if size > http2MaxFrameLen {
598 size = http2MaxFrameLen
599 } else {
600 endHeaders = true
601 }
602 if first {
603 first = false
604 err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
605 StreamID: streamID,
606 BlockFragment: l.hBuf.Next(size),
607 EndStream: endStream,
608 EndHeaders: endHeaders,
609 })
610 } else {
611 err = l.framer.fr.WriteContinuation(
612 streamID,
613 endHeaders,
614 l.hBuf.Next(size),
615 )
616 }
617 if err != nil {
618 return err
619 }
620 }
621 return nil
622}
623
624func (l *loopyWriter) preprocessData(df *dataFrame) error {
625 str, ok := l.estdStreams[df.streamID]
626 if !ok {
627 return nil
628 }
629 // If we got data for a stream it means that
630 // stream was originated and the headers were sent out.
631 str.itl.enqueue(df)
632 if str.state == empty {
633 str.state = active
634 l.activeStreams.enqueue(str)
635 }
636 return nil
637}
638
639func (l *loopyWriter) pingHandler(p *ping) error {
640 if !p.ack {
641 l.bdpEst.timesnap(p.data)
642 }
643 return l.framer.fr.WritePing(p.ack, p.data)
644
645}
646
647func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
648 o.resp <- l.sendQuota
649 return nil
650}
651
652func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
653 c.onWrite()
654 if str, ok := l.estdStreams[c.streamID]; ok {
655 // On the server side it could be a trailers-only response or
656 // a RST_STREAM before stream initialization thus the stream might
657 // not be established yet.
658 delete(l.estdStreams, c.streamID)
659 str.deleteSelf()
660 }
661 if c.rst { // If RST_STREAM needs to be sent.
662 if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
663 return err
664 }
665 }
666 if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
667 return ErrConnClosing
668 }
669 return nil
670}
671
672func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
673 if l.side == clientSide {
674 l.draining = true
675 if len(l.estdStreams) == 0 {
676 return ErrConnClosing
677 }
678 }
679 return nil
680}
681
682func (l *loopyWriter) goAwayHandler(g *goAway) error {
683 // Handling of outgoing GoAway is very specific to side.
684 if l.ssGoAwayHandler != nil {
685 draining, err := l.ssGoAwayHandler(g)
686 if err != nil {
687 return err
688 }
689 l.draining = draining
690 }
691 return nil
692}
693
694func (l *loopyWriter) handle(i interface{}) error {
695 switch i := i.(type) {
696 case *incomingWindowUpdate:
697 return l.incomingWindowUpdateHandler(i)
698 case *outgoingWindowUpdate:
699 return l.outgoingWindowUpdateHandler(i)
700 case *incomingSettings:
701 return l.incomingSettingsHandler(i)
702 case *outgoingSettings:
703 return l.outgoingSettingsHandler(i)
704 case *headerFrame:
705 return l.headerHandler(i)
706 case *registerStream:
707 return l.registerStreamHandler(i)
708 case *cleanupStream:
709 return l.cleanupStreamHandler(i)
710 case *incomingGoAway:
711 return l.incomingGoAwayHandler(i)
712 case *dataFrame:
713 return l.preprocessData(i)
714 case *ping:
715 return l.pingHandler(i)
716 case *goAway:
717 return l.goAwayHandler(i)
718 case *outFlowControlSizeRequest:
719 return l.outFlowControlSizeRequestHandler(i)
720 default:
721 return fmt.Errorf("transport: unknown control message type %T", i)
722 }
723}
724
725func (l *loopyWriter) applySettings(ss []http2.Setting) error {
726 for _, s := range ss {
727 switch s.ID {
728 case http2.SettingInitialWindowSize:
729 o := l.oiws
730 l.oiws = s.Val
731 if o < l.oiws {
732 // If the new limit is greater make all depleted streams active.
733 for _, stream := range l.estdStreams {
734 if stream.state == waitingOnStreamQuota {
735 stream.state = active
736 l.activeStreams.enqueue(stream)
737 }
738 }
739 }
740 case http2.SettingHeaderTableSize:
741 updateHeaderTblSize(l.hEnc, s.Val)
742 }
743 }
744 return nil
745}
746
747// processData removes the first stream from active streams, writes out at most 16KB
748// of its data and then puts it at the end of activeStreams if there's still more data
749// to be sent and stream has some stream-level flow control.
750func (l *loopyWriter) processData() (bool, error) {
751 if l.sendQuota == 0 {
752 return true, nil
753 }
754 str := l.activeStreams.dequeue() // Remove the first stream.
755 if str == nil {
756 return true, nil
757 }
758 dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
759 // A data item is represented by a dataFrame, since it later translates into
760 // multiple HTTP2 data frames.
761 // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
762 // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
763 // maximum possilbe HTTP2 frame size.
764
765 if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
766 // Client sends out empty data frame with endStream = true
767 if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
768 return false, err
769 }
770 str.itl.dequeue() // remove the empty data item from stream
771 if str.itl.isEmpty() {
772 str.state = empty
773 } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
774 if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
775 return false, err
776 }
777 if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
778 return false, nil
779 }
780 } else {
781 l.activeStreams.enqueue(str)
782 }
783 return false, nil
784 }
785 var (
786 idx int
787 buf []byte
788 )
789 if len(dataItem.h) != 0 { // data header has not been written out yet.
790 buf = dataItem.h
791 } else {
792 idx = 1
793 buf = dataItem.d
794 }
795 size := http2MaxFrameLen
796 if len(buf) < size {
797 size = len(buf)
798 }
799 if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
800 str.state = waitingOnStreamQuota
801 return false, nil
802 } else if strQuota < size {
803 size = strQuota
804 }
805
806 if l.sendQuota < uint32(size) { // connection-level flow control.
807 size = int(l.sendQuota)
808 }
809 // Now that outgoing flow controls are checked we can replenish str's write quota
810 str.wq.replenish(size)
811 var endStream bool
812 // If this is the last data message on this stream and all of it can be written in this iteration.
813 if dataItem.endStream && size == len(buf) {
814 // buf contains either data or it contains header but data is empty.
815 if idx == 1 || len(dataItem.d) == 0 {
816 endStream = true
817 }
818 }
819 if dataItem.onEachWrite != nil {
820 dataItem.onEachWrite()
821 }
822 if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
823 return false, err
824 }
825 buf = buf[size:]
826 str.bytesOutStanding += size
827 l.sendQuota -= uint32(size)
828 if idx == 0 {
829 dataItem.h = buf
830 } else {
831 dataItem.d = buf
832 }
833
834 if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
835 str.itl.dequeue()
836 }
837 if str.itl.isEmpty() {
838 str.state = empty
839 } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
840 if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
841 return false, err
842 }
843 if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
844 return false, err
845 }
846 } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
847 str.state = waitingOnStreamQuota
848 } else { // Otherwise add it back to the list of active streams.
849 l.activeStreams.enqueue(str)
850 }
851 return false, nil
852}
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
new file mode 100644
index 0000000..9fa306b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -0,0 +1,49 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "math"
23 "time"
24)
25
26const (
27 // The default value of flow control window size in HTTP2 spec.
28 defaultWindowSize = 65535
29 // The initial window size for flow control.
30 initialWindowSize = defaultWindowSize // for an RPC
31 infinity = time.Duration(math.MaxInt64)
32 defaultClientKeepaliveTime = infinity
33 defaultClientKeepaliveTimeout = 20 * time.Second
34 defaultMaxStreamsClient = 100
35 defaultMaxConnectionIdle = infinity
36 defaultMaxConnectionAge = infinity
37 defaultMaxConnectionAgeGrace = infinity
38 defaultServerKeepaliveTime = 2 * time.Hour
39 defaultServerKeepaliveTimeout = 20 * time.Second
40 defaultKeepalivePolicyMinTime = 5 * time.Minute
41 // max window limit set by HTTP2 Specs.
42 maxWindowSize = math.MaxInt32
43 // defaultWriteQuota is the default value for number of data
44 // bytes that each stream can schedule before some of it being
45 // flushed out.
46 defaultWriteQuota = 64 * 1024
47 defaultClientMaxHeaderListSize = uint32(16 << 20)
48 defaultServerMaxHeaderListSize = uint32(16 << 20)
49)
diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index 501eb03..5ea997a 100644
--- a/vendor/google.golang.org/grpc/transport/control.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -22,128 +22,102 @@ import (
22 "fmt" 22 "fmt"
23 "math" 23 "math"
24 "sync" 24 "sync"
25 "time" 25 "sync/atomic"
26
27 "golang.org/x/net/http2"
28)
29
30const (
31 // The default value of flow control window size in HTTP2 spec.
32 defaultWindowSize = 65535
33 // The initial window size for flow control.
34 initialWindowSize = defaultWindowSize // for an RPC
35 infinity = time.Duration(math.MaxInt64)
36 defaultClientKeepaliveTime = infinity
37 defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
38 defaultMaxStreamsClient = 100
39 defaultMaxConnectionIdle = infinity
40 defaultMaxConnectionAge = infinity
41 defaultMaxConnectionAgeGrace = infinity
42 defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
43 defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
44 defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
45 // max window limit set by HTTP2 Specs.
46 maxWindowSize = math.MaxInt32
47) 26)
48 27
49// The following defines various control items which could flow through 28// writeQuota is a soft limit on the amount of data a stream can
50// the control buffer of transport. They represent different aspects of 29// schedule before some of it is written out.
51// control tasks, e.g., flow control, settings, streaming resetting, etc. 30type writeQuota struct {
52type windowUpdate struct { 31 quota int32
53 streamID uint32 32 // get waits on read from when quota goes less than or equal to zero.
54 increment uint32 33 // replenish writes on it when quota goes positive again.
55 flush bool 34 ch chan struct{}
56} 35 // done is triggered in error case.
57 36 done <-chan struct{}
58func (*windowUpdate) item() {} 37 // replenish is called by loopyWriter to give quota back to.
59 38 // It is implemented as a field so that it can be updated
60type settings struct { 39 // by tests.
61 ack bool 40 replenish func(n int)
62 ss []http2.Setting 41}
42
43func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
44 w := &writeQuota{
45 quota: sz,
46 ch: make(chan struct{}, 1),
47 done: done,
48 }
49 w.replenish = w.realReplenish
50 return w
63} 51}
64 52
65func (*settings) item() {} 53func (w *writeQuota) get(sz int32) error {
66 54 for {
67type resetStream struct { 55 if atomic.LoadInt32(&w.quota) > 0 {
68 streamID uint32 56 atomic.AddInt32(&w.quota, -sz)
69 code http2.ErrCode 57 return nil
58 }
59 select {
60 case <-w.ch:
61 continue
62 case <-w.done:
63 return errStreamDone
64 }
65 }
70} 66}
71 67
72func (*resetStream) item() {} 68func (w *writeQuota) realReplenish(n int) {
73 69 sz := int32(n)
74type goAway struct { 70 a := atomic.AddInt32(&w.quota, sz)
75 code http2.ErrCode 71 b := a - sz
76 debugData []byte 72 if b <= 0 && a > 0 {
77 headsUp bool 73 select {
78 closeConn bool 74 case w.ch <- struct{}{}:
75 default:
76 }
77 }
79} 78}
80 79
81func (*goAway) item() {} 80type trInFlow struct {
82 81 limit uint32
83type flushIO struct { 82 unacked uint32
83 effectiveWindowSize uint32
84} 84}
85 85
86func (*flushIO) item() {} 86func (f *trInFlow) newLimit(n uint32) uint32 {
87 87 d := n - f.limit
88type ping struct { 88 f.limit = n
89 ack bool 89 f.updateEffectiveWindowSize()
90 data [8]byte 90 return d
91} 91}
92 92
93func (*ping) item() {} 93func (f *trInFlow) onData(n uint32) uint32 {
94 94 f.unacked += n
95// quotaPool is a pool which accumulates the quota and sends it to acquire() 95 if f.unacked >= f.limit/4 {
96// when it is available. 96 w := f.unacked
97type quotaPool struct { 97 f.unacked = 0
98 c chan int 98 f.updateEffectiveWindowSize()
99 99 return w
100 mu sync.Mutex 100 }
101 quota int 101 f.updateEffectiveWindowSize()
102 return 0
102} 103}
103 104
104// newQuotaPool creates a quotaPool which has quota q available to consume. 105func (f *trInFlow) reset() uint32 {
105func newQuotaPool(q int) *quotaPool { 106 w := f.unacked
106 qb := &quotaPool{ 107 f.unacked = 0
107 c: make(chan int, 1), 108 f.updateEffectiveWindowSize()
108 } 109 return w
109 if q > 0 {
110 qb.c <- q
111 } else {
112 qb.quota = q
113 }
114 return qb
115} 110}
116 111
117// add cancels the pending quota sent on acquired, incremented by v and sends 112func (f *trInFlow) updateEffectiveWindowSize() {
118// it back on acquire. 113 atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
119func (qb *quotaPool) add(v int) {
120 qb.mu.Lock()
121 defer qb.mu.Unlock()
122 select {
123 case n := <-qb.c:
124 qb.quota += n
125 default:
126 }
127 qb.quota += v
128 if qb.quota <= 0 {
129 return
130 }
131 // After the pool has been created, this is the only place that sends on
132 // the channel. Since mu is held at this point and any quota that was sent
133 // on the channel has been retrieved, we know that this code will always
134 // place any positive quota value on the channel.
135 select {
136 case qb.c <- qb.quota:
137 qb.quota = 0
138 default:
139 }
140} 114}
141 115
142// acquire returns the channel on which available quota amounts are sent. 116func (f *trInFlow) getSize() uint32 {
143func (qb *quotaPool) acquire() <-chan int { 117 return atomic.LoadUint32(&f.effectiveWindowSize)
144 return qb.c
145} 118}
146 119
120// TODO(mmukhi): Simplify this code.
147// inFlow deals with inbound flow control 121// inFlow deals with inbound flow control
148type inFlow struct { 122type inFlow struct {
149 mu sync.Mutex 123 mu sync.Mutex
@@ -164,9 +138,9 @@ type inFlow struct {
164// It assumes that n is always greater than the old limit. 138// It assumes that n is always greater than the old limit.
165func (f *inFlow) newLimit(n uint32) uint32 { 139func (f *inFlow) newLimit(n uint32) uint32 {
166 f.mu.Lock() 140 f.mu.Lock()
167 defer f.mu.Unlock()
168 d := n - f.limit 141 d := n - f.limit
169 f.limit = n 142 f.limit = n
143 f.mu.Unlock()
170 return d 144 return d
171} 145}
172 146
@@ -175,7 +149,6 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
175 n = uint32(math.MaxInt32) 149 n = uint32(math.MaxInt32)
176 } 150 }
177 f.mu.Lock() 151 f.mu.Lock()
178 defer f.mu.Unlock()
179 // estSenderQuota is the receiver's view of the maximum number of bytes the sender 152 // estSenderQuota is the receiver's view of the maximum number of bytes the sender
180 // can send without a window update. 153 // can send without a window update.
181 estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) 154 estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
@@ -187,7 +160,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
187 // for this message. Therefore we must send an update over the limit since there's an active read 160 // for this message. Therefore we must send an update over the limit since there's an active read
188 // request from the application. 161 // request from the application.
189 if estUntransmittedData > estSenderQuota { 162 if estUntransmittedData > estSenderQuota {
190 // Sender's window shouldn't go more than 2^31 - 1 as speecified in the HTTP spec. 163 // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
191 if f.limit+n > maxWindowSize { 164 if f.limit+n > maxWindowSize {
192 f.delta = maxWindowSize - f.limit 165 f.delta = maxWindowSize - f.limit
193 } else { 166 } else {
@@ -196,19 +169,24 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
196 // is padded; We will fallback on the current available window(at least a 1/4th of the limit). 169 // is padded; We will fallback on the current available window(at least a 1/4th of the limit).
197 f.delta = n 170 f.delta = n
198 } 171 }
172 f.mu.Unlock()
199 return f.delta 173 return f.delta
200 } 174 }
175 f.mu.Unlock()
201 return 0 176 return 0
202} 177}
203 178
204// onData is invoked when some data frame is received. It updates pendingData. 179// onData is invoked when some data frame is received. It updates pendingData.
205func (f *inFlow) onData(n uint32) error { 180func (f *inFlow) onData(n uint32) error {
206 f.mu.Lock() 181 f.mu.Lock()
207 defer f.mu.Unlock()
208 f.pendingData += n 182 f.pendingData += n
209 if f.pendingData+f.pendingUpdate > f.limit+f.delta { 183 if f.pendingData+f.pendingUpdate > f.limit+f.delta {
210 return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) 184 limit := f.limit
185 rcvd := f.pendingData + f.pendingUpdate
186 f.mu.Unlock()
187 return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
211 } 188 }
189 f.mu.Unlock()
212 return nil 190 return nil
213} 191}
214 192
@@ -216,8 +194,8 @@ func (f *inFlow) onData(n uint32) error {
216// to be sent to the peer. 194// to be sent to the peer.
217func (f *inFlow) onRead(n uint32) uint32 { 195func (f *inFlow) onRead(n uint32) uint32 {
218 f.mu.Lock() 196 f.mu.Lock()
219 defer f.mu.Unlock()
220 if f.pendingData == 0 { 197 if f.pendingData == 0 {
198 f.mu.Unlock()
221 return 0 199 return 0
222 } 200 }
223 f.pendingData -= n 201 f.pendingData -= n
@@ -232,15 +210,9 @@ func (f *inFlow) onRead(n uint32) uint32 {
232 if f.pendingUpdate >= f.limit/4 { 210 if f.pendingUpdate >= f.limit/4 {
233 wu := f.pendingUpdate 211 wu := f.pendingUpdate
234 f.pendingUpdate = 0 212 f.pendingUpdate = 0
213 f.mu.Unlock()
235 return wu 214 return wu
236 } 215 }
216 f.mu.Unlock()
237 return 0 217 return 0
238} 218}
239
240func (f *inFlow) resetPendingUpdate() uint32 {
241 f.mu.Lock()
242 defer f.mu.Unlock()
243 n := f.pendingUpdate
244 f.pendingUpdate = 0
245 return n
246}
diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 27372b5..73b41ea 100644
--- a/vendor/google.golang.org/grpc/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,6 +24,7 @@
24package transport 24package transport
25 25
26import ( 26import (
27 "context"
27 "errors" 28 "errors"
28 "fmt" 29 "fmt"
29 "io" 30 "io"
@@ -33,26 +34,30 @@ import (
33 "sync" 34 "sync"
34 "time" 35 "time"
35 36
36 "golang.org/x/net/context" 37 "github.com/golang/protobuf/proto"
37 "golang.org/x/net/http2" 38 "golang.org/x/net/http2"
38 "google.golang.org/grpc/codes" 39 "google.golang.org/grpc/codes"
39 "google.golang.org/grpc/credentials" 40 "google.golang.org/grpc/credentials"
40 "google.golang.org/grpc/metadata" 41 "google.golang.org/grpc/metadata"
41 "google.golang.org/grpc/peer" 42 "google.golang.org/grpc/peer"
43 "google.golang.org/grpc/stats"
42 "google.golang.org/grpc/status" 44 "google.golang.org/grpc/status"
43) 45)
44 46
45// NewServerHandlerTransport returns a ServerTransport handling gRPC 47// NewServerHandlerTransport returns a ServerTransport handling gRPC
46// from inside an http.Handler. It requires that the http Server 48// from inside an http.Handler. It requires that the http Server
47// supports HTTP/2. 49// supports HTTP/2.
48func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { 50func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
49 if r.ProtoMajor != 2 { 51 if r.ProtoMajor != 2 {
50 return nil, errors.New("gRPC requires HTTP/2") 52 return nil, errors.New("gRPC requires HTTP/2")
51 } 53 }
52 if r.Method != "POST" { 54 if r.Method != "POST" {
53 return nil, errors.New("invalid gRPC request method") 55 return nil, errors.New("invalid gRPC request method")
54 } 56 }
55 if !validContentType(r.Header.Get("Content-Type")) { 57 contentType := r.Header.Get("Content-Type")
58 // TODO: do we assume contentType is lowercase? we did before
59 contentSubtype, validContentType := contentSubtype(contentType)
60 if !validContentType {
56 return nil, errors.New("invalid gRPC request content-type") 61 return nil, errors.New("invalid gRPC request content-type")
57 } 62 }
58 if _, ok := w.(http.Flusher); !ok { 63 if _, ok := w.(http.Flusher); !ok {
@@ -63,34 +68,37 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
63 } 68 }
64 69
65 st := &serverHandlerTransport{ 70 st := &serverHandlerTransport{
66 rw: w, 71 rw: w,
67 req: r, 72 req: r,
68 closedCh: make(chan struct{}), 73 closedCh: make(chan struct{}),
69 writes: make(chan func()), 74 writes: make(chan func()),
75 contentType: contentType,
76 contentSubtype: contentSubtype,
77 stats: stats,
70 } 78 }
71 79
72 if v := r.Header.Get("grpc-timeout"); v != "" { 80 if v := r.Header.Get("grpc-timeout"); v != "" {
73 to, err := decodeTimeout(v) 81 to, err := decodeTimeout(v)
74 if err != nil { 82 if err != nil {
75 return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err) 83 return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
76 } 84 }
77 st.timeoutSet = true 85 st.timeoutSet = true
78 st.timeout = to 86 st.timeout = to
79 } 87 }
80 88
81 var metakv []string 89 metakv := []string{"content-type", contentType}
82 if r.Host != "" { 90 if r.Host != "" {
83 metakv = append(metakv, ":authority", r.Host) 91 metakv = append(metakv, ":authority", r.Host)
84 } 92 }
85 for k, vv := range r.Header { 93 for k, vv := range r.Header {
86 k = strings.ToLower(k) 94 k = strings.ToLower(k)
87 if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) { 95 if isReservedHeader(k) && !isWhitelistedHeader(k) {
88 continue 96 continue
89 } 97 }
90 for _, v := range vv { 98 for _, v := range vv {
91 v, err := decodeMetadataHeader(k, v) 99 v, err := decodeMetadataHeader(k, v)
92 if err != nil { 100 if err != nil {
93 return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) 101 return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
94 } 102 }
95 metakv = append(metakv, k, v) 103 metakv = append(metakv, k, v)
96 } 104 }
@@ -121,6 +129,18 @@ type serverHandlerTransport struct {
121 // ServeHTTP (HandleStreams) goroutine. The channel is closed 129 // ServeHTTP (HandleStreams) goroutine. The channel is closed
122 // when WriteStatus is called. 130 // when WriteStatus is called.
123 writes chan func() 131 writes chan func()
132
133 // block concurrent WriteStatus calls
134 // e.g. grpc/(*serverStream).SendMsg/RecvMsg
135 writeStatusMu sync.Mutex
136
137 // we just mirror the request content-type
138 contentType string
139 // we store both contentType and contentSubtype so we don't keep recreating them
140 // TODO make sure this is consistent across handler_server and http2_server
141 contentSubtype string
142
143 stats stats.Handler
124} 144}
125 145
126func (ht *serverHandlerTransport) Close() error { 146func (ht *serverHandlerTransport) Close() error {
@@ -167,11 +187,13 @@ func (ht *serverHandlerTransport) do(fn func()) error {
167 case <-ht.closedCh: 187 case <-ht.closedCh:
168 return ErrConnClosing 188 return ErrConnClosing
169 } 189 }
170
171 } 190 }
172} 191}
173 192
174func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { 193func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
194 ht.writeStatusMu.Lock()
195 defer ht.writeStatusMu.Unlock()
196
175 err := ht.do(func() { 197 err := ht.do(func() {
176 ht.writeCommonHeaders(s) 198 ht.writeCommonHeaders(s)
177 199
@@ -186,7 +208,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
186 h.Set("Grpc-Message", encodeGrpcMessage(m)) 208 h.Set("Grpc-Message", encodeGrpcMessage(m))
187 } 209 }
188 210
189 // TODO: Support Grpc-Status-Details-Bin 211 if p := st.Proto(); p != nil && len(p.Details) > 0 {
212 stBytes, err := proto.Marshal(p)
213 if err != nil {
214 // TODO: return error instead, when callers are able to handle it.
215 panic(err)
216 }
217
218 h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
219 }
190 220
191 if md := s.Trailer(); len(md) > 0 { 221 if md := s.Trailer(); len(md) > 0 {
192 for k, vv := range md { 222 for k, vv := range md {
@@ -202,7 +232,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
202 } 232 }
203 } 233 }
204 }) 234 })
205 close(ht.writes) 235
236 if err == nil { // transport has not been closed
237 if ht.stats != nil {
238 ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
239 }
240 close(ht.writes)
241 }
242 ht.Close()
206 return err 243 return err
207} 244}
208 245
@@ -216,7 +253,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
216 253
217 h := ht.rw.Header() 254 h := ht.rw.Header()
218 h["Date"] = nil // suppress Date to make tests happy; TODO: restore 255 h["Date"] = nil // suppress Date to make tests happy; TODO: restore
219 h.Set("Content-Type", "application/grpc") 256 h.Set("Content-Type", ht.contentType)
220 257
221 // Predeclare trailers we'll set later in WriteStatus (after the body). 258 // Predeclare trailers we'll set later in WriteStatus (after the body).
222 // This is a SHOULD in the HTTP RFC, and the way you add (known) 259 // This is a SHOULD in the HTTP RFC, and the way you add (known)
@@ -225,25 +262,24 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
225 // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers 262 // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
226 h.Add("Trailer", "Grpc-Status") 263 h.Add("Trailer", "Grpc-Status")
227 h.Add("Trailer", "Grpc-Message") 264 h.Add("Trailer", "Grpc-Message")
228 // TODO: Support Grpc-Status-Details-Bin 265 h.Add("Trailer", "Grpc-Status-Details-Bin")
229 266
230 if s.sendCompress != "" { 267 if s.sendCompress != "" {
231 h.Set("Grpc-Encoding", s.sendCompress) 268 h.Set("Grpc-Encoding", s.sendCompress)
232 } 269 }
233} 270}
234 271
235func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { 272func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
236 return ht.do(func() { 273 return ht.do(func() {
237 ht.writeCommonHeaders(s) 274 ht.writeCommonHeaders(s)
275 ht.rw.Write(hdr)
238 ht.rw.Write(data) 276 ht.rw.Write(data)
239 if !opts.Delay { 277 ht.rw.(http.Flusher).Flush()
240 ht.rw.(http.Flusher).Flush()
241 }
242 }) 278 })
243} 279}
244 280
245func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { 281func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
246 return ht.do(func() { 282 err := ht.do(func() {
247 ht.writeCommonHeaders(s) 283 ht.writeCommonHeaders(s)
248 h := ht.rw.Header() 284 h := ht.rw.Header()
249 for k, vv := range md { 285 for k, vv := range md {
@@ -259,17 +295,24 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
259 ht.rw.WriteHeader(200) 295 ht.rw.WriteHeader(200)
260 ht.rw.(http.Flusher).Flush() 296 ht.rw.(http.Flusher).Flush()
261 }) 297 })
298
299 if err == nil {
300 if ht.stats != nil {
301 ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
302 }
303 }
304 return err
262} 305}
263 306
264func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { 307func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
265 // With this transport type there will be exactly 1 stream: this HTTP request. 308 // With this transport type there will be exactly 1 stream: this HTTP request.
266 309
267 var ctx context.Context 310 ctx := ht.req.Context()
268 var cancel context.CancelFunc 311 var cancel context.CancelFunc
269 if ht.timeoutSet { 312 if ht.timeoutSet {
270 ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) 313 ctx, cancel = context.WithTimeout(ctx, ht.timeout)
271 } else { 314 } else {
272 ctx, cancel = context.WithCancel(context.Background()) 315 ctx, cancel = context.WithCancel(ctx)
273 } 316 }
274 317
275 // requestOver is closed when either the request's context is done 318 // requestOver is closed when either the request's context is done
@@ -283,23 +326,24 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
283 go func() { 326 go func() {
284 select { 327 select {
285 case <-requestOver: 328 case <-requestOver:
286 return
287 case <-ht.closedCh: 329 case <-ht.closedCh:
288 case <-clientGone: 330 case <-clientGone:
289 } 331 }
290 cancel() 332 cancel()
333 ht.Close()
291 }() 334 }()
292 335
293 req := ht.req 336 req := ht.req
294 337
295 s := &Stream{ 338 s := &Stream{
296 id: 0, // irrelevant 339 id: 0, // irrelevant
297 requestRead: func(int) {}, 340 requestRead: func(int) {},
298 cancel: cancel, 341 cancel: cancel,
299 buf: newRecvBuffer(), 342 buf: newRecvBuffer(),
300 st: ht, 343 st: ht,
301 method: req.URL.Path, 344 method: req.URL.Path,
302 recvCompress: req.Header.Get("grpc-encoding"), 345 recvCompress: req.Header.Get("grpc-encoding"),
346 contentSubtype: ht.contentSubtype,
303 } 347 }
304 pr := &peer.Peer{ 348 pr := &peer.Peer{
305 Addr: ht.RemoteAddr(), 349 Addr: ht.RemoteAddr(),
@@ -308,10 +352,18 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
308 pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} 352 pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
309 } 353 }
310 ctx = metadata.NewIncomingContext(ctx, ht.headerMD) 354 ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
311 ctx = peer.NewContext(ctx, pr) 355 s.ctx = peer.NewContext(ctx, pr)
312 s.ctx = newContextWithStream(ctx, s) 356 if ht.stats != nil {
357 s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
358 inHeader := &stats.InHeader{
359 FullMethod: s.method,
360 RemoteAddr: ht.RemoteAddr(),
361 Compression: s.recvCompress,
362 }
363 ht.stats.HandleRPC(s.ctx, inHeader)
364 }
313 s.trReader = &transportReader{ 365 s.trReader = &transportReader{
314 reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, 366 reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
315 windowHandler: func(int) {}, 367 windowHandler: func(int) {},
316 } 368 }
317 369
@@ -366,6 +418,10 @@ func (ht *serverHandlerTransport) runStream() {
366 } 418 }
367} 419}
368 420
421func (ht *serverHandlerTransport) IncrMsgSent() {}
422
423func (ht *serverHandlerTransport) IncrMsgRecv() {}
424
369func (ht *serverHandlerTransport) Drain() { 425func (ht *serverHandlerTransport) Drain() {
370 panic("Drain() is not implemented") 426 panic("Drain() is not implemented")
371} 427}
@@ -376,18 +432,18 @@ func (ht *serverHandlerTransport) Drain() {
376// * io.EOF 432// * io.EOF
377// * io.ErrUnexpectedEOF 433// * io.ErrUnexpectedEOF
378// * of type transport.ConnectionError 434// * of type transport.ConnectionError
379// * of type transport.StreamError 435// * an error from the status package
380func mapRecvMsgError(err error) error { 436func mapRecvMsgError(err error) error {
381 if err == io.EOF || err == io.ErrUnexpectedEOF { 437 if err == io.EOF || err == io.ErrUnexpectedEOF {
382 return err 438 return err
383 } 439 }
384 if se, ok := err.(http2.StreamError); ok { 440 if se, ok := err.(http2.StreamError); ok {
385 if code, ok := http2ErrConvTab[se.Code]; ok { 441 if code, ok := http2ErrConvTab[se.Code]; ok {
386 return StreamError{ 442 return status.Error(code, se.Error())
387 Code: code,
388 Desc: se.Error(),
389 }
390 } 443 }
391 } 444 }
445 if strings.Contains(err.Error(), "body closed by handler") {
446 return status.Error(codes.Canceled, err.Error())
447 }
392 return connectionErrorf(true, err, err.Error()) 448 return connectionErrorf(true, err, err.Error())
393} 449}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 0000000..babcaee
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1380 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "context"
23 "fmt"
24 "io"
25 "math"
26 "net"
27 "strconv"
28 "strings"
29 "sync"
30 "sync/atomic"
31 "time"
32
33 "golang.org/x/net/http2"
34 "golang.org/x/net/http2/hpack"
35
36 "google.golang.org/grpc/codes"
37 "google.golang.org/grpc/credentials"
38 "google.golang.org/grpc/internal/channelz"
39 "google.golang.org/grpc/internal/syscall"
40 "google.golang.org/grpc/keepalive"
41 "google.golang.org/grpc/metadata"
42 "google.golang.org/grpc/peer"
43 "google.golang.org/grpc/stats"
44 "google.golang.org/grpc/status"
45)
46
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
	// ctx governs the lifetime of the transport; cancel is invoked in Close.
	ctx     context.Context
	cancel  context.CancelFunc
	ctxDone <-chan struct{} // Cache the ctx.Done() chan.
	// userAgent is sent in the user-agent header of every stream.
	userAgent string
	// md holds addr.Metadata from TargetInfo; consumed as *metadata.MD
	// when set (see createHeaderFields).
	md   interface{}
	conn net.Conn // underlying communication channel
	// loopy is the writer goroutine's state; set when the loopy
	// goroutine is started in newHTTP2Client.
	loopy      *loopyWriter
	remoteAddr net.Addr
	localAddr  net.Addr
	authInfo   credentials.AuthInfo // auth info about the connection

	readerDone chan struct{} // sync point to enable testing.
	writerDone chan struct{} // sync point to enable testing.
	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
	// that the server sent GoAway on this transport.
	goAway chan struct{}
	// awakenKeepalive is used to wake up keepalive when after it has gone dormant.
	awakenKeepalive chan struct{}

	framer *framer
	// controlBuf delivers all the control related tasks (e.g., window
	// updates, reset streams, and various settings) to the controller.
	controlBuf *controlBuffer
	fc         *trInFlow // connection-level inbound flow control
	// The scheme used: https if TLS is on, http otherwise.
	scheme string

	isSecure bool

	perRPCCreds []credentials.PerRPCCredentials

	// Boolean to keep track of reading activity on transport.
	// 1 is true and 0 is false.
	activity uint32 // Accessed atomically.
	kp       keepalive.ClientParameters
	// keepaliveEnabled is true when kp.Time is finite.
	keepaliveEnabled bool

	statsHandler stats.Handler

	// initialWindowSize is the per-stream inbound flow control window.
	initialWindowSize int32

	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
	maxSendHeaderListSize *uint32

	// bdpEst estimates the bandwidth-delay product; nil when window
	// sizes were fixed explicitly via ConnectOptions.
	bdpEst *bdpEstimator
	// onPrefaceReceipt is a callback that client transport calls upon
	// receiving server preface to signal that a successful HTTP2
	// connection was established.
	onPrefaceReceipt func()

	maxConcurrentStreams uint32
	// streamQuota is the number of streams that may still be started;
	// it can go negative if the server decreases the limit.
	streamQuota           int64
	streamsQuotaAvailable chan struct{}
	waitingStreams        uint32
	// nextID is the stream ID for the next new stream; client-initiated
	// IDs start at 1 and advance by 2 (odd IDs only).
	nextID uint32

	mu            sync.Mutex // guard the following variables
	state         transportState
	activeStreams map[uint32]*Stream
	// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
	prevGoAwayID uint32
	// goAwayReason records the http2.ErrCode and debug data received with the
	// GoAway frame.
	goAwayReason GoAwayReason

	// Fields below are for channelz metric collection.
	channelzID int64 // channelz unique identification number
	czData     *channelzData

	// onGoAway and onClose notify the owning addrConn of transport events.
	onGoAway func(GoAwayReason)
	onClose  func()
}
121
122func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
123 if fn != nil {
124 return fn(ctx, addr)
125 }
126 return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
127}
128
129func isTemporary(err error) bool {
130 switch err := err.(type) {
131 case interface {
132 Temporary() bool
133 }:
134 return err.Temporary()
135 case interface {
136 Timeout() bool
137 }:
138 // Timeouts may be resolved upon retry, and are thus treated as
139 // temporary.
140 return err.Timeout()
141 }
142 return true
143}
144
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
//
// connectCtx bounds only the dial and TLS handshake; ctx governs the
// lifetime of the transport itself. onPrefaceReceipt, onGoAway and
// onClose are callbacks into the owning connection.
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
	scheme := "http"
	ctx, cancel := context.WithCancel(ctx)
	// Release the transport context if construction fails at any point.
	defer func() {
		if err != nil {
			cancel()
		}
	}()

	conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
	if err != nil {
		if opts.FailOnNonTempDialError {
			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
		}
		return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
	}
	// Any further errors will close the underlying connection
	defer func(conn net.Conn) {
		if err != nil {
			conn.Close()
		}
	}(conn)
	kp := opts.KeepaliveParams
	// Validate keepalive parameters.
	if kp.Time == 0 {
		kp.Time = defaultClientKeepaliveTime
	}
	if kp.Timeout == 0 {
		kp.Timeout = defaultClientKeepaliveTimeout
	}
	keepaliveEnabled := false
	if kp.Time != infinity {
		// Ask the kernel to fail writes that remain unacknowledged for
		// the keepalive timeout (TCP_USER_TIMEOUT).
		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
		}
		keepaliveEnabled = true
	}
	var (
		isSecure bool
		authInfo credentials.AuthInfo
	)
	transportCreds := opts.TransportCredentials
	perRPCCreds := opts.PerRPCCredentials

	// A credentials bundle, when present, overrides the transport
	// credentials and extends the per-RPC credentials.
	if b := opts.CredsBundle; b != nil {
		if t := b.TransportCredentials(); t != nil {
			transportCreds = t
		}
		if t := b.PerRPCCredentials(); t != nil {
			perRPCCreds = append(perRPCCreds, t)
		}
	}
	if transportCreds != nil {
		scheme = "https"
		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
		if err != nil {
			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
		}
		isSecure = true
	}
	// Explicit user-provided window sizes disable dynamic (BDP-based)
	// window estimation.
	dynamicWindow := true
	icwz := int32(initialWindowSize)
	if opts.InitialConnWindowSize >= defaultWindowSize {
		icwz = opts.InitialConnWindowSize
		dynamicWindow = false
	}
	writeBufSize := opts.WriteBufferSize
	readBufSize := opts.ReadBufferSize
	maxHeaderListSize := defaultClientMaxHeaderListSize
	if opts.MaxHeaderListSize != nil {
		maxHeaderListSize = *opts.MaxHeaderListSize
	}
	t := &http2Client{
		ctx:                   ctx,
		ctxDone:               ctx.Done(), // Cache Done chan.
		cancel:                cancel,
		userAgent:             opts.UserAgent,
		md:                    addr.Metadata,
		conn:                  conn,
		remoteAddr:            conn.RemoteAddr(),
		localAddr:             conn.LocalAddr(),
		authInfo:              authInfo,
		readerDone:            make(chan struct{}),
		writerDone:            make(chan struct{}),
		goAway:                make(chan struct{}),
		awakenKeepalive:       make(chan struct{}, 1),
		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
		fc:                    &trInFlow{limit: uint32(icwz)},
		scheme:                scheme,
		activeStreams:         make(map[uint32]*Stream),
		isSecure:              isSecure,
		perRPCCreds:           perRPCCreds,
		kp:                    kp,
		statsHandler:          opts.StatsHandler,
		initialWindowSize:     initialWindowSize,
		onPrefaceReceipt:      onPrefaceReceipt,
		nextID:                1,
		maxConcurrentStreams:  defaultMaxStreamsClient,
		streamQuota:           defaultMaxStreamsClient,
		streamsQuotaAvailable: make(chan struct{}, 1),
		czData:                new(channelzData),
		onGoAway:              onGoAway,
		onClose:               onClose,
		keepaliveEnabled:      keepaliveEnabled,
	}
	t.controlBuf = newControlBuffer(t.ctxDone)
	if opts.InitialWindowSize >= defaultWindowSize {
		t.initialWindowSize = opts.InitialWindowSize
		dynamicWindow = false
	}
	if dynamicWindow {
		t.bdpEst = &bdpEstimator{
			bdp:               initialWindowSize,
			updateFlowControl: t.updateFlowControl,
		}
	}
	// Make sure awakenKeepalive can't be written upon.
	// keepalive routine will make it writable, if need be.
	t.awakenKeepalive <- struct{}{}
	if t.statsHandler != nil {
		t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
			RemoteAddr: t.remoteAddr,
			LocalAddr:  t.localAddr,
		})
		connBegin := &stats.ConnBegin{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connBegin)
	}
	if channelz.IsOn() {
		t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
	}
	if t.keepaliveEnabled {
		go t.keepalive()
	}
	// Start the reader goroutine for incoming message. Each transport has
	// a dedicated goroutine which reads HTTP2 frame from network. Then it
	// dispatches the frame to the corresponding stream entity.
	go t.reader()

	// Send connection preface to server.
	n, err := t.conn.Write(clientPreface)
	if err != nil {
		t.Close()
		return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
	}
	if n != len(clientPreface) {
		t.Close()
		return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
	}
	var ss []http2.Setting

	if t.initialWindowSize != defaultWindowSize {
		ss = append(ss, http2.Setting{
			ID:  http2.SettingInitialWindowSize,
			Val: uint32(t.initialWindowSize),
		})
	}
	if opts.MaxHeaderListSize != nil {
		ss = append(ss, http2.Setting{
			ID:  http2.SettingMaxHeaderListSize,
			Val: *opts.MaxHeaderListSize,
		})
	}
	err = t.framer.fr.WriteSettings(ss...)
	if err != nil {
		t.Close()
		return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
	}
	// Adjust the connection flow control window if needed.
	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
			t.Close()
			return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
		}
	}

	t.framer.writer.Flush()
	// From here on, the loopy writer goroutine owns all outbound frame
	// writes; everything else goes through controlBuf.
	go func() {
		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
		err := t.loopy.run()
		if err != nil {
			errorf("transport: loopyWriter.run returning. Err: %v", err)
		}
		// If it's a connection error, let reader goroutine handle it
		// since there might be data in the buffers.
		if _, ok := err.(net.Error); !ok {
			t.conn.Close()
		}
		close(t.writerDone)
	}()
	return t, nil
}
341
// newStream builds a Stream for callHdr, wired to this transport's
// flow control and receive buffer. The stream is not yet registered as
// active and has no ID; NewStream assigns the ID once stream quota is
// granted.
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
	s := &Stream{
		done:           make(chan struct{}),
		method:         callHdr.Method,
		sendCompress:   callHdr.SendCompress,
		buf:            newRecvBuffer(),
		headerChan:     make(chan struct{}),
		contentSubtype: callHdr.ContentSubtype,
	}
	s.wq = newWriteQuota(defaultWriteQuota, s.done)
	// requestRead lets the application hint how much data it expects,
	// so the stream window can be grown ahead of the reads.
	s.requestRead = func(n int) {
		t.adjustWindow(s, uint32(n))
	}
	// The client side stream context should have exactly the same life cycle with the user provided context.
	// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
	// So we use the original context here instead of creating a copy.
	s.ctx = ctx
	s.trReader = &transportReader{
		reader: &recvBufferReader{
			ctx:     s.ctx,
			ctxDone: s.ctx.Done(),
			recv:    s.buf,
			closeStream: func(err error) {
				t.CloseStream(s, err)
			},
		},
		// windowHandler returns consumed bytes to the stream's inbound
		// flow control window.
		windowHandler: func(n int) {
			t.updateWindow(s, uint32(n))
		},
	}
	return s
}
375
376func (t *http2Client) getPeer() *peer.Peer {
377 pr := &peer.Peer{
378 Addr: t.remoteAddr,
379 }
380 // Attach Auth info if there is any.
381 if t.authInfo != nil {
382 pr.AuthInfo = t.authInfo
383 }
384 return pr
385}
386
387func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
388 aud := t.createAudience(callHdr)
389 authData, err := t.getTrAuthData(ctx, aud)
390 if err != nil {
391 return nil, err
392 }
393 callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
394 if err != nil {
395 return nil, err
396 }
397 // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
398 // first and create a slice of that exact size.
399 // Make the slice of certain predictable size to reduce allocations made by append.
400 hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
401 hfLen += len(authData) + len(callAuthData)
402 headerFields := make([]hpack.HeaderField, 0, hfLen)
403 headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
404 headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
405 headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
406 headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
407 headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
408 headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
409 headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
410 if callHdr.PreviousAttempts > 0 {
411 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
412 }
413
414 if callHdr.SendCompress != "" {
415 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
416 }
417 if dl, ok := ctx.Deadline(); ok {
418 // Send out timeout regardless its value. The server can detect timeout context by itself.
419 // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
420 timeout := dl.Sub(time.Now())
421 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
422 }
423 for k, v := range authData {
424 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
425 }
426 for k, v := range callAuthData {
427 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
428 }
429 if b := stats.OutgoingTags(ctx); b != nil {
430 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
431 }
432 if b := stats.OutgoingTrace(ctx); b != nil {
433 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
434 }
435
436 if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
437 var k string
438 for _, vv := range added {
439 for i, v := range vv {
440 if i%2 == 0 {
441 k = v
442 continue
443 }
444 // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
445 if isReservedHeader(k) {
446 continue
447 }
448 headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
449 }
450 }
451 for k, vv := range md {
452 // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
453 if isReservedHeader(k) {
454 continue
455 }
456 for _, v := range vv {
457 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
458 }
459 }
460 }
461 if md, ok := t.md.(*metadata.MD); ok {
462 for k, vv := range *md {
463 if isReservedHeader(k) {
464 continue
465 }
466 for _, v := range vv {
467 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
468 }
469 }
470 }
471 return headerFields, nil
472}
473
474func (t *http2Client) createAudience(callHdr *CallHdr) string {
475 // Create an audience string only if needed.
476 if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
477 return ""
478 }
479 // Construct URI required to get auth request metadata.
480 // Omit port if it is the default one.
481 host := strings.TrimSuffix(callHdr.Host, ":443")
482 pos := strings.LastIndex(callHdr.Method, "/")
483 if pos == -1 {
484 pos = len(callHdr.Method)
485 }
486 return "https://" + host + callHdr.Method[:pos]
487}
488
// getTrAuthData collects request metadata from the transport-level
// (dial option / bundle) per-RPC credentials for the given audience.
// Keys are lowercased because capitalized header names are illegal in
// HTTP/2. Errors that already carry a gRPC status are returned as-is;
// any other credential failure is reported as Unauthenticated.
func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
	authData := map[string]string{}
	for _, c := range t.perRPCCreds {
		data, err := c.GetRequestMetadata(ctx, audience)
		if err != nil {
			if _, ok := status.FromError(err); ok {
				return nil, err
			}

			return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
		}
		for k, v := range data {
			// Capital header names are illegal in HTTP/2.
			k = strings.ToLower(k)
			authData[k] = v
		}
	}
	return authData, nil
}
508
// getCallAuthData collects request metadata from per-call credentials
// (CallOption-provided), enforcing that credentials requiring
// transport security are not sent over an insecure connection. Keys
// are lowercased as required by HTTP/2.
func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
	callAuthData := map[string]string{}
	// Check if credentials.PerRPCCredentials were provided via call options.
	// Note: if these credentials are provided both via dial options and call
	// options, then both sets of credentials will be applied.
	if callCreds := callHdr.Creds; callCreds != nil {
		if !t.isSecure && callCreds.RequireTransportSecurity() {
			return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
		}
		data, err := callCreds.GetRequestMetadata(ctx, audience)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "transport: %v", err)
		}
		for k, v := range data {
			// Capital header names are illegal in HTTP/2
			k = strings.ToLower(k)
			callAuthData[k] = v
		}
	}
	return callAuthData, nil
}
530
// NewStream creates a stream and registers it into the transport as "active"
// streams.
//
// It blocks until stream quota (MAX_CONCURRENT_STREAMS) is available,
// the context is done, or the transport is draining/closing. Stream ID
// assignment and registration happen inside controlBuf's serialized
// executeAndPut so they are ordered with other control operations.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
	ctx = peer.NewContext(ctx, t.getPeer())
	headerFields, err := t.createHeaderFields(ctx, callHdr)
	if err != nil {
		return nil, err
	}
	s := t.newStream(ctx, callHdr)
	// cleanup marks the stream done with err, unblocking pending reads
	// and header waiters. Used when the stream fails before the server
	// could process it.
	cleanup := func(err error) {
		if s.swapState(streamDone) == streamDone {
			// If it was already done, return.
			return
		}
		// The stream was unprocessed by the server.
		atomic.StoreUint32(&s.unprocessed, 1)
		s.write(recvMsg{err: err})
		close(s.done)
		// If headerChan isn't closed, then close it.
		if atomic.SwapUint32(&s.headerDone, 1) == 0 {
			close(s.headerChan)
		}

	}
	hdr := &headerFrame{
		hf:        headerFields,
		endStream: false,
		// initStream registers the stream once loopy assigns it an ID;
		// it reports whether a keepalive ping should be sent.
		initStream: func(id uint32) (bool, error) {
			t.mu.Lock()
			if state := t.state; state != reachable {
				t.mu.Unlock()
				// Do a quick cleanup.
				err := error(errStreamDrain)
				if state == closing {
					err = ErrConnClosing
				}
				cleanup(err)
				return false, err
			}
			t.activeStreams[id] = s
			if channelz.IsOn() {
				atomic.AddInt64(&t.czData.streamsStarted, 1)
				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
			}
			var sendPing bool
			// If the number of active streams change from 0 to 1, then check if keepalive
			// has gone dormant. If so, wake it up.
			if len(t.activeStreams) == 1 && t.keepaliveEnabled {
				select {
				case t.awakenKeepalive <- struct{}{}:
					sendPing = true
					// Fill the awakenKeepalive channel again as this channel must be
					// kept non-writable except at the point that the keepalive()
					// goroutine is waiting either to be awaken or shutdown.
					t.awakenKeepalive <- struct{}{}
				default:
				}
			}
			t.mu.Unlock()
			return sendPing, nil
		},
		onOrphaned: cleanup,
		wq:         s.wq,
	}
	firstTry := true
	var ch chan struct{}
	// checkForStreamQuota runs inside controlBuf's executeAndPut; it
	// reserves one slot of stream quota and, on success, assigns the
	// next odd stream ID and the stream's flow control window.
	checkForStreamQuota := func(it interface{}) bool {
		if t.streamQuota <= 0 { // Can go negative if server decreases it.
			if firstTry {
				t.waitingStreams++
			}
			ch = t.streamsQuotaAvailable
			return false
		}
		if !firstTry {
			t.waitingStreams--
		}
		t.streamQuota--
		h := it.(*headerFrame)
		h.streamID = t.nextID
		t.nextID += 2
		s.id = h.streamID
		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
		// Wake one other waiter if quota remains.
		if t.streamQuota > 0 && t.waitingStreams > 0 {
			select {
			case t.streamsQuotaAvailable <- struct{}{}:
			default:
			}
		}
		return true
	}
	var hdrListSizeErr error
	// checkForHeaderListSize rejects the stream before it hits the wire
	// when the encoded headers would exceed the server-advertised
	// SETTINGS_MAX_HEADER_LIST_SIZE.
	checkForHeaderListSize := func(it interface{}) bool {
		if t.maxSendHeaderListSize == nil {
			return true
		}
		hdrFrame := it.(*headerFrame)
		var sz int64
		for _, f := range hdrFrame.hf {
			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
				return false
			}
		}
		return true
	}
	for {
		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
			if !checkForStreamQuota(it) {
				return false
			}
			if !checkForHeaderListSize(it) {
				return false
			}
			return true
		}, hdr)
		if err != nil {
			return nil, err
		}
		if success {
			break
		}
		if hdrListSizeErr != nil {
			return nil, hdrListSizeErr
		}
		firstTry = false
		// No quota yet: wait for a freed slot, or for stream/transport
		// teardown.
		select {
		case <-ch:
		case <-s.ctx.Done():
			return nil, ContextErr(s.ctx.Err())
		case <-t.goAway:
			return nil, errStreamDrain
		case <-t.ctx.Done():
			return nil, ErrConnClosing
		}
	}
	if t.statsHandler != nil {
		outHeader := &stats.OutHeader{
			Client:      true,
			FullMethod:  callHdr.Method,
			RemoteAddr:  t.remoteAddr,
			LocalAddr:   t.localAddr,
			Compression: callHdr.SendCompress,
		}
		t.statsHandler.HandleRPC(s.ctx, outHeader)
	}
	return s, nil
}
679
680// CloseStream clears the footprint of a stream when the stream is not needed any more.
681// This must not be executed in reader's goroutine.
682func (t *http2Client) CloseStream(s *Stream, err error) {
683 var (
684 rst bool
685 rstCode http2.ErrCode
686 )
687 if err != nil {
688 rst = true
689 rstCode = http2.ErrCodeCancel
690 }
691 t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
692}
693
// closeStream finishes stream s: it records the final status st and
// trailer metadata mdata, unblocks pending reads (via err) and writes
// (via s.done), closes headerChan if still open, and queues a
// cleanupStream control frame that removes the stream from
// activeStreams and optionally sends RST_STREAM(rstCode) when rst is
// set. eosReceived reports whether the server ended the stream cleanly
// and only affects channelz counters. Safe against concurrent calls:
// only the first caller past the state swap proceeds.
func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
	// Set stream status to done.
	if s.swapState(streamDone) == streamDone {
		// If it was already done, return. If multiple closeStream calls
		// happen simultaneously, wait for the first to finish.
		<-s.done
		return
	}
	// status and trailers can be updated here without any synchronization because the stream goroutine will
	// only read it after it sees an io.EOF error from read or write and we'll write those errors
	// only after updating this.
	s.status = st
	if len(mdata) > 0 {
		s.trailer = mdata
	}
	if err != nil {
		// This will unblock reads eventually.
		s.write(recvMsg{err: err})
	}
	// If headerChan isn't closed, then close it.
	if atomic.SwapUint32(&s.headerDone, 1) == 0 {
		s.noHeaders = true
		close(s.headerChan)
	}
	cleanup := &cleanupStream{
		streamID: s.id,
		// onWrite runs when loopy processes the cleanup frame; the
		// stream is only then removed from the active set.
		onWrite: func() {
			t.mu.Lock()
			if t.activeStreams != nil {
				delete(t.activeStreams, s.id)
			}
			t.mu.Unlock()
			if channelz.IsOn() {
				if eosReceived {
					atomic.AddInt64(&t.czData.streamsSucceeded, 1)
				} else {
					atomic.AddInt64(&t.czData.streamsFailed, 1)
				}
			}
		},
		rst:     rst,
		rstCode: rstCode,
	}
	// Return this stream's quota slot and wake a waiting NewStream, if any.
	addBackStreamQuota := func(interface{}) bool {
		t.streamQuota++
		if t.streamQuota > 0 && t.waitingStreams > 0 {
			select {
			case t.streamsQuotaAvailable <- struct{}{}:
			default:
			}
		}
		return true
	}
	t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
	// This will unblock write.
	close(s.done)
}
751
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
//
// This method blocks until the addrConn that initiated this transport is
// re-connected. This happens because t.onClose() begins reconnect logic at the
// addrConn level and blocks until the addrConn is successfully connected.
func (t *http2Client) Close() error {
	t.mu.Lock()
	// Make sure we only Close once.
	if t.state == closing {
		t.mu.Unlock()
		return nil
	}
	t.state = closing
	// Snapshot and clear the active streams under the lock; they are
	// failed below without holding t.mu.
	streams := t.activeStreams
	t.activeStreams = nil
	t.mu.Unlock()
	t.controlBuf.finish()
	t.cancel()
	err := t.conn.Close()
	if channelz.IsOn() {
		channelz.RemoveEntry(t.channelzID)
	}
	// Notify all active streams.
	for _, s := range streams {
		t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
	}
	if t.statsHandler != nil {
		connEnd := &stats.ConnEnd{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connEnd)
	}
	t.onClose()
	return err
}
789
790// GracefulClose sets the state to draining, which prevents new streams from
791// being created and causes the transport to be closed when the last active
792// stream is closed. If there are no active streams, the transport is closed
793// immediately. This does nothing if the transport is already draining or
794// closing.
795func (t *http2Client) GracefulClose() error {
796 t.mu.Lock()
797 // Make sure we move to draining only from active.
798 if t.state == draining || t.state == closing {
799 t.mu.Unlock()
800 return nil
801 }
802 t.state = draining
803 active := len(t.activeStreams)
804 t.mu.Unlock()
805 if active == 0 {
806 return t.Close()
807 }
808 t.controlBuf.put(&incomingGoAway{})
809 return nil
810}
811
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
//
// opts.Last marks the final message; it transitions the stream to
// streamWriteDone and sets END_STREAM on the frame. Writing to a
// stream that is no longer active returns errStreamDone.
func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
	if opts.Last {
		// If it's the last message, update stream state.
		if !s.compareAndSwapState(streamActive, streamWriteDone) {
			return errStreamDone
		}
	} else if s.getState() != streamActive {
		return errStreamDone
	}
	df := &dataFrame{
		streamID:  s.id,
		endStream: opts.Last,
	}
	if hdr != nil || data != nil { // If it's not an empty data frame.
		// Add some data to grpc message header so that we can equally
		// distribute bytes across frames.
		emptyLen := http2MaxFrameLen - len(hdr)
		if emptyLen > len(data) {
			emptyLen = len(data)
		}
		hdr = append(hdr, data[:emptyLen]...)
		data = data[emptyLen:]
		df.h, df.d = hdr, data
		// TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
		// Block until write quota for the whole payload is granted (or
		// the stream finishes), before handing the frame to loopy.
		if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
			return err
		}
	}
	return t.controlBuf.put(df)
}
844
845func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
846 t.mu.Lock()
847 defer t.mu.Unlock()
848 s, ok := t.activeStreams[f.Header().StreamID]
849 return s, ok
850}
851
852// adjustWindow sends out extra window update over the initial window size
853// of stream if the application is requesting data larger in size than
854// the window.
855func (t *http2Client) adjustWindow(s *Stream, n uint32) {
856 if w := s.fc.maybeAdjust(n); w > 0 {
857 t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
858 }
859}
860
861// updateWindow adjusts the inbound quota for the stream.
862// Window updates will be sent out when the cumulative quota
863// exceeds the corresponding threshold.
864func (t *http2Client) updateWindow(s *Stream, n uint32) {
865 if w := s.fc.onRead(n); w > 0 {
866 t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
867 }
868}
869
// updateFlowControl updates the incoming flow control windows
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Client) updateFlowControl(n uint32) {
	t.mu.Lock()
	for _, s := range t.activeStreams {
		s.fc.newLimit(n)
	}
	t.mu.Unlock()
	// initialWindowSize is updated inside controlBuf's serialized
	// execute so the change is ordered with the window update below
	// and with stream creation (which also reads it there).
	updateIWS := func(interface{}) bool {
		t.initialWindowSize = int32(n)
		return true
	}
	t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
	// Advertise the new per-stream window to the server via SETTINGS.
	t.controlBuf.put(&outgoingSettings{
		ss: []http2.Setting{
			{
				ID:  http2.SettingInitialWindowSize,
				Val: n,
			},
		},
	})
}
893
// handleData processes an incoming DATA frame: it feeds the BDP
// estimator, replenishes connection-level flow control, enforces
// stream-level flow control, and delivers a copy of the payload to the
// owning stream's receive buffer. An END_STREAM flag without trailers
// closes the stream with an Internal status.
func (t *http2Client) handleData(f *http2.DataFrame) {
	size := f.Header().Length
	var sendBDPPing bool
	if t.bdpEst != nil {
		sendBDPPing = t.bdpEst.add(size)
	}
	// Decouple connection's flow control from application's read.
	// An update on connection's flow control should not depend on
	// whether user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyways.
	// Decoupling the connection flow control will prevent other
	// active(fast) streams from starving in presence of slow or
	// inactive streams.
	//
	if w := t.fc.onData(size); w > 0 {
		t.controlBuf.put(&outgoingWindowUpdate{
			streamID:  0,
			increment: w,
		})
	}
	if sendBDPPing {
		// Avoid excessive ping detection (e.g. in an L7 proxy)
		// by sending a window update prior to the BDP ping.

		if w := t.fc.reset(); w > 0 {
			t.controlBuf.put(&outgoingWindowUpdate{
				streamID:  0,
				increment: w,
			})
		}

		t.controlBuf.put(bdpPing)
	}
	// Select the right stream to dispatch.
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	if size > 0 {
		// A stream that violates its flow control window is reset.
		if err := s.fc.onData(size); err != nil {
			t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
			return
		}
		if f.Header().Flags.Has(http2.FlagDataPadded) {
			// Padding is never delivered to the application, so credit
			// those bytes back to the stream window immediately.
			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
			}
		}
		// TODO(bradfitz, zhaoq): A copy is required here because there is no
		// guarantee f.Data() is consumed before the arrival of next frame.
		// Can this copy be eliminated?
		if len(f.Data()) > 0 {
			data := make([]byte, len(f.Data()))
			copy(data, f.Data())
			s.write(recvMsg{data: data})
		}
	}
	// The server has closed the stream without sending trailers. Record that
	// the read direction is closed, and set the status appropriately.
	if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
		t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
	}
}
958
959func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
960 s, ok := t.getStream(f)
961 if !ok {
962 return
963 }
964 if f.ErrCode == http2.ErrCodeRefusedStream {
965 // The stream was unprocessed by the server.
966 atomic.StoreUint32(&s.unprocessed, 1)
967 }
968 statusCode, ok := http2ErrConvTab[f.ErrCode]
969 if !ok {
970 warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
971 statusCode = codes.Unknown
972 }
973 if statusCode == codes.Canceled {
974 // Our deadline was already exceeded, and that was likely the cause of
975 // this cancelation. Alter the status code accordingly.
976 if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
977 statusCode = codes.DeadlineExceeded
978 }
979 }
980 t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
981}
982
// handleSettings processes a (non-ACK) SETTINGS frame from the server.
// Most settings are forwarded to the control buffer as an incomingSettings
// item, but MAX_CONCURRENT_STREAMS and MAX_HEADER_LIST_SIZE mutate the
// transport's own state; those mutations run via executeAndPut so they are
// applied together with queueing the settings. isFirst is true for the
// server-preface SETTINGS frame, where an absent MAX_CONCURRENT_STREAMS
// means unlimited.
func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
	if f.IsAck() {
		return
	}
	var maxStreams *uint32
	var ss []http2.Setting
	var updateFuncs []func()
	f.ForeachSetting(func(s http2.Setting) error {
		switch s.ID {
		case http2.SettingMaxConcurrentStreams:
			maxStreams = new(uint32)
			*maxStreams = s.Val
		case http2.SettingMaxHeaderListSize:
			updateFuncs = append(updateFuncs, func() {
				t.maxSendHeaderListSize = new(uint32)
				*t.maxSendHeaderListSize = s.Val
			})
		default:
			ss = append(ss, s)
		}
		return nil
	})
	if isFirst && maxStreams == nil {
		// The server preface did not limit concurrent streams.
		maxStreams = new(uint32)
		*maxStreams = math.MaxUint32
	}
	sf := &incomingSettings{
		ss: ss,
	}
	if maxStreams != nil {
		// Adjust the stream quota by the delta between the old and new
		// limits, and wake up any callers waiting for quota if it grew.
		updateStreamQuota := func() {
			delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
			t.maxConcurrentStreams = *maxStreams
			t.streamQuota += delta
			if delta > 0 && t.waitingStreams > 0 {
				close(t.streamsQuotaAvailable) // wake all of them up.
				t.streamsQuotaAvailable = make(chan struct{}, 1)
			}
		}
		updateFuncs = append(updateFuncs, updateStreamQuota)
	}
	t.controlBuf.executeAndPut(func(interface{}) bool {
		for _, f := range updateFuncs {
			f()
		}
		return true
	}, sf)
}
1031
1032func (t *http2Client) handlePing(f *http2.PingFrame) {
1033 if f.IsAck() {
1034 // Maybe it's a BDP ping.
1035 if t.bdpEst != nil {
1036 t.bdpEst.calculate(f.Data)
1037 }
1038 return
1039 }
1040 pingAck := &ping{ack: true}
1041 copy(pingAck.data[:], f.Data[:])
1042 t.controlBuf.put(pingAck)
1043}
1044
// handleGoAway processes a GOAWAY frame from the server: the first GOAWAY
// puts the transport into draining; a subsequent one narrows the range of
// streams the server will process. Streams the server will not process are
// closed with errStreamDrain.
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
	t.mu.Lock()
	if t.state == closing {
		t.mu.Unlock()
		return
	}
	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
		infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
	}
	id := f.LastStreamID
	// Client-initiated stream IDs are odd; an even non-zero last-stream ID
	// is invalid, so tear down the transport.
	if id > 0 && id%2 != 1 {
		t.mu.Unlock()
		t.Close()
		return
	}
	// A client can receive multiple GoAways from the server (see
	// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
	// sent after an RTT delay with the ID of the last stream the server will
	// process.
	//
	// Therefore, when we get the first GoAway we don't necessarily close any
	// streams. While in case of second GoAway we close all streams created after
	// the GoAwayId. This way streams that were in-flight while the GoAway from
	// server was being sent don't get killed.
	select {
	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
		if id > t.prevGoAwayID {
			t.mu.Unlock()
			t.Close()
			return
		}
	default:
		t.setGoAwayReason(f)
		close(t.goAway)
		t.state = draining
		t.controlBuf.put(&incomingGoAway{})

		// This has to be a new goroutine because we're still using the current goroutine to read in the transport.
		t.onGoAway(t.goAwayReason)
	}
	// All streams with IDs greater than the GoAwayId
	// and smaller than the previous GoAway ID should be killed.
	upperLimit := t.prevGoAwayID
	if upperLimit == 0 { // This is the first GoAway Frame.
		upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
	}
	for streamID, stream := range t.activeStreams {
		if streamID > id && streamID <= upperLimit {
			// The stream was unprocessed by the server.
			atomic.StoreUint32(&stream.unprocessed, 1)
			t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
		}
	}
	t.prevGoAwayID = id
	active := len(t.activeStreams)
	t.mu.Unlock()
	// With no streams left to drain, the transport can be closed now.
	if active == 0 {
		t.Close()
	}
}
1107
1108// setGoAwayReason sets the value of t.goAwayReason based
1109// on the GoAway frame received.
1110// It expects a lock on transport's mutext to be held by
1111// the caller.
1112func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
1113 t.goAwayReason = GoAwayNoReason
1114 switch f.ErrCode {
1115 case http2.ErrCodeEnhanceYourCalm:
1116 if string(f.DebugData()) == "too_many_pings" {
1117 t.goAwayReason = GoAwayTooManyPings
1118 }
1119 }
1120}
1121
1122func (t *http2Client) GetGoAwayReason() GoAwayReason {
1123 t.mu.Lock()
1124 defer t.mu.Unlock()
1125 return t.goAwayReason
1126}
1127
1128func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
1129 t.controlBuf.put(&incomingWindowUpdate{
1130 streamID: f.Header().StreamID,
1131 increment: f.Increment,
1132 })
1133}
1134
// operateHeaders takes action on the decoded headers. It records header
// metadata on the stream, unblocks readers waiting on s.headerChan, and —
// when END_STREAM is set — closes the stream with the decoded status
// (trailers, or a trailers-only response).
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
	s, ok := t.getStream(frame)
	if !ok {
		return
	}
	atomic.StoreUint32(&s.bytesReceived, 1)
	var state decodeState
	if err := state.decodeHeader(frame); err != nil {
		t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false)
		// Something wrong. Stops reading even when there is remaining.
		return
	}

	endStream := frame.StreamEnded()
	var isHeader bool
	// Report the frame to the stats handler as either a header or a
	// trailer; isHeader is decided below, so this runs deferred.
	defer func() {
		if t.statsHandler != nil {
			if isHeader {
				inHeader := &stats.InHeader{
					Client:     true,
					WireLength: int(frame.Header().Length),
				}
				t.statsHandler.HandleRPC(s.ctx, inHeader)
			} else {
				inTrailer := &stats.InTrailer{
					Client:     true,
					WireLength: int(frame.Header().Length),
				}
				t.statsHandler.HandleRPC(s.ctx, inTrailer)
			}
		}
	}()
	// If headers haven't been received yet. The atomic swap ensures this
	// branch runs at most once per stream.
	if atomic.SwapUint32(&s.headerDone, 1) == 0 {
		if !endStream {
			// Headers frame is not actually a trailers-only frame.
			isHeader = true
			// These values can be set without any synchronization because
			// stream goroutine will read it only after seeing a closed
			// headerChan which we'll close after setting this.
			s.recvCompress = state.encoding
			if len(state.mdata) > 0 {
				s.header = state.mdata
			}
		} else {
			s.noHeaders = true
		}
		close(s.headerChan)
	}
	if !endStream {
		return
	}
	// if client received END_STREAM from server while stream was still active, send RST_STREAM
	rst := s.getState() == streamActive
	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true)
}
1192
// reader runs as a separate goroutine in charge of reading data from network
// connection. It first validates the server preface (which must begin with
// a SETTINGS frame), then loops reading and dispatching frames by type.
//
// TODO(zhaoq): currently one reader per transport. Investigate whether this is
// optimal.
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
	defer close(t.readerDone)
	// Check the validity of server preface.
	frame, err := t.framer.fr.ReadFrame()
	if err != nil {
		t.Close() // this kicks off resetTransport, so must be last before return
		return
	}
	t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
	if t.keepaliveEnabled {
		// Record read activity for the keepalive goroutine.
		atomic.CompareAndSwapUint32(&t.activity, 0, 1)
	}
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		t.Close() // this kicks off resetTransport, so must be last before return
		return
	}
	t.onPrefaceReceipt()
	t.handleSettings(sf, true)

	// loop to keep reading incoming messages on this transport.
	for {
		frame, err := t.framer.fr.ReadFrame()
		if t.keepaliveEnabled {
			atomic.CompareAndSwapUint32(&t.activity, 0, 1)
		}
		if err != nil {
			// Abort an active stream if the http2.Framer returns a
			// http2.StreamError. This can happen only if the server's response
			// is malformed http2.
			if se, ok := err.(http2.StreamError); ok {
				t.mu.Lock()
				s := t.activeStreams[se.StreamID]
				t.mu.Unlock()
				if s != nil {
					// use error detail to provide better err message
					code := http2ErrConvTab[se.Code]
					msg := t.framer.fr.ErrorDetail().Error()
					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
				}
				continue
			} else {
				// Transport error.
				t.Close()
				return
			}
		}
		// Dispatch the frame by its concrete type.
		switch frame := frame.(type) {
		case *http2.MetaHeadersFrame:
			t.operateHeaders(frame)
		case *http2.DataFrame:
			t.handleData(frame)
		case *http2.RSTStreamFrame:
			t.handleRSTStream(frame)
		case *http2.SettingsFrame:
			t.handleSettings(frame, false)
		case *http2.PingFrame:
			t.handlePing(frame)
		case *http2.GoAwayFrame:
			t.handleGoAway(frame)
		case *http2.WindowUpdateFrame:
			t.handleWindowUpdate(frame)
		default:
			errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
		}
	}
}
1266
// keepalive runs in a separate goroutine and makes sure the connection is
// alive by sending pings. It skips a ping when there was recent read
// activity, goes dormant (blocking on t.awakenKeepalive) when there are no
// active streams and PermitWithoutStream is false, and closes the transport
// when a ping sees no activity within kp.Timeout.
func (t *http2Client) keepalive() {
	p := &ping{data: [8]byte{}}
	timer := time.NewTimer(t.kp.Time)
	for {
		select {
		case <-timer.C:
			// Activity since the last check: no ping needed, just rearm.
			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
				timer.Reset(t.kp.Time)
				continue
			}
			// Check if keepalive should go dormant.
			t.mu.Lock()
			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
				// Make awakenKeepalive writable.
				<-t.awakenKeepalive
				t.mu.Unlock()
				select {
				case <-t.awakenKeepalive:
					// If the control gets here a ping has been sent
					// need to reset the timer with keepalive.Timeout.
				case <-t.ctx.Done():
					return
				}
			} else {
				t.mu.Unlock()
				if channelz.IsOn() {
					atomic.AddInt64(&t.czData.kpCount, 1)
				}
				// Send ping.
				t.controlBuf.put(p)
			}

			// By the time control gets here a ping has been sent one way or the other.
			timer.Reset(t.kp.Timeout)
			select {
			case <-timer.C:
				if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
					timer.Reset(t.kp.Time)
					continue
				}
				// No read activity within the timeout: connection is dead.
				t.Close()
				return
			case <-t.ctx.Done():
				// Drain the timer channel if Stop lost the race, per the
				// time.Timer.Stop contract.
				if !timer.Stop() {
					<-timer.C
				}
				return
			}
		case <-t.ctx.Done():
			if !timer.Stop() {
				<-timer.C
			}
			return
		}
	}
}
1324
// Error returns a channel that is closed when the transport's context is
// done, i.e. when the transport is no longer usable.
func (t *http2Client) Error() <-chan struct{} {
	return t.ctx.Done()
}
1328
// GoAway returns a channel that is closed when the transport receives its
// first GOAWAY frame from the server.
func (t *http2Client) GoAway() <-chan struct{} {
	return t.goAway
}
1332
1333func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
1334 s := channelz.SocketInternalMetric{
1335 StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
1336 StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
1337 StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
1338 MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
1339 MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
1340 KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
1341 LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
1342 LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
1343 LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
1344 LocalFlowControlWindow: int64(t.fc.getSize()),
1345 SocketOptions: channelz.GetSocketOption(t.conn),
1346 LocalAddr: t.localAddr,
1347 RemoteAddr: t.remoteAddr,
1348 // RemoteName :
1349 }
1350 if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
1351 s.Security = au.GetSecurityValue()
1352 }
1353 s.RemoteFlowControlWindow = t.getOutFlowWindow()
1354 return &s
1355}
1356
// IncrMsgSent increments the channelz sent-message counter and records the
// time of the send.
func (t *http2Client) IncrMsgSent() {
	atomic.AddInt64(&t.czData.msgSent, 1)
	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
}
1361
// IncrMsgRecv increments the channelz received-message counter and records
// the time of the receive.
func (t *http2Client) IncrMsgRecv() {
	atomic.AddInt64(&t.czData.msgRecv, 1)
	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
}
1366
1367func (t *http2Client) getOutFlowWindow() int64 {
1368 resp := make(chan uint32, 1)
1369 timer := time.NewTimer(time.Second)
1370 defer timer.Stop()
1371 t.controlBuf.put(&outFlowControlSizeRequest{resp})
1372 select {
1373 case sz := <-resp:
1374 return int64(sz)
1375 case <-t.ctxDone:
1376 return -1
1377 case <-timer.C:
1378 return -2
1379 }
1380}
diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index b6f93e3..df27403 100644
--- a/vendor/google.golang.org/grpc/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -20,10 +20,11 @@ package transport
20 20
21import ( 21import (
22 "bytes" 22 "bytes"
23 "context"
23 "errors" 24 "errors"
25 "fmt"
24 "io" 26 "io"
25 "math" 27 "math"
26 "math/rand"
27 "net" 28 "net"
28 "strconv" 29 "strconv"
29 "sync" 30 "sync"
@@ -31,11 +32,14 @@ import (
31 "time" 32 "time"
32 33
33 "github.com/golang/protobuf/proto" 34 "github.com/golang/protobuf/proto"
34 "golang.org/x/net/context"
35 "golang.org/x/net/http2" 35 "golang.org/x/net/http2"
36 "golang.org/x/net/http2/hpack" 36 "golang.org/x/net/http2/hpack"
37
37 "google.golang.org/grpc/codes" 38 "google.golang.org/grpc/codes"
38 "google.golang.org/grpc/credentials" 39 "google.golang.org/grpc/credentials"
40 "google.golang.org/grpc/grpclog"
41 "google.golang.org/grpc/internal/channelz"
42 "google.golang.org/grpc/internal/grpcrand"
39 "google.golang.org/grpc/keepalive" 43 "google.golang.org/grpc/keepalive"
40 "google.golang.org/grpc/metadata" 44 "google.golang.org/grpc/metadata"
41 "google.golang.org/grpc/peer" 45 "google.golang.org/grpc/peer"
@@ -44,39 +48,37 @@ import (
44 "google.golang.org/grpc/tap" 48 "google.golang.org/grpc/tap"
45) 49)
46 50
47// ErrIllegalHeaderWrite indicates that setting header is illegal because of 51var (
48// the stream's state. 52 // ErrIllegalHeaderWrite indicates that setting header is illegal because of
49var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") 53 // the stream's state.
54 ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
55 // ErrHeaderListSizeLimitViolation indicates that the header list size is larger
56 // than the limit set by peer.
57 ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
58)
50 59
51// http2Server implements the ServerTransport interface with HTTP2. 60// http2Server implements the ServerTransport interface with HTTP2.
52type http2Server struct { 61type http2Server struct {
53 ctx context.Context 62 ctx context.Context
63 ctxDone <-chan struct{} // Cache the context.Done() chan
64 cancel context.CancelFunc
54 conn net.Conn 65 conn net.Conn
66 loopy *loopyWriter
67 readerDone chan struct{} // sync point to enable testing.
68 writerDone chan struct{} // sync point to enable testing.
55 remoteAddr net.Addr 69 remoteAddr net.Addr
56 localAddr net.Addr 70 localAddr net.Addr
57 maxStreamID uint32 // max stream ID ever seen 71 maxStreamID uint32 // max stream ID ever seen
58 authInfo credentials.AuthInfo // auth info about the connection 72 authInfo credentials.AuthInfo // auth info about the connection
59 inTapHandle tap.ServerInHandle 73 inTapHandle tap.ServerInHandle
60 // writableChan synchronizes write access to the transport. 74 framer *framer
61 // A writer acquires the write lock by receiving a value on writableChan
62 // and releases it by sending on writableChan.
63 writableChan chan int
64 // shutdownChan is closed when Close is called.
65 // Blocking operations should select on shutdownChan to avoid
66 // blocking forever after Close.
67 shutdownChan chan struct{}
68 framer *framer
69 hBuf *bytes.Buffer // the buffer for HPACK encoding
70 hEnc *hpack.Encoder // HPACK encoder
71 // The max number of concurrent streams. 75 // The max number of concurrent streams.
72 maxStreams uint32 76 maxStreams uint32
73 // controlBuf delivers all the control related tasks (e.g., window 77 // controlBuf delivers all the control related tasks (e.g., window
74 // updates, reset streams, and various settings) to the controller. 78 // updates, reset streams, and various settings) to the controller.
75 controlBuf *controlBuffer 79 controlBuf *controlBuffer
76 fc *inFlow 80 fc *trInFlow
77 // sendQuotaPool provides flow control to outbound message. 81 stats stats.Handler
78 sendQuotaPool *quotaPool
79 stats stats.Handler
80 // Flag to keep track of reading activity on transport. 82 // Flag to keep track of reading activity on transport.
81 // 1 is true and 0 is false. 83 // 1 is true and 0 is false.
82 activity uint32 // Accessed atomically. 84 activity uint32 // Accessed atomically.
@@ -92,11 +94,10 @@ type http2Server struct {
92 // Flag to signify that number of ping strikes should be reset to 0. 94 // Flag to signify that number of ping strikes should be reset to 0.
93 // This is set whenever data or header frames are sent. 95 // This is set whenever data or header frames are sent.
94 // 1 means yes. 96 // 1 means yes.
95 resetPingStrikes uint32 // Accessed atomically. 97 resetPingStrikes uint32 // Accessed atomically.
96 initialWindowSize int32 98 initialWindowSize int32
97 bdpEst *bdpEstimator 99 bdpEst *bdpEstimator
98 100 maxSendHeaderListSize *uint32
99 outQuotaVersion uint32
100 101
101 mu sync.Mutex // guard the following 102 mu sync.Mutex // guard the following
102 103
@@ -109,19 +110,27 @@ type http2Server struct {
109 drainChan chan struct{} 110 drainChan chan struct{}
110 state transportState 111 state transportState
111 activeStreams map[uint32]*Stream 112 activeStreams map[uint32]*Stream
112 // the per-stream outbound flow control window size set by the peer.
113 streamSendQuota uint32
114 // idle is the time instant when the connection went idle. 113 // idle is the time instant when the connection went idle.
115 // This is either the begining of the connection or when the number of 114 // This is either the beginning of the connection or when the number of
116 // RPCs go down to 0. 115 // RPCs go down to 0.
117 // When the connection is busy, this value is set to 0. 116 // When the connection is busy, this value is set to 0.
118 idle time.Time 117 idle time.Time
118
119 // Fields below are for channelz metric collection.
120 channelzID int64 // channelz unique identification number
121 czData *channelzData
119} 122}
120 123
121// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is 124// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
122// returned if something goes wrong. 125// returned if something goes wrong.
123func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { 126func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
124 framer := newFramer(conn) 127 writeBufSize := config.WriteBufferSize
128 readBufSize := config.ReadBufferSize
129 maxHeaderListSize := defaultServerMaxHeaderListSize
130 if config.MaxHeaderListSize != nil {
131 maxHeaderListSize = *config.MaxHeaderListSize
132 }
133 framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
125 // Send initial settings as connection preface to client. 134 // Send initial settings as connection preface to client.
126 var isettings []http2.Setting 135 var isettings []http2.Setting
127 // TODO(zhaoq): Have a better way to signal "no limit" because 0 is 136 // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
@@ -151,13 +160,19 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
151 ID: http2.SettingInitialWindowSize, 160 ID: http2.SettingInitialWindowSize,
152 Val: uint32(iwz)}) 161 Val: uint32(iwz)})
153 } 162 }
154 if err := framer.writeSettings(true, isettings...); err != nil { 163 if config.MaxHeaderListSize != nil {
155 return nil, connectionErrorf(true, err, "transport: %v", err) 164 isettings = append(isettings, http2.Setting{
165 ID: http2.SettingMaxHeaderListSize,
166 Val: *config.MaxHeaderListSize,
167 })
168 }
169 if err := framer.fr.WriteSettings(isettings...); err != nil {
170 return nil, connectionErrorf(false, err, "transport: %v", err)
156 } 171 }
157 // Adjust the connection flow control window if needed. 172 // Adjust the connection flow control window if needed.
158 if delta := uint32(icwz - defaultWindowSize); delta > 0 { 173 if delta := uint32(icwz - defaultWindowSize); delta > 0 {
159 if err := framer.writeWindowUpdate(true, 0, delta); err != nil { 174 if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
160 return nil, connectionErrorf(true, err, "transport: %v", err) 175 return nil, connectionErrorf(false, err, "transport: %v", err)
161 } 176 }
162 } 177 }
163 kp := config.KeepaliveParams 178 kp := config.KeepaliveParams
@@ -182,32 +197,31 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
182 if kep.MinTime == 0 { 197 if kep.MinTime == 0 {
183 kep.MinTime = defaultKeepalivePolicyMinTime 198 kep.MinTime = defaultKeepalivePolicyMinTime
184 } 199 }
185 var buf bytes.Buffer 200 ctx, cancel := context.WithCancel(context.Background())
186 t := &http2Server{ 201 t := &http2Server{
187 ctx: context.Background(), 202 ctx: ctx,
203 cancel: cancel,
204 ctxDone: ctx.Done(),
188 conn: conn, 205 conn: conn,
189 remoteAddr: conn.RemoteAddr(), 206 remoteAddr: conn.RemoteAddr(),
190 localAddr: conn.LocalAddr(), 207 localAddr: conn.LocalAddr(),
191 authInfo: config.AuthInfo, 208 authInfo: config.AuthInfo,
192 framer: framer, 209 framer: framer,
193 hBuf: &buf, 210 readerDone: make(chan struct{}),
194 hEnc: hpack.NewEncoder(&buf), 211 writerDone: make(chan struct{}),
195 maxStreams: maxStreams, 212 maxStreams: maxStreams,
196 inTapHandle: config.InTapHandle, 213 inTapHandle: config.InTapHandle,
197 controlBuf: newControlBuffer(), 214 fc: &trInFlow{limit: uint32(icwz)},
198 fc: &inFlow{limit: uint32(icwz)},
199 sendQuotaPool: newQuotaPool(defaultWindowSize),
200 state: reachable, 215 state: reachable,
201 writableChan: make(chan int, 1),
202 shutdownChan: make(chan struct{}),
203 activeStreams: make(map[uint32]*Stream), 216 activeStreams: make(map[uint32]*Stream),
204 streamSendQuota: defaultWindowSize,
205 stats: config.StatsHandler, 217 stats: config.StatsHandler,
206 kp: kp, 218 kp: kp,
207 idle: time.Now(), 219 idle: time.Now(),
208 kep: kep, 220 kep: kep,
209 initialWindowSize: iwz, 221 initialWindowSize: iwz,
222 czData: new(channelzData),
210 } 223 }
224 t.controlBuf = newControlBuffer(t.ctxDone)
211 if dynamicWindow { 225 if dynamicWindow {
212 t.bdpEst = &bdpEstimator{ 226 t.bdpEst = &bdpEstimator{
213 bdp: initialWindowSize, 227 bdp: initialWindowSize,
@@ -222,37 +236,83 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
222 connBegin := &stats.ConnBegin{} 236 connBegin := &stats.ConnBegin{}
223 t.stats.HandleConn(t.ctx, connBegin) 237 t.stats.HandleConn(t.ctx, connBegin)
224 } 238 }
225 go t.controller() 239 if channelz.IsOn() {
240 t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
241 }
242 t.framer.writer.Flush()
243
244 defer func() {
245 if err != nil {
246 t.Close()
247 }
248 }()
249
250 // Check the validity of client preface.
251 preface := make([]byte, len(clientPreface))
252 if _, err := io.ReadFull(t.conn, preface); err != nil {
253 return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
254 }
255 if !bytes.Equal(preface, clientPreface) {
256 return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
257 }
258
259 frame, err := t.framer.fr.ReadFrame()
260 if err == io.EOF || err == io.ErrUnexpectedEOF {
261 return nil, err
262 }
263 if err != nil {
264 return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
265 }
266 atomic.StoreUint32(&t.activity, 1)
267 sf, ok := frame.(*http2.SettingsFrame)
268 if !ok {
269 return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
270 }
271 t.handleSettings(sf)
272
273 go func() {
274 t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
275 t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
276 if err := t.loopy.run(); err != nil {
277 errorf("transport: loopyWriter.run returning. Err: %v", err)
278 }
279 t.conn.Close()
280 close(t.writerDone)
281 }()
226 go t.keepalive() 282 go t.keepalive()
227 t.writableChan <- 0
228 return t, nil 283 return t, nil
229} 284}
230 285
231// operateHeader takes action on the decoded headers. 286// operateHeader takes action on the decoded headers.
232func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { 287func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
233 buf := newRecvBuffer() 288 streamID := frame.Header().StreamID
234 s := &Stream{ 289 state := decodeState{serverSide: true}
235 id: frame.Header().StreamID, 290 if err := state.decodeHeader(frame); err != nil {
236 st: t, 291 if se, ok := status.FromError(err); ok {
237 buf: buf, 292 t.controlBuf.put(&cleanupStream{
238 fc: &inFlow{limit: uint32(t.initialWindowSize)}, 293 streamID: streamID,
239 } 294 rst: true,
240 295 rstCode: statusCodeConvTab[se.Code()],
241 var state decodeState 296 onWrite: func() {},
242 for _, hf := range frame.Fields { 297 })
243 if err := state.processHeaderField(hf); err != nil {
244 if se, ok := err.(StreamError); ok {
245 t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
246 }
247 return
248 } 298 }
299 return false
249 } 300 }
250 301
302 buf := newRecvBuffer()
303 s := &Stream{
304 id: streamID,
305 st: t,
306 buf: buf,
307 fc: &inFlow{limit: uint32(t.initialWindowSize)},
308 recvCompress: state.encoding,
309 method: state.method,
310 contentSubtype: state.contentSubtype,
311 }
251 if frame.StreamEnded() { 312 if frame.StreamEnded() {
252 // s is just created by the caller. No lock needed. 313 // s is just created by the caller. No lock needed.
253 s.state = streamReadDone 314 s.state = streamReadDone
254 } 315 }
255 s.recvCompress = state.encoding
256 if state.timeoutSet { 316 if state.timeoutSet {
257 s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) 317 s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
258 } else { 318 } else {
@@ -266,25 +326,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
266 pr.AuthInfo = t.authInfo 326 pr.AuthInfo = t.authInfo
267 } 327 }
268 s.ctx = peer.NewContext(s.ctx, pr) 328 s.ctx = peer.NewContext(s.ctx, pr)
269 // Cache the current stream to the context so that the server application
270 // can find out. Required when the server wants to send some metadata
271 // back to the client (unary call only).
272 s.ctx = newContextWithStream(s.ctx, s)
273 // Attach the received metadata to the context. 329 // Attach the received metadata to the context.
274 if len(state.mdata) > 0 { 330 if len(state.mdata) > 0 {
275 s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) 331 s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
276 } 332 }
277 s.trReader = &transportReader{ 333 if state.statsTags != nil {
278 reader: &recvBufferReader{ 334 s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags)
279 ctx: s.ctx, 335 }
280 recv: s.buf, 336 if state.statsTrace != nil {
281 }, 337 s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace)
282 windowHandler: func(n int) {
283 t.updateWindow(s, uint32(n))
284 },
285 } 338 }
286 s.recvCompress = state.encoding
287 s.method = state.method
288 if t.inTapHandle != nil { 339 if t.inTapHandle != nil {
289 var err error 340 var err error
290 info := &tap.Info{ 341 info := &tap.Info{
@@ -293,33 +344,46 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
293 s.ctx, err = t.inTapHandle(s.ctx, info) 344 s.ctx, err = t.inTapHandle(s.ctx, info)
294 if err != nil { 345 if err != nil {
295 warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) 346 warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
296 t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) 347 t.controlBuf.put(&cleanupStream{
297 return 348 streamID: s.id,
349 rst: true,
350 rstCode: http2.ErrCodeRefusedStream,
351 onWrite: func() {},
352 })
353 return false
298 } 354 }
299 } 355 }
300 t.mu.Lock() 356 t.mu.Lock()
301 if t.state != reachable { 357 if t.state != reachable {
302 t.mu.Unlock() 358 t.mu.Unlock()
303 return 359 return false
304 } 360 }
305 if uint32(len(t.activeStreams)) >= t.maxStreams { 361 if uint32(len(t.activeStreams)) >= t.maxStreams {
306 t.mu.Unlock() 362 t.mu.Unlock()
307 t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) 363 t.controlBuf.put(&cleanupStream{
308 return 364 streamID: streamID,
365 rst: true,
366 rstCode: http2.ErrCodeRefusedStream,
367 onWrite: func() {},
368 })
369 return false
309 } 370 }
310 if s.id%2 != 1 || s.id <= t.maxStreamID { 371 if streamID%2 != 1 || streamID <= t.maxStreamID {
311 t.mu.Unlock() 372 t.mu.Unlock()
312 // illegal gRPC stream id. 373 // illegal gRPC stream id.
313 errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", s.id) 374 errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
314 return true 375 return true
315 } 376 }
316 t.maxStreamID = s.id 377 t.maxStreamID = streamID
317 s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) 378 t.activeStreams[streamID] = s
318 t.activeStreams[s.id] = s
319 if len(t.activeStreams) == 1 { 379 if len(t.activeStreams) == 1 {
320 t.idle = time.Time{} 380 t.idle = time.Time{}
321 } 381 }
322 t.mu.Unlock() 382 t.mu.Unlock()
383 if channelz.IsOn() {
384 atomic.AddInt64(&t.czData.streamsStarted, 1)
385 atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
386 }
323 s.requestRead = func(n int) { 387 s.requestRead = func(n int) {
324 t.adjustWindow(s, uint32(n)) 388 t.adjustWindow(s, uint32(n))
325 } 389 }
@@ -335,61 +399,51 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
335 } 399 }
336 t.stats.HandleRPC(s.ctx, inHeader) 400 t.stats.HandleRPC(s.ctx, inHeader)
337 } 401 }
402 s.ctxDone = s.ctx.Done()
403 s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
404 s.trReader = &transportReader{
405 reader: &recvBufferReader{
406 ctx: s.ctx,
407 ctxDone: s.ctxDone,
408 recv: s.buf,
409 },
410 windowHandler: func(n int) {
411 t.updateWindow(s, uint32(n))
412 },
413 }
414 // Register the stream with loopy.
415 t.controlBuf.put(&registerStream{
416 streamID: s.id,
417 wq: s.wq,
418 })
338 handle(s) 419 handle(s)
339 return 420 return false
340} 421}
341 422
342// HandleStreams receives incoming streams using the given handler. This is 423// HandleStreams receives incoming streams using the given handler. This is
343// typically run in a separate goroutine. 424// typically run in a separate goroutine.
344// traceCtx attaches trace to ctx and returns the new context. 425// traceCtx attaches trace to ctx and returns the new context.
345func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { 426func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
346 // Check the validity of client preface. 427 defer close(t.readerDone)
347 preface := make([]byte, len(clientPreface))
348 if _, err := io.ReadFull(t.conn, preface); err != nil {
349 // Only log if it isn't a simple tcp accept check (ie: tcp balancer doing open/close socket)
350 if err != io.EOF {
351 errorf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
352 }
353 t.Close()
354 return
355 }
356 if !bytes.Equal(preface, clientPreface) {
357 errorf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
358 t.Close()
359 return
360 }
361
362 frame, err := t.framer.readFrame()
363 if err == io.EOF || err == io.ErrUnexpectedEOF {
364 t.Close()
365 return
366 }
367 if err != nil {
368 errorf("transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
369 t.Close()
370 return
371 }
372 atomic.StoreUint32(&t.activity, 1)
373 sf, ok := frame.(*http2.SettingsFrame)
374 if !ok {
375 errorf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
376 t.Close()
377 return
378 }
379 t.handleSettings(sf)
380
381 for { 428 for {
382 frame, err := t.framer.readFrame() 429 frame, err := t.framer.fr.ReadFrame()
383 atomic.StoreUint32(&t.activity, 1) 430 atomic.StoreUint32(&t.activity, 1)
384 if err != nil { 431 if err != nil {
385 if se, ok := err.(http2.StreamError); ok { 432 if se, ok := err.(http2.StreamError); ok {
433 warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
386 t.mu.Lock() 434 t.mu.Lock()
387 s := t.activeStreams[se.StreamID] 435 s := t.activeStreams[se.StreamID]
388 t.mu.Unlock() 436 t.mu.Unlock()
389 if s != nil { 437 if s != nil {
390 t.closeStream(s) 438 t.closeStream(s, true, se.Code, nil, false)
439 } else {
440 t.controlBuf.put(&cleanupStream{
441 streamID: se.StreamID,
442 rst: true,
443 rstCode: se.Code,
444 onWrite: func() {},
445 })
391 } 446 }
392 t.controlBuf.put(&resetStream{se.StreamID, se.Code})
393 continue 447 continue
394 } 448 }
395 if err == io.EOF || err == io.ErrUnexpectedEOF { 449 if err == io.EOF || err == io.ErrUnexpectedEOF {
@@ -443,33 +497,20 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
443// of stream if the application is requesting data larger in size than 497// of stream if the application is requesting data larger in size than
444// the window. 498// the window.
445func (t *http2Server) adjustWindow(s *Stream, n uint32) { 499func (t *http2Server) adjustWindow(s *Stream, n uint32) {
446 s.mu.Lock()
447 defer s.mu.Unlock()
448 if s.state == streamDone {
449 return
450 }
451 if w := s.fc.maybeAdjust(n); w > 0 { 500 if w := s.fc.maybeAdjust(n); w > 0 {
452 if cw := t.fc.resetPendingUpdate(); cw > 0 { 501 t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
453 t.controlBuf.put(&windowUpdate{0, cw, false})
454 }
455 t.controlBuf.put(&windowUpdate{s.id, w, true})
456 } 502 }
503
457} 504}
458 505
459// updateWindow adjusts the inbound quota for the stream and the transport. 506// updateWindow adjusts the inbound quota for the stream and the transport.
460// Window updates will deliver to the controller for sending when 507// Window updates will deliver to the controller for sending when
461// the cumulative quota exceeds the corresponding threshold. 508// the cumulative quota exceeds the corresponding threshold.
462func (t *http2Server) updateWindow(s *Stream, n uint32) { 509func (t *http2Server) updateWindow(s *Stream, n uint32) {
463 s.mu.Lock()
464 defer s.mu.Unlock()
465 if s.state == streamDone {
466 return
467 }
468 if w := s.fc.onRead(n); w > 0 { 510 if w := s.fc.onRead(n); w > 0 {
469 if cw := t.fc.resetPendingUpdate(); cw > 0 { 511 t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
470 t.controlBuf.put(&windowUpdate{0, cw, false}) 512 increment: w,
471 } 513 })
472 t.controlBuf.put(&windowUpdate{s.id, w, true})
473 } 514 }
474} 515}
475 516
@@ -483,13 +524,15 @@ func (t *http2Server) updateFlowControl(n uint32) {
483 } 524 }
484 t.initialWindowSize = int32(n) 525 t.initialWindowSize = int32(n)
485 t.mu.Unlock() 526 t.mu.Unlock()
486 t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n), false}) 527 t.controlBuf.put(&outgoingWindowUpdate{
487 t.controlBuf.put(&settings{ 528 streamID: 0,
488 ack: false, 529 increment: t.fc.newLimit(n),
530 })
531 t.controlBuf.put(&outgoingSettings{
489 ss: []http2.Setting{ 532 ss: []http2.Setting{
490 { 533 {
491 ID: http2.SettingInitialWindowSize, 534 ID: http2.SettingInitialWindowSize,
492 Val: uint32(n), 535 Val: n,
493 }, 536 },
494 }, 537 },
495 }) 538 })
@@ -500,7 +543,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
500 size := f.Header().Length 543 size := f.Header().Length
501 var sendBDPPing bool 544 var sendBDPPing bool
502 if t.bdpEst != nil { 545 if t.bdpEst != nil {
503 sendBDPPing = t.bdpEst.add(uint32(size)) 546 sendBDPPing = t.bdpEst.add(size)
504 } 547 }
505 // Decouple connection's flow control from application's read. 548 // Decouple connection's flow control from application's read.
506 // An update on connection's flow control should not depend on 549 // An update on connection's flow control should not depend on
@@ -510,21 +553,22 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
510 // Decoupling the connection flow control will prevent other 553 // Decoupling the connection flow control will prevent other
511 // active(fast) streams from starving in presence of slow or 554 // active(fast) streams from starving in presence of slow or
512 // inactive streams. 555 // inactive streams.
513 // 556 if w := t.fc.onData(size); w > 0 {
514 // Furthermore, if a bdpPing is being sent out we can piggyback 557 t.controlBuf.put(&outgoingWindowUpdate{
515 // connection's window update for the bytes we just received. 558 streamID: 0,
559 increment: w,
560 })
561 }
516 if sendBDPPing { 562 if sendBDPPing {
517 t.controlBuf.put(&windowUpdate{0, uint32(size), false}) 563 // Avoid excessive ping detection (e.g. in an L7 proxy)
518 t.controlBuf.put(bdpPing) 564 // by sending a window update prior to the BDP ping.
519 } else { 565 if w := t.fc.reset(); w > 0 {
520 if err := t.fc.onData(uint32(size)); err != nil { 566 t.controlBuf.put(&outgoingWindowUpdate{
521 errorf("transport: http2Server %v", err) 567 streamID: 0,
522 t.Close() 568 increment: w,
523 return 569 })
524 }
525 if w := t.fc.onRead(uint32(size)); w > 0 {
526 t.controlBuf.put(&windowUpdate{0, w, true})
527 } 570 }
571 t.controlBuf.put(bdpPing)
528 } 572 }
529 // Select the right stream to dispatch. 573 // Select the right stream to dispatch.
530 s, ok := t.getStream(f) 574 s, ok := t.getStream(f)
@@ -532,23 +576,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
532 return 576 return
533 } 577 }
534 if size > 0 { 578 if size > 0 {
535 s.mu.Lock() 579 if err := s.fc.onData(size); err != nil {
536 if s.state == streamDone { 580 t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false)
537 s.mu.Unlock()
538 return
539 }
540 if err := s.fc.onData(uint32(size)); err != nil {
541 s.mu.Unlock()
542 t.closeStream(s)
543 t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
544 return 581 return
545 } 582 }
546 if f.Header().Flags.Has(http2.FlagDataPadded) { 583 if f.Header().Flags.Has(http2.FlagDataPadded) {
547 if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { 584 if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
548 t.controlBuf.put(&windowUpdate{s.id, w, true}) 585 t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
549 } 586 }
550 } 587 }
551 s.mu.Unlock()
552 // TODO(bradfitz, zhaoq): A copy is required here because there is no 588 // TODO(bradfitz, zhaoq): A copy is required here because there is no
553 // guarantee f.Data() is consumed before the arrival of next frame. 589 // guarantee f.Data() is consumed before the arrival of next frame.
554 // Can this copy be eliminated? 590 // Can this copy be eliminated?
@@ -560,11 +596,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
560 } 596 }
561 if f.Header().Flags.Has(http2.FlagDataEndStream) { 597 if f.Header().Flags.Has(http2.FlagDataEndStream) {
562 // Received the end of stream from the client. 598 // Received the end of stream from the client.
563 s.mu.Lock() 599 s.compareAndSwapState(streamActive, streamReadDone)
564 if s.state != streamDone {
565 s.state = streamReadDone
566 }
567 s.mu.Unlock()
568 s.write(recvMsg{err: io.EOF}) 600 s.write(recvMsg{err: io.EOF})
569 } 601 }
570} 602}
@@ -574,7 +606,7 @@ func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
574 if !ok { 606 if !ok {
575 return 607 return
576 } 608 }
577 t.closeStream(s) 609 t.closeStream(s, false, 0, nil, false)
578} 610}
579 611
580func (t *http2Server) handleSettings(f *http2.SettingsFrame) { 612func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
@@ -582,12 +614,27 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
582 return 614 return
583 } 615 }
584 var ss []http2.Setting 616 var ss []http2.Setting
617 var updateFuncs []func()
585 f.ForeachSetting(func(s http2.Setting) error { 618 f.ForeachSetting(func(s http2.Setting) error {
586 ss = append(ss, s) 619 switch s.ID {
620 case http2.SettingMaxHeaderListSize:
621 updateFuncs = append(updateFuncs, func() {
622 t.maxSendHeaderListSize = new(uint32)
623 *t.maxSendHeaderListSize = s.Val
624 })
625 default:
626 ss = append(ss, s)
627 }
587 return nil 628 return nil
588 }) 629 })
589 // The settings will be applied once the ack is sent. 630 t.controlBuf.executeAndPut(func(interface{}) bool {
590 t.controlBuf.put(&settings{ack: true, ss: ss}) 631 for _, f := range updateFuncs {
632 f()
633 }
634 return true
635 }, &incomingSettings{
636 ss: ss,
637 })
591} 638}
592 639
593const ( 640const (
@@ -627,7 +674,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
627 t.mu.Unlock() 674 t.mu.Unlock()
628 if ns < 1 && !t.kep.PermitWithoutStream { 675 if ns < 1 && !t.kep.PermitWithoutStream {
629 // Keepalive shouldn't be active thus, this new ping should 676 // Keepalive shouldn't be active thus, this new ping should
630 // have come after atleast defaultPingTimeout. 677 // have come after at least defaultPingTimeout.
631 if t.lastPingAt.Add(defaultPingTimeout).After(now) { 678 if t.lastPingAt.Add(defaultPingTimeout).After(now) {
632 t.pingStrikes++ 679 t.pingStrikes++
633 } 680 }
@@ -640,69 +687,52 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
640 687
641 if t.pingStrikes > maxPingStrikes { 688 if t.pingStrikes > maxPingStrikes {
642 // Send goaway and close the connection. 689 // Send goaway and close the connection.
690 errorf("transport: Got too many pings from the client, closing the connection.")
643 t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) 691 t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
644 } 692 }
645} 693}
646 694
647func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { 695func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
648 id := f.Header().StreamID 696 t.controlBuf.put(&incomingWindowUpdate{
649 incr := f.Increment 697 streamID: f.Header().StreamID,
650 if id == 0 { 698 increment: f.Increment,
651 t.sendQuotaPool.add(int(incr)) 699 })
652 return
653 }
654 if s, ok := t.getStream(f); ok {
655 s.sendQuotaPool.add(int(incr))
656 }
657} 700}
658 701
659func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { 702func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
660 first := true 703 for k, vv := range md {
661 endHeaders := false 704 if isReservedHeader(k) {
662 var err error 705 // Clients don't tolerate reading restricted headers after some non restricted ones were sent.
663 defer func() { 706 continue
664 if err == nil {
665 // Reset ping strikes when seding headers since that might cause the
666 // peer to send ping.
667 atomic.StoreUint32(&t.resetPingStrikes, 1)
668 }
669 }()
670 // Sends the headers in a single batch.
671 for !endHeaders {
672 size := t.hBuf.Len()
673 if size > http2MaxFrameLen {
674 size = http2MaxFrameLen
675 } else {
676 endHeaders = true
677 } 707 }
678 if first { 708 for _, v := range vv {
679 p := http2.HeadersFrameParam{ 709 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
680 StreamID: s.id,
681 BlockFragment: b.Next(size),
682 EndStream: endStream,
683 EndHeaders: endHeaders,
684 }
685 err = t.framer.writeHeaders(endHeaders, p)
686 first = false
687 } else {
688 err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
689 } 710 }
690 if err != nil { 711 }
691 t.Close() 712 return headerFields
692 return connectionErrorf(true, err, "transport: %v", err) 713}
714
715func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
716 if t.maxSendHeaderListSize == nil {
717 return true
718 }
719 hdrFrame := it.(*headerFrame)
720 var sz int64
721 for _, f := range hdrFrame.hf {
722 if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
723 errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
724 return false
693 } 725 }
694 } 726 }
695 return nil 727 return true
696} 728}
697 729
698// WriteHeader sends the header metedata md back to the client. 730// WriteHeader sends the header metedata md back to the client.
699func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { 731func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
700 s.mu.Lock() 732 if s.updateHeaderSent() || s.getState() == streamDone {
701 if s.headerOk || s.state == streamDone {
702 s.mu.Unlock()
703 return ErrIllegalHeaderWrite 733 return ErrIllegalHeaderWrite
704 } 734 }
705 s.headerOk = true 735 s.hdrMu.Lock()
706 if md.Len() > 0 { 736 if md.Len() > 0 {
707 if s.header.Len() > 0 { 737 if s.header.Len() > 0 {
708 s.header = metadata.Join(s.header, md) 738 s.header = metadata.Join(s.header, md)
@@ -710,37 +740,45 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
710 s.header = md 740 s.header = md
711 } 741 }
712 } 742 }
713 md = s.header 743 if err := t.writeHeaderLocked(s); err != nil {
714 s.mu.Unlock() 744 s.hdrMu.Unlock()
715 if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
716 return err 745 return err
717 } 746 }
718 t.hBuf.Reset() 747 s.hdrMu.Unlock()
719 t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) 748 return nil
720 t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) 749}
750
751func (t *http2Server) writeHeaderLocked(s *Stream) error {
752 // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
753 // first and create a slice of that exact size.
754 headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
755 headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
756 headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
721 if s.sendCompress != "" { 757 if s.sendCompress != "" {
722 t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) 758 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
723 } 759 }
724 for k, vv := range md { 760 headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
725 if isReservedHeader(k) { 761 success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
726 // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 762 streamID: s.id,
727 continue 763 hf: headerFields,
728 } 764 endStream: false,
729 for _, v := range vv { 765 onWrite: func() {
730 t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) 766 atomic.StoreUint32(&t.resetPingStrikes, 1)
767 },
768 })
769 if !success {
770 if err != nil {
771 return err
731 } 772 }
732 } 773 t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
733 bufLen := t.hBuf.Len() 774 return ErrHeaderListSizeLimitViolation
734 if err := t.writeHeaders(s, t.hBuf, false); err != nil {
735 return err
736 } 775 }
737 if t.stats != nil { 776 if t.stats != nil {
738 outHeader := &stats.OutHeader{ 777 // Note: WireLength is not set in outHeader.
739 WireLength: bufLen, 778 // TODO(mmukhi): Revisit this later, if needed.
740 } 779 outHeader := &stats.OutHeader{}
741 t.stats.HandleRPC(s.Context(), outHeader) 780 t.stats.HandleRPC(s.Context(), outHeader)
742 } 781 }
743 t.writableChan <- 0
744 return nil 782 return nil
745} 783}
746 784
@@ -749,204 +787,108 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
749// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early 787// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
750// OK is adopted. 788// OK is adopted.
751func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { 789func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
752 var headersSent, hasHeader bool 790 if s.getState() == streamDone {
753 s.mu.Lock()
754 if s.state == streamDone {
755 s.mu.Unlock()
756 return nil 791 return nil
757 } 792 }
758 if s.headerOk { 793 s.hdrMu.Lock()
759 headersSent = true 794 // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
760 } 795 // first and create a slice of that exact size.
761 if s.header.Len() > 0 { 796 headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
762 hasHeader = true 797 if !s.updateHeaderSent() { // No headers have been sent.
763 } 798 if len(s.header) > 0 { // Send a separate header frame.
764 s.mu.Unlock() 799 if err := t.writeHeaderLocked(s); err != nil {
765 800 s.hdrMu.Unlock()
766 if !headersSent && hasHeader { 801 return err
767 t.WriteHeader(s, nil) 802 }
768 headersSent = true 803 } else { // Send a trailer only response.
769 } 804 headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
770 805 headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
771 if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { 806 }
772 return err
773 }
774 t.hBuf.Reset()
775 if !headersSent {
776 t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
777 t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
778 } 807 }
779 t.hEnc.WriteField( 808 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
780 hpack.HeaderField{ 809 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
781 Name: "grpc-status",
782 Value: strconv.Itoa(int(st.Code())),
783 })
784 t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
785 810
786 if p := st.Proto(); p != nil && len(p.Details) > 0 { 811 if p := st.Proto(); p != nil && len(p.Details) > 0 {
787 stBytes, err := proto.Marshal(p) 812 stBytes, err := proto.Marshal(p)
788 if err != nil { 813 if err != nil {
789 // TODO: return error instead, when callers are able to handle it. 814 // TODO: return error instead, when callers are able to handle it.
790 panic(err) 815 grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
816 } else {
817 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
791 } 818 }
792
793 t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
794 } 819 }
795 820
796 // Attach the trailer metadata. 821 // Attach the trailer metadata.
797 for k, vv := range s.trailer { 822 headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
798 // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 823 trailingHeader := &headerFrame{
799 if isReservedHeader(k) { 824 streamID: s.id,
800 continue 825 hf: headerFields,
801 } 826 endStream: true,
802 for _, v := range vv { 827 onWrite: func() {
803 t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) 828 atomic.StoreUint32(&t.resetPingStrikes, 1)
804 } 829 },
805 } 830 }
806 bufLen := t.hBuf.Len() 831 s.hdrMu.Unlock()
807 if err := t.writeHeaders(s, t.hBuf, true); err != nil { 832 success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
808 t.Close() 833 if !success {
809 return err 834 if err != nil {
835 return err
836 }
837 t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
838 return ErrHeaderListSizeLimitViolation
810 } 839 }
840 t.closeStream(s, false, 0, trailingHeader, true)
811 if t.stats != nil { 841 if t.stats != nil {
812 outTrailer := &stats.OutTrailer{ 842 t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
813 WireLength: bufLen,
814 }
815 t.stats.HandleRPC(s.Context(), outTrailer)
816 } 843 }
817 t.closeStream(s)
818 t.writableChan <- 0
819 return nil 844 return nil
820} 845}
821 846
822// Write converts the data into HTTP2 data frame and sends it out. Non-nil error 847// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
823// is returns if it fails (e.g., framing error, transport error). 848// is returns if it fails (e.g., framing error, transport error).
824func (t *http2Server) Write(s *Stream, data []byte, opts *Options) (err error) { 849func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
825 // TODO(zhaoq): Support multi-writers for a single stream. 850 if !s.isHeaderSent() { // Headers haven't been written yet.
826 var writeHeaderFrame bool 851 if err := t.WriteHeader(s, nil); err != nil {
827 s.mu.Lock() 852 // TODO(mmukhi, dfawley): Make sure this is the right code to return.
828 if s.state == streamDone { 853 return status.Errorf(codes.Internal, "transport: %v", err)
829 s.mu.Unlock()
830 return streamErrorf(codes.Unknown, "the stream has been done")
831 }
832 if !s.headerOk {
833 writeHeaderFrame = true
834 }
835 s.mu.Unlock()
836 if writeHeaderFrame {
837 t.WriteHeader(s, nil)
838 }
839 r := bytes.NewBuffer(data)
840 var (
841 p []byte
842 oqv uint32
843 )
844 for {
845 if r.Len() == 0 && p == nil {
846 return nil
847 }
848 oqv = atomic.LoadUint32(&t.outQuotaVersion)
849 size := http2MaxFrameLen
850 // Wait until the stream has some quota to send the data.
851 sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
852 if err != nil {
853 return err
854 }
855 // Wait until the transport has some quota to send the data.
856 tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
857 if err != nil {
858 return err
859 }
860 if sq < size {
861 size = sq
862 }
863 if tq < size {
864 size = tq
865 } 854 }
866 if p == nil { 855 } else {
867 p = r.Next(size) 856 // Writing headers checks for this condition.
868 } 857 if s.getState() == streamDone {
869 ps := len(p) 858 // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
870 if ps < sq { 859 s.cancel()
871 // Overbooked stream quota. Return it back. 860 select {
872 s.sendQuotaPool.add(sq - ps) 861 case <-t.ctx.Done():
873 } 862 return ErrConnClosing
874 if ps < tq { 863 default:
875 // Overbooked transport quota. Return it back.
876 t.sendQuotaPool.add(tq - ps)
877 }
878 t.framer.adjustNumWriters(1)
879 // Got some quota. Try to acquire writing privilege on the
880 // transport.
881 if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
882 if _, ok := err.(StreamError); ok {
883 // Return the connection quota back.
884 t.sendQuotaPool.add(ps)
885 }
886 if t.framer.adjustNumWriters(-1) == 0 {
887 // This writer is the last one in this batch and has the
888 // responsibility to flush the buffered frames. It queues
889 // a flush request to controlBuf instead of flushing directly
890 // in order to avoid the race with other writing or flushing.
891 t.controlBuf.put(&flushIO{})
892 }
893 return err
894 }
895 select {
896 case <-s.ctx.Done():
897 t.sendQuotaPool.add(ps)
898 if t.framer.adjustNumWriters(-1) == 0 {
899 t.controlBuf.put(&flushIO{})
900 } 864 }
901 t.writableChan <- 0
902 return ContextErr(s.ctx.Err()) 865 return ContextErr(s.ctx.Err())
903 default:
904 }
905 if oqv != atomic.LoadUint32(&t.outQuotaVersion) {
906 // InitialWindowSize settings frame must have been received after we
907 // acquired send quota but before we got the writable channel.
908 // We must forsake this write.
909 t.sendQuotaPool.add(ps)
910 s.sendQuotaPool.add(ps)
911 if t.framer.adjustNumWriters(-1) == 0 {
912 t.controlBuf.put(&flushIO{})
913 }
914 t.writableChan <- 0
915 continue
916 }
917 var forceFlush bool
918 if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
919 forceFlush = true
920 }
921 // Reset ping strikes when sending data since this might cause
922 // the peer to send ping.
923 atomic.StoreUint32(&t.resetPingStrikes, 1)
924 if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
925 t.Close()
926 return connectionErrorf(true, err, "transport: %v", err)
927 } 866 }
928 p = nil
929 if t.framer.adjustNumWriters(-1) == 0 {
930 t.framer.flushWrite()
931 }
932 t.writableChan <- 0
933 } 867 }
934 868 // Add some data to header frame so that we can equally distribute bytes across frames.
935} 869 emptyLen := http2MaxFrameLen - len(hdr)
936 870 if emptyLen > len(data) {
937func (t *http2Server) applySettings(ss []http2.Setting) { 871 emptyLen = len(data)
938 for _, s := range ss { 872 }
939 if s.ID == http2.SettingInitialWindowSize { 873 hdr = append(hdr, data[:emptyLen]...)
940 t.mu.Lock() 874 data = data[emptyLen:]
941 defer t.mu.Unlock() 875 df := &dataFrame{
942 for _, stream := range t.activeStreams { 876 streamID: s.id,
943 stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota)) 877 h: hdr,
944 } 878 d: data,
945 t.streamSendQuota = s.Val 879 onEachWrite: func() {
946 atomic.AddUint32(&t.outQuotaVersion, 1) 880 atomic.StoreUint32(&t.resetPingStrikes, 1)
881 },
882 }
883 if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
884 select {
885 case <-t.ctx.Done():
886 return ErrConnClosing
887 default:
947 } 888 }
948 889 return ContextErr(s.ctx.Err())
949 } 890 }
891 return t.controlBuf.put(df)
950} 892}
951 893
952// keepalive running in a separate goroutine does the following: 894// keepalive running in a separate goroutine does the following:
@@ -962,7 +904,7 @@ func (t *http2Server) keepalive() {
962 maxAge := time.NewTimer(t.kp.MaxConnectionAge) 904 maxAge := time.NewTimer(t.kp.MaxConnectionAge)
963 keepalive := time.NewTimer(t.kp.Time) 905 keepalive := time.NewTimer(t.kp.Time)
964 // NOTE: All exit paths of this function should reset their 906 // NOTE: All exit paths of this function should reset their
965 // respecitve timers. A failure to do so will cause the 907 // respective timers. A failure to do so will cause the
966 // following clean-up to deadlock and eventually leak. 908 // following clean-up to deadlock and eventually leak.
967 defer func() { 909 defer func() {
968 if !maxIdle.Stop() { 910 if !maxIdle.Stop() {
@@ -991,7 +933,7 @@ func (t *http2Server) keepalive() {
991 // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. 933 // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
992 // Gracefully close the connection. 934 // Gracefully close the connection.
993 t.drain(http2.ErrCodeNo, []byte{}) 935 t.drain(http2.ErrCodeNo, []byte{})
994 // Reseting the timer so that the clean-up doesn't deadlock. 936 // Resetting the timer so that the clean-up doesn't deadlock.
995 maxIdle.Reset(infinity) 937 maxIdle.Reset(infinity)
996 return 938 return
997 } 939 }
@@ -1003,9 +945,9 @@ func (t *http2Server) keepalive() {
1003 case <-maxAge.C: 945 case <-maxAge.C:
1004 // Close the connection after grace period. 946 // Close the connection after grace period.
1005 t.Close() 947 t.Close()
1006 // Reseting the timer so that the clean-up doesn't deadlock. 948 // Resetting the timer so that the clean-up doesn't deadlock.
1007 maxAge.Reset(infinity) 949 maxAge.Reset(infinity)
1008 case <-t.shutdownChan: 950 case <-t.ctx.Done():
1009 } 951 }
1010 return 952 return
1011 case <-keepalive.C: 953 case <-keepalive.C:
@@ -1016,98 +958,17 @@ func (t *http2Server) keepalive() {
1016 } 958 }
1017 if pingSent { 959 if pingSent {
1018 t.Close() 960 t.Close()
1019 // Reseting the timer so that the clean-up doesn't deadlock. 961 // Resetting the timer so that the clean-up doesn't deadlock.
1020 keepalive.Reset(infinity) 962 keepalive.Reset(infinity)
1021 return 963 return
1022 } 964 }
1023 pingSent = true 965 pingSent = true
966 if channelz.IsOn() {
967 atomic.AddInt64(&t.czData.kpCount, 1)
968 }
1024 t.controlBuf.put(p) 969 t.controlBuf.put(p)
1025 keepalive.Reset(t.kp.Timeout) 970 keepalive.Reset(t.kp.Timeout)
1026 case <-t.shutdownChan: 971 case <-t.ctx.Done():
1027 return
1028 }
1029 }
1030}
1031
1032var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
1033
1034// controller running in a separate goroutine takes charge of sending control
1035// frames (e.g., window update, reset stream, setting, etc.) to the server.
1036func (t *http2Server) controller() {
1037 for {
1038 select {
1039 case i := <-t.controlBuf.get():
1040 t.controlBuf.load()
1041 select {
1042 case <-t.writableChan:
1043 switch i := i.(type) {
1044 case *windowUpdate:
1045 t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment)
1046 case *settings:
1047 if i.ack {
1048 t.framer.writeSettingsAck(true)
1049 t.applySettings(i.ss)
1050 } else {
1051 t.framer.writeSettings(true, i.ss...)
1052 }
1053 case *resetStream:
1054 t.framer.writeRSTStream(true, i.streamID, i.code)
1055 case *goAway:
1056 t.mu.Lock()
1057 if t.state == closing {
1058 t.mu.Unlock()
1059 // The transport is closing.
1060 return
1061 }
1062 sid := t.maxStreamID
1063 if !i.headsUp {
1064 // Stop accepting more streams now.
1065 t.state = draining
1066 t.mu.Unlock()
1067 t.framer.writeGoAway(true, sid, i.code, i.debugData)
1068 if i.closeConn {
1069 // Abruptly close the connection following the GoAway.
1070 t.Close()
1071 }
1072 t.writableChan <- 0
1073 continue
1074 }
1075 t.mu.Unlock()
1076 // For a graceful close, send out a GoAway with stream ID of MaxUInt32,
1077 // Follow that with a ping and wait for the ack to come back or a timer
1078 // to expire. During this time accept new streams since they might have
1079 // originated before the GoAway reaches the client.
1080 // After getting the ack or timer expiration send out another GoAway this
1081 // time with an ID of the max stream server intends to process.
1082 t.framer.writeGoAway(true, math.MaxUint32, http2.ErrCodeNo, []byte{})
1083 t.framer.writePing(true, false, goAwayPing.data)
1084 go func() {
1085 timer := time.NewTimer(time.Minute)
1086 defer timer.Stop()
1087 select {
1088 case <-t.drainChan:
1089 case <-timer.C:
1090 case <-t.shutdownChan:
1091 return
1092 }
1093 t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData})
1094 }()
1095 case *flushIO:
1096 t.framer.flushWrite()
1097 case *ping:
1098 if !i.ack {
1099 t.bdpEst.timesnap(i.data)
1100 }
1101 t.framer.writePing(true, i.ack, i.data)
1102 default:
1103 errorf("transport: http2Server.controller got unexpected item type %v\n", i)
1104 }
1105 t.writableChan <- 0
1106 continue
1107 case <-t.shutdownChan:
1108 return
1109 }
1110 case <-t.shutdownChan:
1111 return 972 return
1112 } 973 }
1113 } 974 }
@@ -1116,7 +977,7 @@ func (t *http2Server) controller() {
1116// Close starts shutting down the http2Server transport. 977// Close starts shutting down the http2Server transport.
1117// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This 978// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
1118// could cause some resource issue. Revisit this later. 979// could cause some resource issue. Revisit this later.
1119func (t *http2Server) Close() (err error) { 980func (t *http2Server) Close() error {
1120 t.mu.Lock() 981 t.mu.Lock()
1121 if t.state == closing { 982 if t.state == closing {
1122 t.mu.Unlock() 983 t.mu.Unlock()
@@ -1126,8 +987,12 @@ func (t *http2Server) Close() (err error) {
1126 streams := t.activeStreams 987 streams := t.activeStreams
1127 t.activeStreams = nil 988 t.activeStreams = nil
1128 t.mu.Unlock() 989 t.mu.Unlock()
1129 close(t.shutdownChan) 990 t.controlBuf.finish()
1130 err = t.conn.Close() 991 t.cancel()
992 err := t.conn.Close()
993 if channelz.IsOn() {
994 channelz.RemoveEntry(t.channelzID)
995 }
1131 // Cancel all active streams. 996 // Cancel all active streams.
1132 for _, s := range streams { 997 for _, s := range streams {
1133 s.cancel() 998 s.cancel()
@@ -1136,32 +1001,48 @@ func (t *http2Server) Close() (err error) {
1136 connEnd := &stats.ConnEnd{} 1001 connEnd := &stats.ConnEnd{}
1137 t.stats.HandleConn(t.ctx, connEnd) 1002 t.stats.HandleConn(t.ctx, connEnd)
1138 } 1003 }
1139 return 1004 return err
1140} 1005}
1141 1006
1142// closeStream clears the footprint of a stream when the stream is not needed 1007// closeStream clears the footprint of a stream when the stream is not needed
1143// any more. 1008// any more.
1144func (t *http2Server) closeStream(s *Stream) { 1009func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
1145 t.mu.Lock() 1010 if s.swapState(streamDone) == streamDone {
1146 delete(t.activeStreams, s.id) 1011 // If the stream was already done, return.
1147 if len(t.activeStreams) == 0 { 1012 return
1148 t.idle = time.Now()
1149 }
1150 if t.state == draining && len(t.activeStreams) == 0 {
1151 defer t.Close()
1152 } 1013 }
1153 t.mu.Unlock()
1154 // In case stream sending and receiving are invoked in separate 1014 // In case stream sending and receiving are invoked in separate
1155 // goroutines (e.g., bi-directional streaming), cancel needs to be 1015 // goroutines (e.g., bi-directional streaming), cancel needs to be
1156 // called to interrupt the potential blocking on other goroutines. 1016 // called to interrupt the potential blocking on other goroutines.
1157 s.cancel() 1017 s.cancel()
1158 s.mu.Lock() 1018 cleanup := &cleanupStream{
1159 if s.state == streamDone { 1019 streamID: s.id,
1160 s.mu.Unlock() 1020 rst: rst,
1161 return 1021 rstCode: rstCode,
1022 onWrite: func() {
1023 t.mu.Lock()
1024 if t.activeStreams != nil {
1025 delete(t.activeStreams, s.id)
1026 if len(t.activeStreams) == 0 {
1027 t.idle = time.Now()
1028 }
1029 }
1030 t.mu.Unlock()
1031 if channelz.IsOn() {
1032 if eosReceived {
1033 atomic.AddInt64(&t.czData.streamsSucceeded, 1)
1034 } else {
1035 atomic.AddInt64(&t.czData.streamsFailed, 1)
1036 }
1037 }
1038 },
1039 }
1040 if hdr != nil {
1041 hdr.cleanup = cleanup
1042 t.controlBuf.put(hdr)
1043 } else {
1044 t.controlBuf.put(cleanup)
1162 } 1045 }
1163 s.state = streamDone
1164 s.mu.Unlock()
1165} 1046}
1166 1047
1167func (t *http2Server) RemoteAddr() net.Addr { 1048func (t *http2Server) RemoteAddr() net.Addr {
@@ -1182,7 +1063,111 @@ func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
1182 t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) 1063 t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
1183} 1064}
1184 1065
1185var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) 1066var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
1067
1068// Handles outgoing GoAway and returns true if loopy needs to put itself
1069// in draining mode.
1070func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
1071 t.mu.Lock()
1072 if t.state == closing { // TODO(mmukhi): This seems unnecessary.
1073 t.mu.Unlock()
1074 // The transport is closing.
1075 return false, ErrConnClosing
1076 }
1077 sid := t.maxStreamID
1078 if !g.headsUp {
1079 // Stop accepting more streams now.
1080 t.state = draining
1081 if len(t.activeStreams) == 0 {
1082 g.closeConn = true
1083 }
1084 t.mu.Unlock()
1085 if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
1086 return false, err
1087 }
1088 if g.closeConn {
1089 // Abruptly close the connection following the GoAway (via
1090 // loopywriter). But flush out what's inside the buffer first.
1091 t.framer.writer.Flush()
1092 return false, fmt.Errorf("transport: Connection closing")
1093 }
1094 return true, nil
1095 }
1096 t.mu.Unlock()
1097 // For a graceful close, send out a GoAway with stream ID of MaxUInt32,
1098 // Follow that with a ping and wait for the ack to come back or a timer
1099 // to expire. During this time accept new streams since they might have
1100 // originated before the GoAway reaches the client.
1101 // After getting the ack or timer expiration send out another GoAway this
1102 // time with an ID of the max stream server intends to process.
1103 if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
1104 return false, err
1105 }
1106 if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
1107 return false, err
1108 }
1109 go func() {
1110 timer := time.NewTimer(time.Minute)
1111 defer timer.Stop()
1112 select {
1113 case <-t.drainChan:
1114 case <-timer.C:
1115 case <-t.ctx.Done():
1116 return
1117 }
1118 t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
1119 }()
1120 return false, nil
1121}
1122
1123func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
1124 s := channelz.SocketInternalMetric{
1125 StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
1126 StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
1127 StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
1128 MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
1129 MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
1130 KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
1131 LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
1132 LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
1133 LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
1134 LocalFlowControlWindow: int64(t.fc.getSize()),
1135 SocketOptions: channelz.GetSocketOption(t.conn),
1136 LocalAddr: t.localAddr,
1137 RemoteAddr: t.remoteAddr,
1138 // RemoteName :
1139 }
1140 if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
1141 s.Security = au.GetSecurityValue()
1142 }
1143 s.RemoteFlowControlWindow = t.getOutFlowWindow()
1144 return &s
1145}
1146
1147func (t *http2Server) IncrMsgSent() {
1148 atomic.AddInt64(&t.czData.msgSent, 1)
1149 atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
1150}
1151
1152func (t *http2Server) IncrMsgRecv() {
1153 atomic.AddInt64(&t.czData.msgRecv, 1)
1154 atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
1155}
1156
1157func (t *http2Server) getOutFlowWindow() int64 {
1158 resp := make(chan uint32)
1159 timer := time.NewTimer(time.Second)
1160 defer timer.Stop()
1161 t.controlBuf.put(&outFlowControlSizeRequest{resp})
1162 select {
1163 case sz := <-resp:
1164 return int64(sz)
1165 case <-t.ctxDone:
1166 return -1
1167 case <-timer.C:
1168 return -2
1169 }
1170}
1186 1171
1187func getJitter(v time.Duration) time.Duration { 1172func getJitter(v time.Duration) time.Duration {
1188 if v == infinity { 1173 if v == infinity {
@@ -1190,6 +1175,6 @@ func getJitter(v time.Duration) time.Duration {
1190 } 1175 }
1191 // Generate a jitter between +/- 10% of the value. 1176 // Generate a jitter between +/- 10% of the value.
1192 r := int64(v / 10) 1177 r := int64(v / 10)
1193 j := rgen.Int63n(2*r) - r 1178 j := grpcrand.Int63n(2*r) - r
1194 return time.Duration(j) 1179 return time.Duration(j)
1195} 1180}
diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 685c6fb..77a2cfa 100644
--- a/vendor/google.golang.org/grpc/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -24,12 +24,13 @@ import (
24 "encoding/base64" 24 "encoding/base64"
25 "fmt" 25 "fmt"
26 "io" 26 "io"
27 "math"
27 "net" 28 "net"
28 "net/http" 29 "net/http"
29 "strconv" 30 "strconv"
30 "strings" 31 "strings"
31 "sync/atomic"
32 "time" 32 "time"
33 "unicode/utf8"
33 34
34 "github.com/golang/protobuf/proto" 35 "github.com/golang/protobuf/proto"
35 "golang.org/x/net/http2" 36 "golang.org/x/net/http2"
@@ -44,8 +45,12 @@ const (
44 http2MaxFrameLen = 16384 // 16KB frame 45 http2MaxFrameLen = 16384 // 16KB frame
45 // http://http2.github.io/http2-spec/#SettingValues 46 // http://http2.github.io/http2-spec/#SettingValues
46 http2InitHeaderTableSize = 4096 47 http2InitHeaderTableSize = 4096
47 // http2IOBufSize specifies the buffer size for sending frames. 48 // baseContentType is the base content-type for gRPC. This is a valid
48 http2IOBufSize = 32 * 1024 49 // content-type on it's own, but can also include a content-subtype such as
50 // "proto" as a suffix after "+" or ";". See
51 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
52 // for more details.
53 baseContentType = "application/grpc"
49) 54)
50 55
51var ( 56var (
@@ -64,7 +69,7 @@ var (
64 http2.ErrCodeConnect: codes.Internal, 69 http2.ErrCodeConnect: codes.Internal,
65 http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, 70 http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
66 http2.ErrCodeInadequateSecurity: codes.PermissionDenied, 71 http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
67 http2.ErrCodeHTTP11Required: codes.FailedPrecondition, 72 http2.ErrCodeHTTP11Required: codes.Internal,
68 } 73 }
69 statusCodeConvTab = map[codes.Code]http2.ErrCode{ 74 statusCodeConvTab = map[codes.Code]http2.ErrCode{
70 codes.Internal: http2.ErrCodeInternal, 75 codes.Internal: http2.ErrCodeInternal,
@@ -111,7 +116,12 @@ type decodeState struct {
111 timeout time.Duration 116 timeout time.Duration
112 method string 117 method string
113 // key-value metadata map from the peer. 118 // key-value metadata map from the peer.
114 mdata map[string][]string 119 mdata map[string][]string
120 statsTags []byte
121 statsTrace []byte
122 contentSubtype string
123 // whether decoding on server side or not
124 serverSide bool
115} 125}
116 126
117// isReservedHeader checks whether hdr belongs to HTTP2 headers 127// isReservedHeader checks whether hdr belongs to HTTP2 headers
@@ -123,12 +133,16 @@ func isReservedHeader(hdr string) bool {
123 } 133 }
124 switch hdr { 134 switch hdr {
125 case "content-type", 135 case "content-type",
136 "user-agent",
126 "grpc-message-type", 137 "grpc-message-type",
127 "grpc-encoding", 138 "grpc-encoding",
128 "grpc-message", 139 "grpc-message",
129 "grpc-status", 140 "grpc-status",
130 "grpc-timeout", 141 "grpc-timeout",
131 "grpc-status-details-bin", 142 "grpc-status-details-bin",
143 // Intentionally exclude grpc-previous-rpc-attempts and
144 // grpc-retry-pushback-ms, which are "reserved", but their API
145 // intentionally works via metadata.
132 "te": 146 "te":
133 return true 147 return true
134 default: 148 default:
@@ -136,28 +150,55 @@ func isReservedHeader(hdr string) bool {
136 } 150 }
137} 151}
138 152
139// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders 153// isWhitelistedHeader checks whether hdr should be propagated into metadata
140// that should be propagated into metadata visible to users. 154// visible to users, even though it is classified as "reserved", above.
141func isWhitelistedPseudoHeader(hdr string) bool { 155func isWhitelistedHeader(hdr string) bool {
142 switch hdr { 156 switch hdr {
143 case ":authority": 157 case ":authority", "user-agent":
144 return true 158 return true
145 default: 159 default:
146 return false 160 return false
147 } 161 }
148} 162}
149 163
150func validContentType(t string) bool { 164// contentSubtype returns the content-subtype for the given content-type. The
151 e := "application/grpc" 165// given content-type must be a valid content-type that starts with
152 if !strings.HasPrefix(t, e) { 166// "application/grpc". A content-subtype will follow "application/grpc" after a
153 return false 167// "+" or ";". See
168// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
169// more details.
170//
171// If contentType is not a valid content-type for gRPC, the boolean
172// will be false, otherwise true. If content-type == "application/grpc",
173// "application/grpc+", or "application/grpc;", the boolean will be true,
174// but no content-subtype will be returned.
175//
176// contentType is assumed to be lowercase already.
177func contentSubtype(contentType string) (string, bool) {
178 if contentType == baseContentType {
179 return "", true
180 }
181 if !strings.HasPrefix(contentType, baseContentType) {
182 return "", false
183 }
184 // guaranteed since != baseContentType and has baseContentType prefix
185 switch contentType[len(baseContentType)] {
186 case '+', ';':
187 // this will return true for "application/grpc+" or "application/grpc;"
188 // which the previous validContentType function tested to be valid, so we
189 // just say that no content-subtype is specified in this case
190 return contentType[len(baseContentType)+1:], true
191 default:
192 return "", false
154 } 193 }
155 // Support variations on the content-type 194}
156 // (e.g. "application/grpc+blah", "application/grpc;blah"). 195
157 if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { 196// contentSubtype is assumed to be lowercase
158 return false 197func contentType(contentSubtype string) string {
198 if contentSubtype == "" {
199 return baseContentType
159 } 200 }
160 return true 201 return baseContentType + "+" + contentSubtype
161} 202}
162 203
163func (d *decodeState) status() *status.Status { 204func (d *decodeState) status() *status.Status {
@@ -197,13 +238,22 @@ func decodeMetadataHeader(k, v string) (string, error) {
197 return v, nil 238 return v, nil
198} 239}
199 240
200func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { 241func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
242 // frame.Truncated is set to true when framer detects that the current header
243 // list size hits MaxHeaderListSize limit.
244 if frame.Truncated {
245 return status.Error(codes.Internal, "peer header list size exceeded limit")
246 }
201 for _, hf := range frame.Fields { 247 for _, hf := range frame.Fields {
202 if err := d.processHeaderField(hf); err != nil { 248 if err := d.processHeaderField(hf); err != nil {
203 return err 249 return err
204 } 250 }
205 } 251 }
206 252
253 if d.serverSide {
254 return nil
255 }
256
207 // If grpc status exists, no need to check further. 257 // If grpc status exists, no need to check further.
208 if d.rawStatusCode != nil || d.statusGen != nil { 258 if d.rawStatusCode != nil || d.statusGen != nil {
209 return nil 259 return nil
@@ -212,7 +262,7 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error
212 // If grpc status doesn't exist and http status doesn't exist, 262 // If grpc status doesn't exist and http status doesn't exist,
213 // then it's a malformed header. 263 // then it's a malformed header.
214 if d.httpStatus == nil { 264 if d.httpStatus == nil {
215 return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") 265 return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
216 } 266 }
217 267
218 if *(d.httpStatus) != http.StatusOK { 268 if *(d.httpStatus) != http.StatusOK {
@@ -220,33 +270,46 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error
220 if !ok { 270 if !ok {
221 code = codes.Unknown 271 code = codes.Unknown
222 } 272 }
223 return streamErrorf(code, http.StatusText(*(d.httpStatus))) 273 return status.Error(code, http.StatusText(*(d.httpStatus)))
224 } 274 }
225 275
226 // gRPC status doesn't exist and http status is OK. 276 // gRPC status doesn't exist and http status is OK.
227 // Set rawStatusCode to be unknown and return nil error. 277 // Set rawStatusCode to be unknown and return nil error.
228 // So that, if the stream has ended this Unknown status 278 // So that, if the stream has ended this Unknown status
229 // will be propogated to the user. 279 // will be propagated to the user.
230 // Otherwise, it will be ignored. In which case, status from 280 // Otherwise, it will be ignored. In which case, status from
231 // a later trailer, that has StreamEnded flag set, is propogated. 281 // a later trailer, that has StreamEnded flag set, is propagated.
232 code := int(codes.Unknown) 282 code := int(codes.Unknown)
233 d.rawStatusCode = &code 283 d.rawStatusCode = &code
234 return nil 284 return nil
285}
235 286
287func (d *decodeState) addMetadata(k, v string) {
288 if d.mdata == nil {
289 d.mdata = make(map[string][]string)
290 }
291 d.mdata[k] = append(d.mdata[k], v)
236} 292}
237 293
238func (d *decodeState) processHeaderField(f hpack.HeaderField) error { 294func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
239 switch f.Name { 295 switch f.Name {
240 case "content-type": 296 case "content-type":
241 if !validContentType(f.Value) { 297 contentSubtype, validContentType := contentSubtype(f.Value)
242 return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value) 298 if !validContentType {
299 return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
243 } 300 }
301 d.contentSubtype = contentSubtype
302 // TODO: do we want to propagate the whole content-type in the metadata,
303 // or come up with a way to just propagate the content-subtype if it was set?
304 // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
305 // in the metadata?
306 d.addMetadata(f.Name, f.Value)
244 case "grpc-encoding": 307 case "grpc-encoding":
245 d.encoding = f.Value 308 d.encoding = f.Value
246 case "grpc-status": 309 case "grpc-status":
247 code, err := strconv.Atoi(f.Value) 310 code, err := strconv.Atoi(f.Value)
248 if err != nil { 311 if err != nil {
249 return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) 312 return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
250 } 313 }
251 d.rawStatusCode = &code 314 d.rawStatusCode = &code
252 case "grpc-message": 315 case "grpc-message":
@@ -254,39 +317,51 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
254 case "grpc-status-details-bin": 317 case "grpc-status-details-bin":
255 v, err := decodeBinHeader(f.Value) 318 v, err := decodeBinHeader(f.Value)
256 if err != nil { 319 if err != nil {
257 return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) 320 return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
258 } 321 }
259 s := &spb.Status{} 322 s := &spb.Status{}
260 if err := proto.Unmarshal(v, s); err != nil { 323 if err := proto.Unmarshal(v, s); err != nil {
261 return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) 324 return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
262 } 325 }
263 d.statusGen = status.FromProto(s) 326 d.statusGen = status.FromProto(s)
264 case "grpc-timeout": 327 case "grpc-timeout":
265 d.timeoutSet = true 328 d.timeoutSet = true
266 var err error 329 var err error
267 if d.timeout, err = decodeTimeout(f.Value); err != nil { 330 if d.timeout, err = decodeTimeout(f.Value); err != nil {
268 return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err) 331 return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
269 } 332 }
270 case ":path": 333 case ":path":
271 d.method = f.Value 334 d.method = f.Value
272 case ":status": 335 case ":status":
273 code, err := strconv.Atoi(f.Value) 336 code, err := strconv.Atoi(f.Value)
274 if err != nil { 337 if err != nil {
275 return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) 338 return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
276 } 339 }
277 d.httpStatus = &code 340 d.httpStatus = &code
341 case "grpc-tags-bin":
342 v, err := decodeBinHeader(f.Value)
343 if err != nil {
344 return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
345 }
346 d.statsTags = v
347 d.addMetadata(f.Name, string(v))
348 case "grpc-trace-bin":
349 v, err := decodeBinHeader(f.Value)
350 if err != nil {
351 return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
352 }
353 d.statsTrace = v
354 d.addMetadata(f.Name, string(v))
278 default: 355 default:
279 if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { 356 if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
280 if d.mdata == nil { 357 break
281 d.mdata = make(map[string][]string) 358 }
282 } 359 v, err := decodeMetadataHeader(f.Name, f.Value)
283 v, err := decodeMetadataHeader(f.Name, f.Value) 360 if err != nil {
284 if err != nil { 361 errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
285 errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) 362 return nil
286 return nil
287 }
288 d.mdata[f.Name] = append(d.mdata[f.Name], v)
289 } 363 }
364 d.addMetadata(f.Name, v)
290 } 365 }
291 return nil 366 return nil
292} 367}
@@ -361,6 +436,10 @@ func decodeTimeout(s string) (time.Duration, error) {
361 if size < 2 { 436 if size < 2 {
362 return 0, fmt.Errorf("transport: timeout string is too short: %q", s) 437 return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
363 } 438 }
439 if size > 9 {
440 // Spec allows for 8 digits plus the unit.
441 return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
442 }
364 unit := timeoutUnit(s[size-1]) 443 unit := timeoutUnit(s[size-1])
365 d, ok := timeoutUnitToDuration(unit) 444 d, ok := timeoutUnitToDuration(unit)
366 if !ok { 445 if !ok {
@@ -370,21 +449,27 @@ func decodeTimeout(s string) (time.Duration, error) {
370 if err != nil { 449 if err != nil {
371 return 0, err 450 return 0, err
372 } 451 }
452 const maxHours = math.MaxInt64 / int64(time.Hour)
453 if d == time.Hour && t > maxHours {
454 // This timeout would overflow math.MaxInt64; clamp it.
455 return time.Duration(math.MaxInt64), nil
456 }
373 return d * time.Duration(t), nil 457 return d * time.Duration(t), nil
374} 458}
375 459
376const ( 460const (
377 spaceByte = ' ' 461 spaceByte = ' '
378 tildaByte = '~' 462 tildeByte = '~'
379 percentByte = '%' 463 percentByte = '%'
380) 464)
381 465
382// encodeGrpcMessage is used to encode status code in header field 466// encodeGrpcMessage is used to encode status code in header field
383// "grpc-message". 467// "grpc-message". It does percent encoding and also replaces invalid utf-8
384// It checks to see if each individual byte in msg is an 468// characters with Unicode replacement character.
385// allowable byte, and then either percent encoding or passing it through. 469//
386// When percent encoding, the byte is converted into hexadecimal notation 470// It checks to see if each individual byte in msg is an allowable byte, and
387// with a '%' prepended. 471// then either percent encoding or passing it through. When percent encoding,
472// the byte is converted into hexadecimal notation with a '%' prepended.
388func encodeGrpcMessage(msg string) string { 473func encodeGrpcMessage(msg string) string {
389 if msg == "" { 474 if msg == "" {
390 return "" 475 return ""
@@ -392,7 +477,7 @@ func encodeGrpcMessage(msg string) string {
392 lenMsg := len(msg) 477 lenMsg := len(msg)
393 for i := 0; i < lenMsg; i++ { 478 for i := 0; i < lenMsg; i++ {
394 c := msg[i] 479 c := msg[i]
395 if !(c >= spaceByte && c < tildaByte && c != percentByte) { 480 if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
396 return encodeGrpcMessageUnchecked(msg) 481 return encodeGrpcMessageUnchecked(msg)
397 } 482 }
398 } 483 }
@@ -401,14 +486,26 @@ func encodeGrpcMessage(msg string) string {
401 486
402func encodeGrpcMessageUnchecked(msg string) string { 487func encodeGrpcMessageUnchecked(msg string) string {
403 var buf bytes.Buffer 488 var buf bytes.Buffer
404 lenMsg := len(msg) 489 for len(msg) > 0 {
405 for i := 0; i < lenMsg; i++ { 490 r, size := utf8.DecodeRuneInString(msg)
406 c := msg[i] 491 for _, b := range []byte(string(r)) {
407 if c >= spaceByte && c < tildaByte && c != percentByte { 492 if size > 1 {
408 buf.WriteByte(c) 493 // If size > 1, r is not ascii. Always do percent encoding.
409 } else { 494 buf.WriteString(fmt.Sprintf("%%%02X", b))
410 buf.WriteString(fmt.Sprintf("%%%02X", c)) 495 continue
496 }
497
498 // The for loop is necessary even if size == 1. r could be
499 // utf8.RuneError.
500 //
501 // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
502 if b >= spaceByte && b <= tildeByte && b != percentByte {
503 buf.WriteByte(b)
504 } else {
505 buf.WriteString(fmt.Sprintf("%%%02X", b))
506 }
411 } 507 }
508 msg = msg[size:]
412 } 509 }
413 return buf.String() 510 return buf.String()
414} 511}
@@ -447,151 +544,80 @@ func decodeGrpcMessageUnchecked(msg string) string {
447 return buf.String() 544 return buf.String()
448} 545}
449 546
450type framer struct { 547type bufWriter struct {
451 numWriters int32 548 buf []byte
452 reader io.Reader 549 offset int
453 writer *bufio.Writer 550 batchSize int
454 fr *http2.Framer 551 conn net.Conn
455} 552 err error
456
457func newFramer(conn net.Conn) *framer {
458 f := &framer{
459 reader: bufio.NewReaderSize(conn, http2IOBufSize),
460 writer: bufio.NewWriterSize(conn, http2IOBufSize),
461 }
462 f.fr = http2.NewFramer(f.writer, f.reader)
463 // Opt-in to Frame reuse API on framer to reduce garbage.
464 // Frames aren't safe to read from after a subsequent call to ReadFrame.
465 f.fr.SetReuseFrames()
466 f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
467 return f
468}
469 553
470func (f *framer) adjustNumWriters(i int32) int32 { 554 onFlush func()
471 return atomic.AddInt32(&f.numWriters, i)
472} 555}
473 556
474// The following writeXXX functions can only be called when the caller gets 557func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
475// unblocked from writableChan channel (i.e., owns the privilege to write). 558 return &bufWriter{
476 559 buf: make([]byte, batchSize*2),
477func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { 560 batchSize: batchSize,
478 if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { 561 conn: conn,
479 return err
480 } 562 }
481 if forceFlush {
482 return f.writer.Flush()
483 }
484 return nil
485} 563}
486 564
487func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { 565func (w *bufWriter) Write(b []byte) (n int, err error) {
488 if err := f.fr.WriteData(streamID, endStream, data); err != nil { 566 if w.err != nil {
489 return err 567 return 0, w.err
490 } 568 }
491 if forceFlush { 569 if w.batchSize == 0 { // Buffer has been disabled.
492 return f.writer.Flush() 570 return w.conn.Write(b)
493 } 571 }
494 return nil 572 for len(b) > 0 {
495} 573 nn := copy(w.buf[w.offset:], b)
496 574 b = b[nn:]
497func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { 575 w.offset += nn
498 if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { 576 n += nn
499 return err 577 if w.offset >= w.batchSize {
500 } 578 err = w.Flush()
501 if forceFlush { 579 }
502 return f.writer.Flush()
503 }
504 return nil
505}
506
507func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
508 if err := f.fr.WriteHeaders(p); err != nil {
509 return err
510 }
511 if forceFlush {
512 return f.writer.Flush()
513 }
514 return nil
515}
516
517func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
518 if err := f.fr.WritePing(ack, data); err != nil {
519 return err
520 }
521 if forceFlush {
522 return f.writer.Flush()
523 }
524 return nil
525}
526
527func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
528 if err := f.fr.WritePriority(streamID, p); err != nil {
529 return err
530 }
531 if forceFlush {
532 return f.writer.Flush()
533 } 580 }
534 return nil 581 return n, err
535} 582}
536 583
537func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { 584func (w *bufWriter) Flush() error {
538 if err := f.fr.WritePushPromise(p); err != nil { 585 if w.err != nil {
539 return err 586 return w.err
540 }
541 if forceFlush {
542 return f.writer.Flush()
543 } 587 }
544 return nil 588 if w.offset == 0 {
545} 589 return nil
546
547func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
548 if err := f.fr.WriteRSTStream(streamID, code); err != nil {
549 return err
550 } 590 }
551 if forceFlush { 591 if w.onFlush != nil {
552 return f.writer.Flush() 592 w.onFlush()
553 } 593 }
554 return nil 594 _, w.err = w.conn.Write(w.buf[:w.offset])
595 w.offset = 0
596 return w.err
555} 597}
556 598
557func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { 599type framer struct {
558 if err := f.fr.WriteSettings(settings...); err != nil { 600 writer *bufWriter
559 return err 601 fr *http2.Framer
560 }
561 if forceFlush {
562 return f.writer.Flush()
563 }
564 return nil
565} 602}
566 603
567func (f *framer) writeSettingsAck(forceFlush bool) error { 604func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
568 if err := f.fr.WriteSettingsAck(); err != nil { 605 if writeBufferSize < 0 {
569 return err 606 writeBufferSize = 0
570 } 607 }
571 if forceFlush { 608 var r io.Reader = conn
572 return f.writer.Flush() 609 if readBufferSize > 0 {
610 r = bufio.NewReaderSize(r, readBufferSize)
573 } 611 }
574 return nil 612 w := newBufWriter(conn, writeBufferSize)
575} 613 f := &framer{
576 614 writer: w,
577func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { 615 fr: http2.NewFramer(w, r),
578 if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
579 return err
580 }
581 if forceFlush {
582 return f.writer.Flush()
583 } 616 }
584 return nil 617 // Opt-in to Frame reuse API on framer to reduce garbage.
585} 618 // Frames aren't safe to read from after a subsequent call to ReadFrame.
586 619 f.fr.SetReuseFrames()
587func (f *framer) flushWrite() error { 620 f.fr.MaxHeaderListSize = maxHeaderListSize
588 return f.writer.Flush() 621 f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
589} 622 return f
590
591func (f *framer) readFrame() (http2.Frame, error) {
592 return f.fr.ReadFrame()
593}
594
595func (f *framer) errorDetail() error {
596 return f.fr.ErrorDetail()
597} 623}
diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
index ac8e358..879df80 100644
--- a/vendor/google.golang.org/grpc/transport/log.go
+++ b/vendor/google.golang.org/grpc/internal/transport/log.go
@@ -42,9 +42,3 @@ func errorf(format string, args ...interface{}) {
42 grpclog.Errorf(format, args...) 42 grpclog.Errorf(format, args...)
43 } 43 }
44} 44}
45
46func fatalf(format string, args ...interface{}) {
47 if grpclog.V(logLevel) {
48 grpclog.Fatalf(format, args...)
49 }
50}
diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index ec0fe67..2580aa7 100644
--- a/vendor/google.golang.org/grpc/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -17,17 +17,19 @@
17 */ 17 */
18 18
19// Package transport defines and implements message oriented communication 19// Package transport defines and implements message oriented communication
20// channel to complete various transactions (e.g., an RPC). 20// channel to complete various transactions (e.g., an RPC). It is meant for
21package transport // import "google.golang.org/grpc/transport" 21// grpc-internal usage and is not intended to be imported directly by users.
22package transport
22 23
23import ( 24import (
25 "context"
26 "errors"
24 "fmt" 27 "fmt"
25 "io" 28 "io"
26 "net" 29 "net"
27 "sync" 30 "sync"
31 "sync/atomic"
28 32
29 "golang.org/x/net/context"
30 "golang.org/x/net/http2"
31 "google.golang.org/grpc/codes" 33 "google.golang.org/grpc/codes"
32 "google.golang.org/grpc/credentials" 34 "google.golang.org/grpc/credentials"
33 "google.golang.org/grpc/keepalive" 35 "google.golang.org/grpc/keepalive"
@@ -56,6 +58,7 @@ type recvBuffer struct {
56 c chan recvMsg 58 c chan recvMsg
57 mu sync.Mutex 59 mu sync.Mutex
58 backlog []recvMsg 60 backlog []recvMsg
61 err error
59} 62}
60 63
61func newRecvBuffer() *recvBuffer { 64func newRecvBuffer() *recvBuffer {
@@ -67,20 +70,27 @@ func newRecvBuffer() *recvBuffer {
67 70
68func (b *recvBuffer) put(r recvMsg) { 71func (b *recvBuffer) put(r recvMsg) {
69 b.mu.Lock() 72 b.mu.Lock()
70 defer b.mu.Unlock() 73 if b.err != nil {
74 b.mu.Unlock()
75 // An error had occurred earlier, don't accept more
76 // data or errors.
77 return
78 }
79 b.err = r.err
71 if len(b.backlog) == 0 { 80 if len(b.backlog) == 0 {
72 select { 81 select {
73 case b.c <- r: 82 case b.c <- r:
83 b.mu.Unlock()
74 return 84 return
75 default: 85 default:
76 } 86 }
77 } 87 }
78 b.backlog = append(b.backlog, r) 88 b.backlog = append(b.backlog, r)
89 b.mu.Unlock()
79} 90}
80 91
81func (b *recvBuffer) load() { 92func (b *recvBuffer) load() {
82 b.mu.Lock() 93 b.mu.Lock()
83 defer b.mu.Unlock()
84 if len(b.backlog) > 0 { 94 if len(b.backlog) > 0 {
85 select { 95 select {
86 case b.c <- b.backlog[0]: 96 case b.c <- b.backlog[0]:
@@ -89,6 +99,7 @@ func (b *recvBuffer) load() {
89 default: 99 default:
90 } 100 }
91 } 101 }
102 b.mu.Unlock()
92} 103}
93 104
94// get returns the channel that receives a recvMsg in the buffer. 105// get returns the channel that receives a recvMsg in the buffer.
@@ -102,11 +113,12 @@ func (b *recvBuffer) get() <-chan recvMsg {
102// recvBufferReader implements io.Reader interface to read the data from 113// recvBufferReader implements io.Reader interface to read the data from
103// recvBuffer. 114// recvBuffer.
104type recvBufferReader struct { 115type recvBufferReader struct {
105 ctx context.Context 116 closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
106 goAway chan struct{} 117 ctx context.Context
107 recv *recvBuffer 118 ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
108 last []byte // Stores the remaining data in the previous calls. 119 recv *recvBuffer
109 err error 120 last []byte // Stores the remaining data in the previous calls.
121 err error
110} 122}
111 123
112// Read reads the next len(p) bytes from last. If last is drained, it tries to 124// Read reads the next len(p) bytes from last. If last is drained, it tries to
@@ -116,87 +128,54 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) {
116 if r.err != nil { 128 if r.err != nil {
117 return 0, r.err 129 return 0, r.err
118 } 130 }
119 n, r.err = r.read(p)
120 return n, r.err
121}
122
123func (r *recvBufferReader) read(p []byte) (n int, err error) {
124 if r.last != nil && len(r.last) > 0 { 131 if r.last != nil && len(r.last) > 0 {
125 // Read remaining data left in last call. 132 // Read remaining data left in last call.
126 copied := copy(p, r.last) 133 copied := copy(p, r.last)
127 r.last = r.last[copied:] 134 r.last = r.last[copied:]
128 return copied, nil 135 return copied, nil
129 } 136 }
137 if r.closeStream != nil {
138 n, r.err = r.readClient(p)
139 } else {
140 n, r.err = r.read(p)
141 }
142 return n, r.err
143}
144
145func (r *recvBufferReader) read(p []byte) (n int, err error) {
130 select { 146 select {
131 case <-r.ctx.Done(): 147 case <-r.ctxDone:
132 return 0, ContextErr(r.ctx.Err()) 148 return 0, ContextErr(r.ctx.Err())
133 case <-r.goAway:
134 return 0, ErrStreamDrain
135 case m := <-r.recv.get(): 149 case m := <-r.recv.get():
136 r.recv.load() 150 return r.readAdditional(m, p)
137 if m.err != nil {
138 return 0, m.err
139 }
140 copied := copy(p, m.data)
141 r.last = m.data[copied:]
142 return copied, nil
143 } 151 }
144} 152}
145 153
146// All items in an out of a controlBuffer should be the same type. 154func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
147type item interface { 155 // If the context is canceled, then closes the stream with nil metadata.
148 item() 156 // closeStream writes its error parameter to r.recv as a recvMsg.
149} 157 // r.readAdditional acts on that message and returns the necessary error.
150 158 select {
151// controlBuffer is an unbounded channel of item. 159 case <-r.ctxDone:
152type controlBuffer struct { 160 r.closeStream(ContextErr(r.ctx.Err()))
153 c chan item 161 m := <-r.recv.get()
154 mu sync.Mutex 162 return r.readAdditional(m, p)
155 backlog []item 163 case m := <-r.recv.get():
156} 164 return r.readAdditional(m, p)
157
158func newControlBuffer() *controlBuffer {
159 b := &controlBuffer{
160 c: make(chan item, 1),
161 } 165 }
162 return b
163} 166}
164 167
165func (b *controlBuffer) put(r item) { 168func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
166 b.mu.Lock() 169 r.recv.load()
167 defer b.mu.Unlock() 170 if m.err != nil {
168 if len(b.backlog) == 0 { 171 return 0, m.err
169 select {
170 case b.c <- r:
171 return
172 default:
173 }
174 } 172 }
175 b.backlog = append(b.backlog, r) 173 copied := copy(p, m.data)
174 r.last = m.data[copied:]
175 return copied, nil
176} 176}
177 177
178func (b *controlBuffer) load() { 178type streamState uint32
179 b.mu.Lock()
180 defer b.mu.Unlock()
181 if len(b.backlog) > 0 {
182 select {
183 case b.c <- b.backlog[0]:
184 b.backlog[0] = nil
185 b.backlog = b.backlog[1:]
186 default:
187 }
188 }
189}
190
191// get returns the channel that receives an item in the buffer.
192//
193// Upon receipt of an item, the caller should call load to send another
194// item onto the channel if there is any.
195func (b *controlBuffer) get() <-chan item {
196 return b.c
197}
198
199type streamState uint8
200 179
201const ( 180const (
202 streamActive streamState = iota 181 streamActive streamState = iota
@@ -207,65 +186,98 @@ const (
207 186
208// Stream represents an RPC in the transport layer. 187// Stream represents an RPC in the transport layer.
209type Stream struct { 188type Stream struct {
210 id uint32 189 id uint32
211 // nil for client side Stream. 190 st ServerTransport // nil for client side Stream
212 st ServerTransport 191 ctx context.Context // the associated context of the stream
213 // ctx is the associated context of the stream. 192 cancel context.CancelFunc // always nil for client side Stream
214 ctx context.Context 193 done chan struct{} // closed at the end of stream to unblock writers. On the client side.
215 // cancel is always nil for client side Stream. 194 ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
216 cancel context.CancelFunc 195 method string // the associated RPC method of the stream
217 // done is closed when the final status arrives.
218 done chan struct{}
219 // goAway is closed when the server sent GoAways signal before this stream was initiated.
220 goAway chan struct{}
221 // method records the associated RPC method of the stream.
222 method string
223 recvCompress string 196 recvCompress string
224 sendCompress string 197 sendCompress string
225 buf *recvBuffer 198 buf *recvBuffer
226 trReader io.Reader 199 trReader io.Reader
227 fc *inFlow 200 fc *inFlow
228 recvQuota uint32 201 wq *writeQuota
229
230 // TODO: Remote this unused variable.
231 // The accumulated inbound quota pending for window update.
232 updateQuota uint32
233 202
234 // Callback to state application's intentions to read data. This 203 // Callback to state application's intentions to read data. This
235 // is used to adjust flow control, if need be. 204 // is used to adjust flow control, if needed.
236 requestRead func(int) 205 requestRead func(int)
237 206
238 sendQuotaPool *quotaPool 207 headerChan chan struct{} // closed to indicate the end of header metadata.
239 // Close headerChan to indicate the end of reception of header metadata. 208 headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
240 headerChan chan struct{} 209
241 // header caches the received header metadata. 210 // hdrMu protects header and trailer metadata on the server-side.
242 header metadata.MD 211 hdrMu sync.Mutex
243 // The key-value map of trailer metadata. 212 // On client side, header keeps the received header metadata.
244 trailer metadata.MD 213 //
245 214 // On server side, header keeps the header set by SetHeader(). The complete
246 mu sync.RWMutex // guard the following 215 // header will merged into this after t.WriteHeader() is called.
247 // headerOK becomes true from the first header is about to send. 216 header metadata.MD
248 headerOk bool 217 trailer metadata.MD // the key-value map of trailer metadata.
249 state streamState 218
250 // true iff headerChan is closed. Used to avoid closing headerChan 219 noHeaders bool // set if the client never received headers (set only after the stream is done).
251 // multiple times. 220
252 headerDone bool 221 // On the server-side, headerSent is atomically set to 1 when the headers are sent out.
253 // the status error received from the server. 222 headerSent uint32
223
224 state streamState
225
226 // On client-side it is the status error received from the server.
227 // On server-side it is unused.
254 status *status.Status 228 status *status.Status
255 // rstStream indicates whether a RST_STREAM frame needs to be sent 229
256 // to the server to signify that this stream is closing. 230 bytesReceived uint32 // indicates whether any bytes have been received on this stream
257 rstStream bool 231 unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
258 // rstError is the error that needs to be sent along with the RST_STREAM frame. 232
259 rstError http2.ErrCode 233 // contentSubtype is the content-subtype for requests.
260 // bytesSent and bytesReceived indicates whether any bytes have been sent or 234 // this must be lowercase or the behavior is undefined.
261 // received on this stream. 235 contentSubtype string
262 bytesSent bool 236}
263 bytesReceived bool 237
238// isHeaderSent is only valid on the server-side.
239func (s *Stream) isHeaderSent() bool {
240 return atomic.LoadUint32(&s.headerSent) == 1
241}
242
243// updateHeaderSent updates headerSent and returns true
244// if it was alreay set. It is valid only on server-side.
245func (s *Stream) updateHeaderSent() bool {
246 return atomic.SwapUint32(&s.headerSent, 1) == 1
247}
248
249func (s *Stream) swapState(st streamState) streamState {
250 return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
251}
252
253func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
254 return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
255}
256
257func (s *Stream) getState() streamState {
258 return streamState(atomic.LoadUint32((*uint32)(&s.state)))
259}
260
261func (s *Stream) waitOnHeader() error {
262 if s.headerChan == nil {
263 // On the server headerChan is always nil since a stream originates
264 // only after having received headers.
265 return nil
266 }
267 select {
268 case <-s.ctx.Done():
269 return ContextErr(s.ctx.Err())
270 case <-s.headerChan:
271 return nil
272 }
264} 273}
265 274
266// RecvCompress returns the compression algorithm applied to the inbound 275// RecvCompress returns the compression algorithm applied to the inbound
267// message. It is empty string if there is no compression applied. 276// message. It is empty string if there is no compression applied.
268func (s *Stream) RecvCompress() string { 277func (s *Stream) RecvCompress() string {
278 if err := s.waitOnHeader(); err != nil {
279 return ""
280 }
269 return s.recvCompress 281 return s.recvCompress
270} 282}
271 283
@@ -274,53 +286,68 @@ func (s *Stream) SetSendCompress(str string) {
274 s.sendCompress = str 286 s.sendCompress = str
275} 287}
276 288
277// Done returns a chanel which is closed when it receives the final status 289// Done returns a channel which is closed when it receives the final status
278// from the server. 290// from the server.
279func (s *Stream) Done() <-chan struct{} { 291func (s *Stream) Done() <-chan struct{} {
280 return s.done 292 return s.done
281} 293}
282 294
283// GoAway returns a channel which is closed when the server sent GoAways signal 295// Header returns the header metadata of the stream.
284// before this stream was initiated. 296//
285func (s *Stream) GoAway() <-chan struct{} { 297// On client side, it acquires the key-value pairs of header metadata once it is
286 return s.goAway 298// available. It blocks until i) the metadata is ready or ii) there is no header
287} 299// metadata or iii) the stream is canceled/expired.
288 300//
289// Header acquires the key-value pairs of header metadata once it 301// On server side, it returns the out header after t.WriteHeader is called.
290// is available. It blocks until i) the metadata is ready or ii) there is no
291// header metadata or iii) the stream is canceled/expired.
292func (s *Stream) Header() (metadata.MD, error) { 302func (s *Stream) Header() (metadata.MD, error) {
293 var err error 303 if s.headerChan == nil && s.header != nil {
294 select { 304 // On server side, return the header in stream. It will be the out
295 case <-s.ctx.Done(): 305 // header after t.WriteHeader is called.
296 err = ContextErr(s.ctx.Err())
297 case <-s.goAway:
298 err = ErrStreamDrain
299 case <-s.headerChan:
300 return s.header.Copy(), nil 306 return s.header.Copy(), nil
301 } 307 }
308 err := s.waitOnHeader()
302 // Even if the stream is closed, header is returned if available. 309 // Even if the stream is closed, header is returned if available.
303 select { 310 select {
304 case <-s.headerChan: 311 case <-s.headerChan:
312 if s.header == nil {
313 return nil, nil
314 }
305 return s.header.Copy(), nil 315 return s.header.Copy(), nil
306 default: 316 default:
307 } 317 }
308 return nil, err 318 return nil, err
309} 319}
310 320
321// TrailersOnly blocks until a header or trailers-only frame is received and
322// then returns true if the stream was trailers-only. If the stream ends
323// before headers are received, returns true, nil. If a context error happens
324// first, returns it as a status error. Client-side only.
325func (s *Stream) TrailersOnly() (bool, error) {
326 err := s.waitOnHeader()
327 if err != nil {
328 return false, err
329 }
330 // if !headerDone, some other connection error occurred.
331 return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
332}
333
311// Trailer returns the cached trailer metedata. Note that if it is not called 334// Trailer returns the cached trailer metedata. Note that if it is not called
312// after the entire stream is done, it could return an empty MD. Client 335// after the entire stream is done, it could return an empty MD. Client
313// side only. 336// side only.
337// It can be safely read only after stream has ended that is either read
338// or write have returned io.EOF.
314func (s *Stream) Trailer() metadata.MD { 339func (s *Stream) Trailer() metadata.MD {
315 s.mu.RLock() 340 c := s.trailer.Copy()
316 defer s.mu.RUnlock() 341 return c
317 return s.trailer.Copy()
318} 342}
319 343
320// ServerTransport returns the underlying ServerTransport for the stream. 344// ContentSubtype returns the content-subtype for a request. For example, a
321// The client side stream always returns nil. 345// content-subtype of "proto" will result in a content-type of
322func (s *Stream) ServerTransport() ServerTransport { 346// "application/grpc+proto". This will always be lowercase. See
323 return s.st 347// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
348// more details.
349func (s *Stream) ContentSubtype() string {
350 return s.contentSubtype
324} 351}
325 352
326// Context returns the context of the stream. 353// Context returns the context of the stream.
@@ -334,34 +361,48 @@ func (s *Stream) Method() string {
334} 361}
335 362
336// Status returns the status received from the server. 363// Status returns the status received from the server.
364// Status can be read safely only after the stream has ended,
365// that is, after Done() is closed.
337func (s *Stream) Status() *status.Status { 366func (s *Stream) Status() *status.Status {
338 return s.status 367 return s.status
339} 368}
340 369
341// SetHeader sets the header metadata. This can be called multiple times. 370// SetHeader sets the header metadata. This can be called multiple times.
342// Server side only. 371// Server side only.
372// This should not be called in parallel to other data writes.
343func (s *Stream) SetHeader(md metadata.MD) error { 373func (s *Stream) SetHeader(md metadata.MD) error {
344 s.mu.Lock()
345 defer s.mu.Unlock()
346 if s.headerOk || s.state == streamDone {
347 return ErrIllegalHeaderWrite
348 }
349 if md.Len() == 0 { 374 if md.Len() == 0 {
350 return nil 375 return nil
351 } 376 }
377 if s.isHeaderSent() || s.getState() == streamDone {
378 return ErrIllegalHeaderWrite
379 }
380 s.hdrMu.Lock()
352 s.header = metadata.Join(s.header, md) 381 s.header = metadata.Join(s.header, md)
382 s.hdrMu.Unlock()
353 return nil 383 return nil
354} 384}
355 385
386// SendHeader sends the given header metadata. The given metadata is
387// combined with any metadata set by previous calls to SetHeader and
388// then written to the transport stream.
389func (s *Stream) SendHeader(md metadata.MD) error {
390 return s.st.WriteHeader(s, md)
391}
392
356// SetTrailer sets the trailer metadata which will be sent with the RPC status 393// SetTrailer sets the trailer metadata which will be sent with the RPC status
357// by the server. This can be called multiple times. Server side only. 394// by the server. This can be called multiple times. Server side only.
395// This should not be called parallel to other data writes.
358func (s *Stream) SetTrailer(md metadata.MD) error { 396func (s *Stream) SetTrailer(md metadata.MD) error {
359 if md.Len() == 0 { 397 if md.Len() == 0 {
360 return nil 398 return nil
361 } 399 }
362 s.mu.Lock() 400 if s.getState() == streamDone {
363 defer s.mu.Unlock() 401 return ErrIllegalHeaderWrite
402 }
403 s.hdrMu.Lock()
364 s.trailer = metadata.Join(s.trailer, md) 404 s.trailer = metadata.Join(s.trailer, md)
405 s.hdrMu.Unlock()
365 return nil 406 return nil
366} 407}
367 408
@@ -401,26 +442,15 @@ func (t *transportReader) Read(p []byte) (n int, err error) {
401 return 442 return
402} 443}
403 444
404// finish sets the stream's state and status, and closes the done channel.
405// s.mu must be held by the caller. st must always be non-nil.
406func (s *Stream) finish(st *status.Status) {
407 s.status = st
408 s.state = streamDone
409 close(s.done)
410}
411
412// BytesSent indicates whether any bytes have been sent on this stream.
413func (s *Stream) BytesSent() bool {
414 s.mu.Lock()
415 defer s.mu.Unlock()
416 return s.bytesSent
417}
418
419// BytesReceived indicates whether any bytes have been received on this stream. 445// BytesReceived indicates whether any bytes have been received on this stream.
420func (s *Stream) BytesReceived() bool { 446func (s *Stream) BytesReceived() bool {
421 s.mu.Lock() 447 return atomic.LoadUint32(&s.bytesReceived) == 1
422 defer s.mu.Unlock() 448}
423 return s.bytesReceived 449
450// Unprocessed indicates whether the server did not process this stream --
451// i.e. it sent a refused stream or GOAWAY including this stream ID.
452func (s *Stream) Unprocessed() bool {
453 return atomic.LoadUint32(&s.unprocessed) == 1
424} 454}
425 455
426// GoString is implemented by Stream so context.String() won't 456// GoString is implemented by Stream so context.String() won't
@@ -429,27 +459,11 @@ func (s *Stream) GoString() string {
429 return fmt.Sprintf("<stream: %p, %v>", s, s.method) 459 return fmt.Sprintf("<stream: %p, %v>", s, s.method)
430} 460}
431 461
432// The key to save transport.Stream in the context.
433type streamKey struct{}
434
435// newContextWithStream creates a new context from ctx and attaches stream
436// to it.
437func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
438 return context.WithValue(ctx, streamKey{}, stream)
439}
440
441// StreamFromContext returns the stream saved in ctx.
442func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
443 s, ok = ctx.Value(streamKey{}).(*Stream)
444 return
445}
446
447// state of transport 462// state of transport
448type transportState int 463type transportState int
449 464
450const ( 465const (
451 reachable transportState = iota 466 reachable transportState = iota
452 unreachable
453 closing 467 closing
454 draining 468 draining
455) 469)
@@ -464,6 +478,10 @@ type ServerConfig struct {
464 KeepalivePolicy keepalive.EnforcementPolicy 478 KeepalivePolicy keepalive.EnforcementPolicy
465 InitialWindowSize int32 479 InitialWindowSize int32
466 InitialConnWindowSize int32 480 InitialConnWindowSize int32
481 WriteBufferSize int
482 ReadBufferSize int
483 ChannelzParentID int64
484 MaxHeaderListSize *uint32
467} 485}
468 486
469// NewServerTransport creates a ServerTransport with conn or non-nil error 487// NewServerTransport creates a ServerTransport with conn or non-nil error
@@ -476,37 +494,47 @@ func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (S
476type ConnectOptions struct { 494type ConnectOptions struct {
477 // UserAgent is the application user agent. 495 // UserAgent is the application user agent.
478 UserAgent string 496 UserAgent string
479 // Authority is the :authority pseudo-header to use. This field has no effect if
480 // TransportCredentials is set.
481 Authority string
482 // Dialer specifies how to dial a network address. 497 // Dialer specifies how to dial a network address.
483 Dialer func(context.Context, string) (net.Conn, error) 498 Dialer func(context.Context, string) (net.Conn, error)
484 // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. 499 // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
485 FailOnNonTempDialError bool 500 FailOnNonTempDialError bool
486 // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. 501 // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
487 PerRPCCredentials []credentials.PerRPCCredentials 502 PerRPCCredentials []credentials.PerRPCCredentials
488 // TransportCredentials stores the Authenticator required to setup a client connection. 503 // TransportCredentials stores the Authenticator required to setup a client
504 // connection. Only one of TransportCredentials and CredsBundle is non-nil.
489 TransportCredentials credentials.TransportCredentials 505 TransportCredentials credentials.TransportCredentials
506 // CredsBundle is the credentials bundle to be used. Only one of
507 // TransportCredentials and CredsBundle is non-nil.
508 CredsBundle credentials.Bundle
490 // KeepaliveParams stores the keepalive parameters. 509 // KeepaliveParams stores the keepalive parameters.
491 KeepaliveParams keepalive.ClientParameters 510 KeepaliveParams keepalive.ClientParameters
492 // StatsHandler stores the handler for stats. 511 // StatsHandler stores the handler for stats.
493 StatsHandler stats.Handler 512 StatsHandler stats.Handler
494 // InitialWindowSize sets the intial window size for a stream. 513 // InitialWindowSize sets the initial window size for a stream.
495 InitialWindowSize int32 514 InitialWindowSize int32
496 // InitialConnWindowSize sets the intial window size for a connection. 515 // InitialConnWindowSize sets the initial window size for a connection.
497 InitialConnWindowSize int32 516 InitialConnWindowSize int32
517 // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire.
518 WriteBufferSize int
519 // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
520 ReadBufferSize int
521 // ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
522 ChannelzParentID int64
523 // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
524 MaxHeaderListSize *uint32
498} 525}
499 526
500// TargetInfo contains the information of the target such as network address and metadata. 527// TargetInfo contains the information of the target such as network address and metadata.
501type TargetInfo struct { 528type TargetInfo struct {
502 Addr string 529 Addr string
503 Metadata interface{} 530 Metadata interface{}
531 Authority string
504} 532}
505 533
506// NewClientTransport establishes the transport with the required ConnectOptions 534// NewClientTransport establishes the transport with the required ConnectOptions
507// and returns it to the caller. 535// and returns it to the caller.
508func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) { 536func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
509 return newHTTP2Client(ctx, target, opts) 537 return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
510} 538}
511 539
512// Options provides additional hints and information for message 540// Options provides additional hints and information for message
@@ -515,11 +543,6 @@ type Options struct {
515 // Last indicates whether this write is the last piece for 543 // Last indicates whether this write is the last piece for
516 // this stream. 544 // this stream.
517 Last bool 545 Last bool
518
519 // Delay is a hint to the transport implementation for whether
520 // the data could be buffered for a batching write. The
521 // Transport implementation may ignore the hint.
522 Delay bool
523} 546}
524 547
525// CallHdr carries the information of a particular RPC. 548// CallHdr carries the information of a particular RPC.
@@ -530,10 +553,6 @@ type CallHdr struct {
530 // Method specifies the operation to perform. 553 // Method specifies the operation to perform.
531 Method string 554 Method string
532 555
533 // RecvCompress specifies the compression algorithm applied on
534 // inbound messages.
535 RecvCompress string
536
537 // SendCompress specifies the compression algorithm applied on 556 // SendCompress specifies the compression algorithm applied on
538 // outbound message. 557 // outbound message.
539 SendCompress string 558 SendCompress string
@@ -541,13 +560,15 @@ type CallHdr struct {
541 // Creds specifies credentials.PerRPCCredentials for a call. 560 // Creds specifies credentials.PerRPCCredentials for a call.
542 Creds credentials.PerRPCCredentials 561 Creds credentials.PerRPCCredentials
543 562
544 // Flush indicates whether a new stream command should be sent 563 // ContentSubtype specifies the content-subtype for a request. For example, a
545 // to the peer without waiting for the first data. This is 564 // content-subtype of "proto" will result in a content-type of
546 // only a hint. 565 // "application/grpc+proto". The value of ContentSubtype must be all
547 // If it's true, the transport may modify the flush decision 566 // lowercase, otherwise the behavior is undefined. See
548 // for performance purposes. 567 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
549 // If it's false, new stream will never be flushed. 568 // for more details.
550 Flush bool 569 ContentSubtype string
570
571 PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
551} 572}
552 573
553// ClientTransport is the common interface for all gRPC client-side transport 574// ClientTransport is the common interface for all gRPC client-side transport
@@ -564,7 +585,7 @@ type ClientTransport interface {
564 585
565 // Write sends the data for the given stream. A nil stream indicates 586 // Write sends the data for the given stream. A nil stream indicates
566 // the write is to be performed on the transport as a whole. 587 // the write is to be performed on the transport as a whole.
567 Write(s *Stream, data []byte, opts *Options) error 588 Write(s *Stream, hdr []byte, data []byte, opts *Options) error
568 589
569 // NewStream creates a Stream for an RPC. 590 // NewStream creates a Stream for an RPC.
570 NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) 591 NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
@@ -589,6 +610,12 @@ type ClientTransport interface {
589 610
590 // GetGoAwayReason returns the reason why GoAway frame was received. 611 // GetGoAwayReason returns the reason why GoAway frame was received.
591 GetGoAwayReason() GoAwayReason 612 GetGoAwayReason() GoAwayReason
613
614 // IncrMsgSent increments the number of message sent through this transport.
615 IncrMsgSent()
616
617 // IncrMsgRecv increments the number of message received through this transport.
618 IncrMsgRecv()
592} 619}
593 620
594// ServerTransport is the common interface for all gRPC server-side transport 621// ServerTransport is the common interface for all gRPC server-side transport
@@ -606,7 +633,7 @@ type ServerTransport interface {
606 633
607 // Write sends the data for the given stream. 634 // Write sends the data for the given stream.
608 // Write may not be called on all streams. 635 // Write may not be called on all streams.
609 Write(s *Stream, data []byte, opts *Options) error 636 Write(s *Stream, hdr []byte, data []byte, opts *Options) error
610 637
611 // WriteStatus sends the status of a stream to the client. WriteStatus is 638 // WriteStatus sends the status of a stream to the client. WriteStatus is
612 // the final call made on a stream and always occurs. 639 // the final call made on a stream and always occurs.
@@ -622,14 +649,12 @@ type ServerTransport interface {
622 649
623 // Drain notifies the client this ServerTransport stops accepting new RPCs. 650 // Drain notifies the client this ServerTransport stops accepting new RPCs.
624 Drain() 651 Drain()
625}
626 652
627// streamErrorf creates an StreamError with the specified error code and description. 653 // IncrMsgSent increments the number of message sent through this transport.
628func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { 654 IncrMsgSent()
629 return StreamError{ 655
630 Code: c, 656 // IncrMsgRecv increments the number of message received through this transport.
631 Desc: fmt.Sprintf(format, a...), 657 IncrMsgRecv()
632 }
633} 658}
634 659
635// connectionErrorf creates an ConnectionError with the specified error description. 660// connectionErrorf creates an ConnectionError with the specified error description.
@@ -671,60 +696,63 @@ func (e ConnectionError) Origin() error {
671var ( 696var (
672 // ErrConnClosing indicates that the transport is closing. 697 // ErrConnClosing indicates that the transport is closing.
673 ErrConnClosing = connectionErrorf(true, nil, "transport is closing") 698 ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
674 // ErrStreamDrain indicates that the stream is rejected by the server because 699 // errStreamDrain indicates that the stream is rejected because the
675 // the server stops accepting new RPCs. 700 // connection is draining. This could be caused by goaway or balancer
676 ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs") 701 // removing the address.
702 errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
703 // errStreamDone is returned from write at the client side to indiacte application
704 // layer of an error.
705 errStreamDone = errors.New("the stream is done")
706 // StatusGoAway indicates that the server sent a GOAWAY that included this
707 // stream's ID in unprocessed RPCs.
708 statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
677) 709)
678 710
679// TODO: See if we can replace StreamError with status package errors.
680
681// StreamError is an error that only affects one stream within a connection.
682type StreamError struct {
683 Code codes.Code
684 Desc string
685}
686
687func (e StreamError) Error() string {
688 return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
689}
690
691// wait blocks until it can receive from ctx.Done, closing, or proceed.
692// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
693// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
694// it return the StreamError for ctx.Err.
695// If it receives from goAway, it returns 0, ErrStreamDrain.
696// If it receives from closing, it returns 0, ErrConnClosing.
697// If it receives from proceed, it returns the received integer, nil.
698func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
699 select {
700 case <-ctx.Done():
701 return 0, ContextErr(ctx.Err())
702 case <-done:
703 // User cancellation has precedence.
704 select {
705 case <-ctx.Done():
706 return 0, ContextErr(ctx.Err())
707 default:
708 }
709 return 0, io.EOF
710 case <-goAway:
711 return 0, ErrStreamDrain
712 case <-closing:
713 return 0, ErrConnClosing
714 case i := <-proceed:
715 return i, nil
716 }
717}
718
719// GoAwayReason contains the reason for the GoAway frame received. 711// GoAwayReason contains the reason for the GoAway frame received.
720type GoAwayReason uint8 712type GoAwayReason uint8
721 713
722const ( 714const (
723 // Invalid indicates that no GoAway frame is received. 715 // GoAwayInvalid indicates that no GoAway frame is received.
724 Invalid GoAwayReason = 0 716 GoAwayInvalid GoAwayReason = 0
725 // NoReason is the default value when GoAway frame is received. 717 // GoAwayNoReason is the default value when GoAway frame is received.
726 NoReason GoAwayReason = 1 718 GoAwayNoReason GoAwayReason = 1
727 // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm 719 // GoAwayTooManyPings indicates that a GoAway frame with
728 // was recieved and that the debug data said "too_many_pings". 720 // ErrCodeEnhanceYourCalm was received and that the debug data said
729 TooManyPings GoAwayReason = 2 721 // "too_many_pings".
722 GoAwayTooManyPings GoAwayReason = 2
730) 723)
724
725// channelzData is used to store channelz related data for http2Client and http2Server.
726// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
727// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
728// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
729type channelzData struct {
730 kpCount int64
731 // The number of streams that have started, including already finished ones.
732 streamsStarted int64
733 // Client side: The number of streams that have ended successfully by receiving
734 // EoS bit set frame from server.
735 // Server side: The number of streams that have ended successfully by sending
736 // frame with EoS bit set.
737 streamsSucceeded int64
738 streamsFailed int64
739 // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
740 // instead of time.Time since it's more costly to atomically update time.Time variable than int64
741 // variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
742 lastStreamCreatedTime int64
743 msgSent int64
744 msgRecv int64
745 lastMsgSentTime int64
746 lastMsgRecvTime int64
747}
748
749// ContextErr converts the error from context package into a status error.
750func ContextErr(err error) error {
751 switch err {
752 case context.DeadlineExceeded:
753 return status.Error(codes.DeadlineExceeded, err.Error())
754 case context.Canceled:
755 return status.Error(codes.Canceled, err.Error())
756 }
757 return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
758}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index f8adc7e..899e72d 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -16,7 +16,8 @@
16 * 16 *
17 */ 17 */
18 18
19// Package keepalive defines configurable parameters for point-to-point healthcheck. 19// Package keepalive defines configurable parameters for point-to-point
20// healthcheck.
20package keepalive 21package keepalive
21 22
22import ( 23import (
@@ -24,42 +25,59 @@ import (
24) 25)
25 26
26// ClientParameters is used to set keepalive parameters on the client-side. 27// ClientParameters is used to set keepalive parameters on the client-side.
27// These configure how the client will actively probe to notice when a connection is broken 28// These configure how the client will actively probe to notice when a
28// and send pings so intermediaries will be aware of the liveness of the connection. 29// connection is broken and send pings so intermediaries will be aware of the
29// Make sure these parameters are set in coordination with the keepalive policy on the server, 30// liveness of the connection. Make sure these parameters are set in
30// as incompatible settings can result in closing of connection. 31// coordination with the keepalive policy on the server, as incompatible
32// settings can result in closing of connection.
31type ClientParameters struct { 33type ClientParameters struct {
32 // After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive. 34 // After a duration of this time if the client doesn't see any activity it
35 // pings the server to see if the transport is still alive.
33 Time time.Duration // The current default value is infinity. 36 Time time.Duration // The current default value is infinity.
34 // After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that 37 // After having pinged for keepalive check, the client waits for a duration
35 // the connection is closed. 38 // of Timeout and if no activity is seen even after that the connection is
39 // closed.
36 Timeout time.Duration // The current default value is 20 seconds. 40 Timeout time.Duration // The current default value is 20 seconds.
37 // If true, client runs keepalive checks even with no active RPCs. 41 // If true, client sends keepalive pings even with no active RPCs. If false,
42 // when there are no active RPCs, Time and Timeout will be ignored and no
43 // keepalive pings will be sent.
38 PermitWithoutStream bool // false by default. 44 PermitWithoutStream bool // false by default.
39} 45}
40 46
41// ServerParameters is used to set keepalive and max-age parameters on the server-side. 47// ServerParameters is used to set keepalive and max-age parameters on the
48// server-side.
42type ServerParameters struct { 49type ServerParameters struct {
43 // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway. 50 // MaxConnectionIdle is a duration for the amount of time after which an
44 // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. 51 // idle connection would be closed by sending a GoAway. Idleness duration is
52 // defined since the most recent time the number of outstanding RPCs became
53 // zero or the connection establishment.
45 MaxConnectionIdle time.Duration // The current default value is infinity. 54 MaxConnectionIdle time.Duration // The current default value is infinity.
46 // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. 55 // MaxConnectionAge is a duration for the maximum amount of time a
47 // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. 56 // connection may exist before it will be closed by sending a GoAway. A
57 // random jitter of +/-10% will be added to MaxConnectionAge to spread out
58 // connection storms.
48 MaxConnectionAge time.Duration // The current default value is infinity. 59 MaxConnectionAge time.Duration // The current default value is infinity.
49 // MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. 60 // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
61 // which the connection will be forcibly closed.
50 MaxConnectionAgeGrace time.Duration // The current default value is infinity. 62 MaxConnectionAgeGrace time.Duration // The current default value is infinity.
51 // After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive. 63 // After a duration of this time if the server doesn't see any activity it
64 // pings the client to see if the transport is still alive.
52 Time time.Duration // The current default value is 2 hours. 65 Time time.Duration // The current default value is 2 hours.
53 // After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that 66 // After having pinged for keepalive check, the server waits for a duration
54 // the connection is closed. 67 // of Timeout and if no activity is seen even after that the connection is
68 // closed.
55 Timeout time.Duration // The current default value is 20 seconds. 69 Timeout time.Duration // The current default value is 20 seconds.
56} 70}
57 71
58// EnforcementPolicy is used to set keepalive enforcement policy on the server-side. 72// EnforcementPolicy is used to set keepalive enforcement policy on the
59// Server will close connection with a client that violates this policy. 73// server-side. Server will close connection with a client that violates this
74// policy.
60type EnforcementPolicy struct { 75type EnforcementPolicy struct {
61 // MinTime is the minimum amount of time a client should wait before sending a keepalive ping. 76 // MinTime is the minimum amount of time a client should wait before sending
77 // a keepalive ping.
62 MinTime time.Duration // The current default value is 5 minutes. 78 MinTime time.Duration // The current default value is 5 minutes.
63 // If true, server expects keepalive pings even when there are no active streams(RPCs). 79 // If true, server allows keepalive pings even when there are no active
80 // streams(RPCs). If false, and client sends ping when there are no active
81 // streams, server will send GOAWAY and close the connection.
64 PermitWithoutStream bool // false by default. 82 PermitWithoutStream bool // false by default.
65} 83}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index be4f9e7..cf6d1b9 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -17,17 +17,19 @@
17 */ 17 */
18 18
19// Package metadata define the structure of the metadata supported by gRPC library. 19// Package metadata define the structure of the metadata supported by gRPC library.
20// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata. 20// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
21// for more information about custom-metadata.
21package metadata // import "google.golang.org/grpc/metadata" 22package metadata // import "google.golang.org/grpc/metadata"
22 23
23import ( 24import (
25 "context"
24 "fmt" 26 "fmt"
25 "strings" 27 "strings"
26
27 "golang.org/x/net/context"
28) 28)
29 29
30// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used. 30// DecodeKeyValue returns k, v, nil.
31//
32// Deprecated: use k and v directly instead.
31func DecodeKeyValue(k, v string) (string, string, error) { 33func DecodeKeyValue(k, v string) (string, string, error) {
32 return k, v, nil 34 return k, v, nil
33} 35}
@@ -44,6 +46,9 @@ type MD map[string][]string
44// - lowercase letters: a-z 46// - lowercase letters: a-z
45// - special characters: -_. 47// - special characters: -_.
46// Uppercase letters are automatically converted to lowercase. 48// Uppercase letters are automatically converted to lowercase.
49//
50// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
51// result in errors if set in metadata.
47func New(m map[string]string) MD { 52func New(m map[string]string) MD {
48 md := MD{} 53 md := MD{}
49 for k, val := range m { 54 for k, val := range m {
@@ -62,6 +67,9 @@ func New(m map[string]string) MD {
62// - lowercase letters: a-z 67// - lowercase letters: a-z
63// - special characters: -_. 68// - special characters: -_.
64// Uppercase letters are automatically converted to lowercase. 69// Uppercase letters are automatically converted to lowercase.
70//
71// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
72// result in errors if set in metadata.
65func Pairs(kv ...string) MD { 73func Pairs(kv ...string) MD {
66 if len(kv)%2 == 1 { 74 if len(kv)%2 == 1 {
67 panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) 75 panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
@@ -88,6 +96,30 @@ func (md MD) Copy() MD {
88 return Join(md) 96 return Join(md)
89} 97}
90 98
99// Get obtains the values for a given key.
100func (md MD) Get(k string) []string {
101 k = strings.ToLower(k)
102 return md[k]
103}
104
105// Set sets the value of a given key with a slice of values.
106func (md MD) Set(k string, vals ...string) {
107 if len(vals) == 0 {
108 return
109 }
110 k = strings.ToLower(k)
111 md[k] = vals
112}
113
114// Append adds the values to key k, not overwriting what was already stored at that key.
115func (md MD) Append(k string, vals ...string) {
116 if len(vals) == 0 {
117 return
118 }
119 k = strings.ToLower(k)
120 md[k] = append(md[k], vals...)
121}
122
91// Join joins any number of mds into a single MD. 123// Join joins any number of mds into a single MD.
92// The order of values for each key is determined by the order in which 124// The order of values for each key is determined by the order in which
93// the mds containing those values are presented to Join. 125// the mds containing those values are presented to Join.
@@ -104,24 +136,31 @@ func Join(mds ...MD) MD {
104type mdIncomingKey struct{} 136type mdIncomingKey struct{}
105type mdOutgoingKey struct{} 137type mdOutgoingKey struct{}
106 138
107// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated.
108func NewContext(ctx context.Context, md MD) context.Context {
109 return NewOutgoingContext(ctx, md)
110}
111
112// NewIncomingContext creates a new context with incoming md attached. 139// NewIncomingContext creates a new context with incoming md attached.
113func NewIncomingContext(ctx context.Context, md MD) context.Context { 140func NewIncomingContext(ctx context.Context, md MD) context.Context {
114 return context.WithValue(ctx, mdIncomingKey{}, md) 141 return context.WithValue(ctx, mdIncomingKey{}, md)
115} 142}
116 143
117// NewOutgoingContext creates a new context with outgoing md attached. 144// NewOutgoingContext creates a new context with outgoing md attached. If used
145// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
146// overwrite any previously-appended metadata.
118func NewOutgoingContext(ctx context.Context, md MD) context.Context { 147func NewOutgoingContext(ctx context.Context, md MD) context.Context {
119 return context.WithValue(ctx, mdOutgoingKey{}, md) 148 return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
120} 149}
121 150
122// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated. 151// AppendToOutgoingContext returns a new context with the provided kv merged
123func FromContext(ctx context.Context) (md MD, ok bool) { 152// with any existing metadata in the context. Please refer to the
124 return FromIncomingContext(ctx) 153// documentation of Pairs for a description of kv.
154func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
155 if len(kv)%2 == 1 {
156 panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
157 }
158 md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
159 added := make([][]string, len(md.added)+1)
160 copy(added, md.added)
161 added[len(added)-1] = make([]string, len(kv))
162 copy(added[len(added)-1], kv)
163 return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
125} 164}
126 165
127// FromIncomingContext returns the incoming metadata in ctx if it exists. The 166// FromIncomingContext returns the incoming metadata in ctx if it exists. The
@@ -132,10 +171,39 @@ func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
132 return 171 return
133} 172}
134 173
174// FromOutgoingContextRaw returns the un-merged, intermediary contents
175// of rawMD. Remember to perform strings.ToLower on the keys. The returned
176// MD should not be modified. Writing to it may cause races. Modification
177// should be made to copies of the returned MD.
178//
179// This is intended for gRPC-internal use ONLY.
180func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
181 raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
182 if !ok {
183 return nil, nil, false
184 }
185
186 return raw.md, raw.added, true
187}
188
135// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The 189// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
136// returned MD should not be modified. Writing to it may cause races. 190// returned MD should not be modified. Writing to it may cause races.
137// Modification should be made to the copies of the returned MD. 191// Modification should be made to copies of the returned MD.
138func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { 192func FromOutgoingContext(ctx context.Context) (MD, bool) {
139 md, ok = ctx.Value(mdOutgoingKey{}).(MD) 193 raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
140 return 194 if !ok {
195 return nil, false
196 }
197
198 mds := make([]MD, 0, len(raw.added)+1)
199 mds = append(mds, raw.md)
200 for _, vv := range raw.added {
201 mds = append(mds, Pairs(vv...))
202 }
203 return Join(mds...), ok
204}
205
206type rawMD struct {
207 md MD
208 added [][]string
141} 209}
diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go
index efd37e3..fd8cd3b 100644
--- a/vendor/google.golang.org/grpc/naming/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go
@@ -19,13 +19,13 @@
19package naming 19package naming
20 20
21import ( 21import (
22 "context"
22 "errors" 23 "errors"
23 "fmt" 24 "fmt"
24 "net" 25 "net"
25 "strconv" 26 "strconv"
26 "time" 27 "time"
27 28
28 "golang.org/x/net/context"
29 "google.golang.org/grpc/grpclog" 29 "google.golang.org/grpc/grpclog"
30) 30)
31 31
@@ -37,6 +37,9 @@ const (
37var ( 37var (
38 errMissingAddr = errors.New("missing address") 38 errMissingAddr = errors.New("missing address")
39 errWatcherClose = errors.New("watcher has been closed") 39 errWatcherClose = errors.New("watcher has been closed")
40
41 lookupHost = net.DefaultResolver.LookupHost
42 lookupSRV = net.DefaultResolver.LookupSRV
40) 43)
41 44
42// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and 45// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
@@ -141,8 +144,8 @@ type dnsWatcher struct {
141 r *dnsResolver 144 r *dnsResolver
142 host string 145 host string
143 port string 146 port string
144 // The latest resolved address list 147 // The latest resolved address set
145 curAddrs []*Update 148 curAddrs map[string]*Update
146 ctx context.Context 149 ctx context.Context
147 cancel context.CancelFunc 150 cancel context.CancelFunc
148 t *time.Timer 151 t *time.Timer
@@ -153,10 +156,10 @@ type ipWatcher struct {
153 updateChan chan *Update 156 updateChan chan *Update
154} 157}
155 158
156// Next returns the adrress resolution Update for the target. For IP address, 159// Next returns the address resolution Update for the target. For IP address,
157// the resolution is itself, thus polling name server is unncessary. Therefore, 160// the resolution is itself, thus polling name server is unnecessary. Therefore,
158// Next() will return an Update the first time it is called, and will be blocked 161// Next() will return an Update the first time it is called, and will be blocked
159// for all following calls as no Update exisits until watcher is closed. 162// for all following calls as no Update exists until watcher is closed.
160func (i *ipWatcher) Next() ([]*Update, error) { 163func (i *ipWatcher) Next() ([]*Update, error) {
161 u, ok := <-i.updateChan 164 u, ok := <-i.updateChan
162 if !ok { 165 if !ok {
@@ -192,28 +195,24 @@ type AddrMetadataGRPCLB struct {
192 195
193// compileUpdate compares the old resolved addresses and newly resolved addresses, 196// compileUpdate compares the old resolved addresses and newly resolved addresses,
194// and generates an update list 197// and generates an update list
195func (w *dnsWatcher) compileUpdate(newAddrs []*Update) []*Update { 198func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
196 update := make(map[Update]bool) 199 var res []*Update
197 for _, u := range newAddrs { 200 for a, u := range w.curAddrs {
198 update[*u] = true 201 if _, ok := newAddrs[a]; !ok {
199 } 202 u.Op = Delete
200 for _, u := range w.curAddrs { 203 res = append(res, u)
201 if _, ok := update[*u]; ok {
202 delete(update, *u)
203 continue
204 } 204 }
205 update[Update{Addr: u.Addr, Op: Delete, Metadata: u.Metadata}] = true
206 } 205 }
207 res := make([]*Update, 0, len(update)) 206 for a, u := range newAddrs {
208 for k := range update { 207 if _, ok := w.curAddrs[a]; !ok {
209 tmp := k 208 res = append(res, u)
210 res = append(res, &tmp) 209 }
211 } 210 }
212 return res 211 return res
213} 212}
214 213
215func (w *dnsWatcher) lookupSRV() []*Update { 214func (w *dnsWatcher) lookupSRV() map[string]*Update {
216 var newAddrs []*Update 215 newAddrs := make(map[string]*Update)
217 _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) 216 _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
218 if err != nil { 217 if err != nil {
219 grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) 218 grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
@@ -231,15 +230,16 @@ func (w *dnsWatcher) lookupSRV() []*Update {
231 grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) 230 grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
232 continue 231 continue
233 } 232 }
234 newAddrs = append(newAddrs, &Update{Addr: a + ":" + strconv.Itoa(int(s.Port)), 233 addr := a + ":" + strconv.Itoa(int(s.Port))
235 Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}) 234 newAddrs[addr] = &Update{Addr: addr,
235 Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
236 } 236 }
237 } 237 }
238 return newAddrs 238 return newAddrs
239} 239}
240 240
241func (w *dnsWatcher) lookupHost() []*Update { 241func (w *dnsWatcher) lookupHost() map[string]*Update {
242 var newAddrs []*Update 242 newAddrs := make(map[string]*Update)
243 addrs, err := lookupHost(w.ctx, w.host) 243 addrs, err := lookupHost(w.ctx, w.host)
244 if err != nil { 244 if err != nil {
245 grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) 245 grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
@@ -251,7 +251,8 @@ func (w *dnsWatcher) lookupHost() []*Update {
251 grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) 251 grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
252 continue 252 continue
253 } 253 }
254 newAddrs = append(newAddrs, &Update{Addr: a + ":" + w.port}) 254 addr := a + ":" + w.port
255 newAddrs[addr] = &Update{Addr: addr}
255 } 256 }
256 return newAddrs 257 return newAddrs
257} 258}
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
index 1af7e32..8cc39e9 100644
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ b/vendor/google.golang.org/grpc/naming/naming.go
@@ -18,20 +18,26 @@
18 18
19// Package naming defines the naming API and related data structures for gRPC. 19// Package naming defines the naming API and related data structures for gRPC.
20// The interface is EXPERIMENTAL and may be suject to change. 20// The interface is EXPERIMENTAL and may be suject to change.
21//
22// Deprecated: please use package resolver.
21package naming 23package naming
22 24
23// Operation defines the corresponding operations for a name resolution change. 25// Operation defines the corresponding operations for a name resolution change.
26//
27// Deprecated: please use package resolver.
24type Operation uint8 28type Operation uint8
25 29
26const ( 30const (
27 // Add indicates a new address is added. 31 // Add indicates a new address is added.
28 Add Operation = iota 32 Add Operation = iota
29 // Delete indicates an exisiting address is deleted. 33 // Delete indicates an existing address is deleted.
30 Delete 34 Delete
31) 35)
32 36
33// Update defines a name resolution update. Notice that it is not valid having both 37// Update defines a name resolution update. Notice that it is not valid having both
34// empty string Addr and nil Metadata in an Update. 38// empty string Addr and nil Metadata in an Update.
39//
40// Deprecated: please use package resolver.
35type Update struct { 41type Update struct {
36 // Op indicates the operation of the update. 42 // Op indicates the operation of the update.
37 Op Operation 43 Op Operation
@@ -43,12 +49,16 @@ type Update struct {
43} 49}
44 50
45// Resolver creates a Watcher for a target to track its resolution changes. 51// Resolver creates a Watcher for a target to track its resolution changes.
52//
53// Deprecated: please use package resolver.
46type Resolver interface { 54type Resolver interface {
47 // Resolve creates a Watcher for target. 55 // Resolve creates a Watcher for target.
48 Resolve(target string) (Watcher, error) 56 Resolve(target string) (Watcher, error)
49} 57}
50 58
51// Watcher watches for the updates on the specified target. 59// Watcher watches for the updates on the specified target.
60//
61// Deprecated: please use package resolver.
52type Watcher interface { 62type Watcher interface {
53 // Next blocks until an update or error happens. It may return one or more 63 // Next blocks until an update or error happens. It may return one or more
54 // updates. The first call should get the full set of the results. It should 64 // updates. The first call should get the full set of the results. It should
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index 317b8b9..e01d219 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -21,9 +21,9 @@
21package peer 21package peer
22 22
23import ( 23import (
24 "context"
24 "net" 25 "net"
25 26
26 "golang.org/x/net/context"
27 "google.golang.org/grpc/credentials" 27 "google.golang.org/grpc/credentials"
28) 28)
29 29
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 0000000..14f915d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,180 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package grpc
20
21import (
22 "context"
23 "io"
24 "sync"
25
26 "google.golang.org/grpc/balancer"
27 "google.golang.org/grpc/codes"
28 "google.golang.org/grpc/grpclog"
29 "google.golang.org/grpc/internal/channelz"
30 "google.golang.org/grpc/internal/transport"
31 "google.golang.org/grpc/status"
32)
33
34// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
35// actions and unblock when there's a picker update.
36type pickerWrapper struct {
37 mu sync.Mutex
38 done bool
39 blockingCh chan struct{}
40 picker balancer.Picker
41
42 // The latest connection happened.
43 connErrMu sync.Mutex
44 connErr error
45}
46
47func newPickerWrapper() *pickerWrapper {
48 bp := &pickerWrapper{blockingCh: make(chan struct{})}
49 return bp
50}
51
52func (bp *pickerWrapper) updateConnectionError(err error) {
53 bp.connErrMu.Lock()
54 bp.connErr = err
55 bp.connErrMu.Unlock()
56}
57
58func (bp *pickerWrapper) connectionError() error {
59 bp.connErrMu.Lock()
60 err := bp.connErr
61 bp.connErrMu.Unlock()
62 return err
63}
64
65// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
66func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
67 bp.mu.Lock()
68 if bp.done {
69 bp.mu.Unlock()
70 return
71 }
72 bp.picker = p
73 // bp.blockingCh should never be nil.
74 close(bp.blockingCh)
75 bp.blockingCh = make(chan struct{})
76 bp.mu.Unlock()
77}
78
79func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
80 acw.mu.Lock()
81 ac := acw.ac
82 acw.mu.Unlock()
83 ac.incrCallsStarted()
84 return func(b balancer.DoneInfo) {
85 if b.Err != nil && b.Err != io.EOF {
86 ac.incrCallsFailed()
87 } else {
88 ac.incrCallsSucceeded()
89 }
90 if done != nil {
91 done(b)
92 }
93 }
94}
95
96// pick returns the transport that will be used for the RPC.
97// It may block in the following cases:
98// - there's no picker
99// - the current picker returns ErrNoSubConnAvailable
100// - the current picker returns other errors and failfast is false.
101// - the subConn returned by the current picker is not READY
102// When one of these situations happens, pick blocks until the picker gets updated.
103func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
104 var (
105 p balancer.Picker
106 ch chan struct{}
107 )
108
109 for {
110 bp.mu.Lock()
111 if bp.done {
112 bp.mu.Unlock()
113 return nil, nil, ErrClientConnClosing
114 }
115
116 if bp.picker == nil {
117 ch = bp.blockingCh
118 }
119 if ch == bp.blockingCh {
120 // This could happen when either:
121 // - bp.picker is nil (the previous if condition), or
122 // - has called pick on the current picker.
123 bp.mu.Unlock()
124 select {
125 case <-ctx.Done():
126 return nil, nil, ctx.Err()
127 case <-ch:
128 }
129 continue
130 }
131
132 ch = bp.blockingCh
133 p = bp.picker
134 bp.mu.Unlock()
135
136 subConn, done, err := p.Pick(ctx, opts)
137
138 if err != nil {
139 switch err {
140 case balancer.ErrNoSubConnAvailable:
141 continue
142 case balancer.ErrTransientFailure:
143 if !failfast {
144 continue
145 }
146 return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
147 default:
148 // err is some other error.
149 return nil, nil, toRPCErr(err)
150 }
151 }
152
153 acw, ok := subConn.(*acBalancerWrapper)
154 if !ok {
155 grpclog.Infof("subconn returned from pick is not *acBalancerWrapper")
156 continue
157 }
158 if t, ok := acw.getAddrConn().getReadyTransport(); ok {
159 if channelz.IsOn() {
160 return t, doneChannelzWrapper(acw, done), nil
161 }
162 return t, done, nil
163 }
164 grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
165 // If ok == false, ac.state is not READY.
166 // A valid picker always returns READY subConn. This means the state of ac
167 // just changed, and picker will be updated shortly.
168 // continue back to the beginning of the for loop to repick.
169 }
170}
171
172func (bp *pickerWrapper) close() {
173 bp.mu.Lock()
174 defer bp.mu.Unlock()
175 if bp.done {
176 return
177 }
178 bp.done = true
179 close(bp.blockingCh)
180}
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
new file mode 100644
index 0000000..d1e38aa
--- /dev/null
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -0,0 +1,110 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package grpc
20
21import (
22 "context"
23
24 "google.golang.org/grpc/balancer"
25 "google.golang.org/grpc/connectivity"
26 "google.golang.org/grpc/grpclog"
27 "google.golang.org/grpc/resolver"
28)
29
30// PickFirstBalancerName is the name of the pick_first balancer.
31const PickFirstBalancerName = "pick_first"
32
33func newPickfirstBuilder() balancer.Builder {
34 return &pickfirstBuilder{}
35}
36
37type pickfirstBuilder struct{}
38
39func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
40 return &pickfirstBalancer{cc: cc}
41}
42
43func (*pickfirstBuilder) Name() string {
44 return PickFirstBalancerName
45}
46
47type pickfirstBalancer struct {
48 cc balancer.ClientConn
49 sc balancer.SubConn
50}
51
52func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
53 if err != nil {
54 grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
55 return
56 }
57 if b.sc == nil {
58 b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
59 if err != nil {
60 //TODO(yuxuanli): why not change the cc state to Idle?
61 grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
62 return
63 }
64 b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
65 b.sc.Connect()
66 } else {
67 b.sc.UpdateAddresses(addrs)
68 b.sc.Connect()
69 }
70}
71
72func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
73 grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
74 if b.sc != sc {
75 grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
76 return
77 }
78 if s == connectivity.Shutdown {
79 b.sc = nil
80 return
81 }
82
83 switch s {
84 case connectivity.Ready, connectivity.Idle:
85 b.cc.UpdateBalancerState(s, &picker{sc: sc})
86 case connectivity.Connecting:
87 b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
88 case connectivity.TransientFailure:
89 b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
90 }
91}
92
93func (b *pickfirstBalancer) Close() {
94}
95
96type picker struct {
97 err error
98 sc balancer.SubConn
99}
100
101func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
102 if p.err != nil {
103 return nil, nil, p.err
104 }
105 return p.sc, nil, nil
106}
107
108func init() {
109 balancer.Register(newPickfirstBuilder())
110}
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go
index 2d40236..f8f69bf 100644
--- a/vendor/google.golang.org/grpc/proxy.go
+++ b/vendor/google.golang.org/grpc/proxy.go
@@ -20,6 +20,8 @@ package grpc
20 20
21import ( 21import (
22 "bufio" 22 "bufio"
23 "context"
24 "encoding/base64"
23 "errors" 25 "errors"
24 "fmt" 26 "fmt"
25 "io" 27 "io"
@@ -27,10 +29,10 @@ import (
27 "net/http" 29 "net/http"
28 "net/http/httputil" 30 "net/http/httputil"
29 "net/url" 31 "net/url"
30
31 "golang.org/x/net/context"
32) 32)
33 33
34const proxyAuthHeaderKey = "Proxy-Authorization"
35
34var ( 36var (
35 // errDisabled indicates that proxy is disabled for the address. 37 // errDisabled indicates that proxy is disabled for the address.
36 errDisabled = errors.New("proxy is disabled for the address") 38 errDisabled = errors.New("proxy is disabled for the address")
@@ -38,7 +40,7 @@ var (
38 httpProxyFromEnvironment = http.ProxyFromEnvironment 40 httpProxyFromEnvironment = http.ProxyFromEnvironment
39) 41)
40 42
41func mapAddress(ctx context.Context, address string) (string, error) { 43func mapAddress(ctx context.Context, address string) (*url.URL, error) {
42 req := &http.Request{ 44 req := &http.Request{
43 URL: &url.URL{ 45 URL: &url.URL{
44 Scheme: "https", 46 Scheme: "https",
@@ -47,12 +49,12 @@ func mapAddress(ctx context.Context, address string) (string, error) {
47 } 49 }
48 url, err := httpProxyFromEnvironment(req) 50 url, err := httpProxyFromEnvironment(req)
49 if err != nil { 51 if err != nil {
50 return "", err 52 return nil, err
51 } 53 }
52 if url == nil { 54 if url == nil {
53 return "", errDisabled 55 return nil, errDisabled
54 } 56 }
55 return url.Host, nil 57 return url, nil
56} 58}
57 59
58// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. 60// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
@@ -69,18 +71,28 @@ func (c *bufConn) Read(b []byte) (int, error) {
69 return c.r.Read(b) 71 return c.r.Read(b)
70} 72}
71 73
72func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) { 74func basicAuth(username, password string) string {
75 auth := username + ":" + password
76 return base64.StdEncoding.EncodeToString([]byte(auth))
77}
78
79func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
73 defer func() { 80 defer func() {
74 if err != nil { 81 if err != nil {
75 conn.Close() 82 conn.Close()
76 } 83 }
77 }() 84 }()
78 85
79 req := (&http.Request{ 86 req := &http.Request{
80 Method: http.MethodConnect, 87 Method: http.MethodConnect,
81 URL: &url.URL{Host: addr}, 88 URL: &url.URL{Host: backendAddr},
82 Header: map[string][]string{"User-Agent": {grpcUA}}, 89 Header: map[string][]string{"User-Agent": {grpcUA}},
83 }) 90 }
91 if t := proxyURL.User; t != nil {
92 u := t.Username()
93 p, _ := t.Password()
94 req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
95 }
84 96
85 if err := sendHTTPRequest(ctx, req, conn); err != nil { 97 if err := sendHTTPRequest(ctx, req, conn); err != nil {
86 return nil, fmt.Errorf("failed to write the HTTP request: %v", err) 98 return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
@@ -108,23 +120,33 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_
108// provided dialer, does HTTP CONNECT handshake and returns the connection. 120// provided dialer, does HTTP CONNECT handshake and returns the connection.
109func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { 121func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
110 return func(ctx context.Context, addr string) (conn net.Conn, err error) { 122 return func(ctx context.Context, addr string) (conn net.Conn, err error) {
111 var skipHandshake bool 123 var newAddr string
112 newAddr, err := mapAddress(ctx, addr) 124 proxyURL, err := mapAddress(ctx, addr)
113 if err != nil { 125 if err != nil {
114 if err != errDisabled { 126 if err != errDisabled {
115 return nil, err 127 return nil, err
116 } 128 }
117 skipHandshake = true
118 newAddr = addr 129 newAddr = addr
130 } else {
131 newAddr = proxyURL.Host
119 } 132 }
120 133
121 conn, err = dialer(ctx, newAddr) 134 conn, err = dialer(ctx, newAddr)
122 if err != nil { 135 if err != nil {
123 return 136 return
124 } 137 }
125 if !skipHandshake { 138 if proxyURL != nil {
126 conn, err = doHTTPConnectHandshake(ctx, conn, addr) 139 // proxy is disabled if proxyURL is nil.
140 conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
127 } 141 }
128 return 142 return
129 } 143 }
130} 144}
145
146func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
147 req = req.WithContext(ctx)
148 if err := req.Write(conn); err != nil {
149 return fmt.Errorf("failed to write the HTTP request: %v", err)
150 }
151 return nil
152}
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 0000000..f33189f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,436 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package dns implements a dns resolver to be installed as the default resolver
20// in grpc.
21package dns
22
23import (
24 "context"
25 "encoding/json"
26 "errors"
27 "fmt"
28 "net"
29 "os"
30 "strconv"
31 "strings"
32 "sync"
33 "time"
34
35 "google.golang.org/grpc/grpclog"
36 "google.golang.org/grpc/internal/backoff"
37 "google.golang.org/grpc/internal/grpcrand"
38 "google.golang.org/grpc/resolver"
39)
40
41func init() {
42 resolver.Register(NewBuilder())
43}
44
45const (
46 defaultPort = "443"
47 defaultFreq = time.Minute * 30
48 defaultDNSSvrPort = "53"
49 golang = "GO"
50 // In DNS, service config is encoded in a TXT record via the mechanism
51 // described in RFC-1464 using the attribute name grpc_config.
52 txtAttribute = "grpc_config="
53)
54
55var (
56 errMissingAddr = errors.New("dns resolver: missing address")
57
58 // Addresses ending with a colon that is supposed to be the separator
59 // between host and port is not allowed. E.g. "::" is a valid address as
60 // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
61 // a colon as the host and port separator
62 errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
63)
64
65var (
66 defaultResolver netResolver = net.DefaultResolver
67)
68
69var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
70 return func(ctx context.Context, network, address string) (net.Conn, error) {
71 var dialer net.Dialer
72 return dialer.DialContext(ctx, network, authority)
73 }
74}
75
76var customAuthorityResolver = func(authority string) (netResolver, error) {
77 host, port, err := parseTarget(authority, defaultDNSSvrPort)
78 if err != nil {
79 return nil, err
80 }
81
82 authorityWithPort := net.JoinHostPort(host, port)
83
84 return &net.Resolver{
85 PreferGo: true,
86 Dial: customAuthorityDialler(authorityWithPort),
87 }, nil
88}
89
90// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
91func NewBuilder() resolver.Builder {
92 return &dnsBuilder{minFreq: defaultFreq}
93}
94
95type dnsBuilder struct {
96 // minimum frequency of polling the DNS server.
97 minFreq time.Duration
98}
99
100// Build creates and starts a DNS resolver that watches the name resolution of the target.
101func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
102 host, port, err := parseTarget(target.Endpoint, defaultPort)
103 if err != nil {
104 return nil, err
105 }
106
107 // IP address.
108 if net.ParseIP(host) != nil {
109 host, _ = formatIP(host)
110 addr := []resolver.Address{{Addr: host + ":" + port}}
111 i := &ipResolver{
112 cc: cc,
113 ip: addr,
114 rn: make(chan struct{}, 1),
115 q: make(chan struct{}),
116 }
117 cc.NewAddress(addr)
118 go i.watcher()
119 return i, nil
120 }
121
122 // DNS address (non-IP).
123 ctx, cancel := context.WithCancel(context.Background())
124 d := &dnsResolver{
125 freq: b.minFreq,
126 backoff: backoff.Exponential{MaxDelay: b.minFreq},
127 host: host,
128 port: port,
129 ctx: ctx,
130 cancel: cancel,
131 cc: cc,
132 t: time.NewTimer(0),
133 rn: make(chan struct{}, 1),
134 disableServiceConfig: opts.DisableServiceConfig,
135 }
136
137 if target.Authority == "" {
138 d.resolver = defaultResolver
139 } else {
140 d.resolver, err = customAuthorityResolver(target.Authority)
141 if err != nil {
142 return nil, err
143 }
144 }
145
146 d.wg.Add(1)
147 go d.watcher()
148 return d, nil
149}
150
151// Scheme returns the naming scheme of this resolver builder, which is "dns".
152func (b *dnsBuilder) Scheme() string {
153 return "dns"
154}
155
156type netResolver interface {
157 LookupHost(ctx context.Context, host string) (addrs []string, err error)
158 LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
159 LookupTXT(ctx context.Context, name string) (txts []string, err error)
160}
161
162// ipResolver watches for the name resolution update for an IP address.
163type ipResolver struct {
164 cc resolver.ClientConn
165 ip []resolver.Address
166 // rn channel is used by ResolveNow() to force an immediate resolution of the target.
167 rn chan struct{}
168 q chan struct{}
169}
170
171// ResolveNow resend the address it stores, no resolution is needed.
172func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
173 select {
174 case i.rn <- struct{}{}:
175 default:
176 }
177}
178
179// Close closes the ipResolver.
180func (i *ipResolver) Close() {
181 close(i.q)
182}
183
184func (i *ipResolver) watcher() {
185 for {
186 select {
187 case <-i.rn:
188 i.cc.NewAddress(i.ip)
189 case <-i.q:
190 return
191 }
192 }
193}
194
195// dnsResolver watches for the name resolution update for a non-IP target.
196type dnsResolver struct {
197 freq time.Duration
198 backoff backoff.Exponential
199 retryCount int
200 host string
201 port string
202 resolver netResolver
203 ctx context.Context
204 cancel context.CancelFunc
205 cc resolver.ClientConn
206 // rn channel is used by ResolveNow() to force an immediate resolution of the target.
207 rn chan struct{}
208 t *time.Timer
209 // wg is used to enforce Close() to return after the watcher() goroutine has finished.
210 // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
211 // replace the real lookup functions with mocked ones to facilitate testing.
212 // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
213 // will warns lookup (READ the lookup function pointers) inside watcher() goroutine
214 // has data race with replaceNetFunc (WRITE the lookup function pointers).
215 wg sync.WaitGroup
216 disableServiceConfig bool
217}
218
219// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
220func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
221 select {
222 case d.rn <- struct{}{}:
223 default:
224 }
225}
226
227// Close closes the dnsResolver.
228func (d *dnsResolver) Close() {
229 d.cancel()
230 d.wg.Wait()
231 d.t.Stop()
232}
233
234func (d *dnsResolver) watcher() {
235 defer d.wg.Done()
236 for {
237 select {
238 case <-d.ctx.Done():
239 return
240 case <-d.t.C:
241 case <-d.rn:
242 }
243 result, sc := d.lookup()
244 // Next lookup should happen within an interval defined by d.freq. It may be
245 // more often due to exponential retry on empty address list.
246 if len(result) == 0 {
247 d.retryCount++
248 d.t.Reset(d.backoff.Backoff(d.retryCount))
249 } else {
250 d.retryCount = 0
251 d.t.Reset(d.freq)
252 }
253 d.cc.NewServiceConfig(sc)
254 d.cc.NewAddress(result)
255 }
256}
257
258func (d *dnsResolver) lookupSRV() []resolver.Address {
259 var newAddrs []resolver.Address
260 _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
261 if err != nil {
262 grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
263 return nil
264 }
265 for _, s := range srvs {
266 lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
267 if err != nil {
268 grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
269 continue
270 }
271 for _, a := range lbAddrs {
272 a, ok := formatIP(a)
273 if !ok {
274 grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
275 continue
276 }
277 addr := a + ":" + strconv.Itoa(int(s.Port))
278 newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
279 }
280 }
281 return newAddrs
282}
283
284func (d *dnsResolver) lookupTXT() string {
285 ss, err := d.resolver.LookupTXT(d.ctx, d.host)
286 if err != nil {
287 grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
288 return ""
289 }
290 var res string
291 for _, s := range ss {
292 res += s
293 }
294
295 // TXT record must have "grpc_config=" attribute in order to be used as service config.
296 if !strings.HasPrefix(res, txtAttribute) {
297 grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
298 return ""
299 }
300 return strings.TrimPrefix(res, txtAttribute)
301}
302
303func (d *dnsResolver) lookupHost() []resolver.Address {
304 var newAddrs []resolver.Address
305 addrs, err := d.resolver.LookupHost(d.ctx, d.host)
306 if err != nil {
307 grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
308 return nil
309 }
310 for _, a := range addrs {
311 a, ok := formatIP(a)
312 if !ok {
313 grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
314 continue
315 }
316 addr := a + ":" + d.port
317 newAddrs = append(newAddrs, resolver.Address{Addr: addr})
318 }
319 return newAddrs
320}
321
322func (d *dnsResolver) lookup() ([]resolver.Address, string) {
323 newAddrs := d.lookupSRV()
324 // Support fallback to non-balancer address.
325 newAddrs = append(newAddrs, d.lookupHost()...)
326 if d.disableServiceConfig {
327 return newAddrs, ""
328 }
329 sc := d.lookupTXT()
330 return newAddrs, canaryingSC(sc)
331}
332
333// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
334// If addr is an IPv4 address, return the addr and ok = true.
335// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
336func formatIP(addr string) (addrIP string, ok bool) {
337 ip := net.ParseIP(addr)
338 if ip == nil {
339 return "", false
340 }
341 if ip.To4() != nil {
342 return addr, true
343 }
344 return "[" + addr + "]", true
345}
346
347// parseTarget takes the user input target string and default port, returns formatted host and port info.
348// If target doesn't specify a port, set the port to be the defaultPort.
349// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets
350// are strippd when setting the host.
351// examples:
352// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
353// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
354// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
355// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
356func parseTarget(target, defaultPort string) (host, port string, err error) {
357 if target == "" {
358 return "", "", errMissingAddr
359 }
360 if ip := net.ParseIP(target); ip != nil {
361 // target is an IPv4 or IPv6(without brackets) address
362 return target, defaultPort, nil
363 }
364 if host, port, err = net.SplitHostPort(target); err == nil {
365 if port == "" {
366 // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
367 return "", "", errEndsWithColon
368 }
369 // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
370 if host == "" {
371 // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
372 host = "localhost"
373 }
374 return host, port, nil
375 }
376 if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
377 // target doesn't have port
378 return host, port, nil
379 }
380 return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
381}
382
383type rawChoice struct {
384 ClientLanguage *[]string `json:"clientLanguage,omitempty"`
385 Percentage *int `json:"percentage,omitempty"`
386 ClientHostName *[]string `json:"clientHostName,omitempty"`
387 ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"`
388}
389
390func containsString(a *[]string, b string) bool {
391 if a == nil {
392 return true
393 }
394 for _, c := range *a {
395 if c == b {
396 return true
397 }
398 }
399 return false
400}
401
402func chosenByPercentage(a *int) bool {
403 if a == nil {
404 return true
405 }
406 return grpcrand.Intn(100)+1 <= *a
407}
408
409func canaryingSC(js string) string {
410 if js == "" {
411 return ""
412 }
413 var rcs []rawChoice
414 err := json.Unmarshal([]byte(js), &rcs)
415 if err != nil {
416 grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
417 return ""
418 }
419 cliHostname, err := os.Hostname()
420 if err != nil {
421 grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
422 return ""
423 }
424 var sc string
425 for _, c := range rcs {
426 if !containsString(c.ClientLanguage, golang) ||
427 !chosenByPercentage(c.Percentage) ||
428 !containsString(c.ClientHostName, cliHostname) ||
429 c.ServiceConfig == nil {
430 continue
431 }
432 sc = string(*c.ServiceConfig)
433 break
434 }
435 return sc
436}
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
new file mode 100644
index 0000000..b76010d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
@@ -0,0 +1,57 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package passthrough implements a pass-through resolver. It sends the target
20// name without scheme back to gRPC as resolved address.
21package passthrough
22
23import "google.golang.org/grpc/resolver"
24
25const scheme = "passthrough"
26
27type passthroughBuilder struct{}
28
29func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
30 r := &passthroughResolver{
31 target: target,
32 cc: cc,
33 }
34 r.start()
35 return r, nil
36}
37
38func (*passthroughBuilder) Scheme() string {
39 return scheme
40}
41
42type passthroughResolver struct {
43 target resolver.Target
44 cc resolver.ClientConn
45}
46
47func (r *passthroughResolver) start() {
48 r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
49}
50
51func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
52
53func (*passthroughResolver) Close() {}
54
55func init() {
56 resolver.Register(&passthroughBuilder{})
57}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 0000000..145cf47
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,158 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package resolver defines APIs for name resolution in gRPC.
20// All APIs in this package are experimental.
21package resolver
22
23var (
24 // m is a map from scheme to resolver builder.
25 m = make(map[string]Builder)
26 // defaultScheme is the default scheme to use.
27 defaultScheme = "passthrough"
28)
29
30// TODO(bar) install dns resolver in init(){}.
31
32// Register registers the resolver builder to the resolver map. b.Scheme will be
33// used as the scheme registered with this builder.
34//
35// NOTE: this function must only be called during initialization time (i.e. in
36// an init() function), and is not thread-safe. If multiple Resolvers are
37// registered with the same name, the one registered last will take effect.
38func Register(b Builder) {
39 m[b.Scheme()] = b
40}
41
42// Get returns the resolver builder registered with the given scheme.
43//
44// If no builder is register with the scheme, nil will be returned.
45func Get(scheme string) Builder {
46 if b, ok := m[scheme]; ok {
47 return b
48 }
49 return nil
50}
51
52// SetDefaultScheme sets the default scheme that will be used. The default
53// default scheme is "passthrough".
54//
55// NOTE: this function must only be called during initialization time (i.e. in
56// an init() function), and is not thread-safe. The scheme set last overrides
57// previously set values.
58func SetDefaultScheme(scheme string) {
59 defaultScheme = scheme
60}
61
62// GetDefaultScheme gets the default scheme that will be used.
63func GetDefaultScheme() string {
64 return defaultScheme
65}
66
67// AddressType indicates the address type returned by name resolution.
68type AddressType uint8
69
70const (
71 // Backend indicates the address is for a backend server.
72 Backend AddressType = iota
73 // GRPCLB indicates the address is for a grpclb load balancer.
74 GRPCLB
75)
76
77// Address represents a server the client connects to.
78// This is the EXPERIMENTAL API and may be changed or extended in the future.
79type Address struct {
80 // Addr is the server address on which a connection will be established.
81 Addr string
82 // Type is the type of this address.
83 Type AddressType
84 // ServerName is the name of this address.
85 //
86 // e.g. if Type is GRPCLB, ServerName should be the name of the remote load
87 // balancer, not the name of the backend.
88 ServerName string
89 // Metadata is the information associated with Addr, which may be used
90 // to make load balancing decision.
91 Metadata interface{}
92}
93
94// BuildOption includes additional information for the builder to create
95// the resolver.
96type BuildOption struct {
97 // DisableServiceConfig indicates whether resolver should fetch service config data.
98 DisableServiceConfig bool
99}
100
101// ClientConn contains the callbacks for resolver to notify any updates
102// to the gRPC ClientConn.
103//
104// This interface is to be implemented by gRPC. Users should not need a
105// brand new implementation of this interface. For the situations like
106// testing, the new implementation should embed this interface. This allows
107// gRPC to add new methods to this interface.
108type ClientConn interface {
109 // NewAddress is called by resolver to notify ClientConn a new list
110 // of resolved addresses.
111 // The address list should be the complete list of resolved addresses.
112 NewAddress(addresses []Address)
113 // NewServiceConfig is called by resolver to notify ClientConn a new
114 // service config. The service config should be provided as a json string.
115 NewServiceConfig(serviceConfig string)
116}
117
118// Target represents a target for gRPC, as specified in:
119// https://github.com/grpc/grpc/blob/master/doc/naming.md.
120type Target struct {
121 Scheme string
122 Authority string
123 Endpoint string
124}
125
126// Builder creates a resolver that will be used to watch name resolution updates.
127type Builder interface {
128 // Build creates a new resolver for the given target.
129 //
130 // gRPC dial calls Build synchronously, and fails if the returned error is
131 // not nil.
132 Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
133 // Scheme returns the scheme supported by this resolver.
134 // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
135 Scheme() string
136}
137
138// ResolveNowOption includes additional information for ResolveNow.
139type ResolveNowOption struct{}
140
141// Resolver watches for the updates on the specified target.
142// Updates include address updates and service config updates.
143type Resolver interface {
144 // ResolveNow will be called by gRPC to try to resolve the target name
145 // again. It's just a hint, resolver can ignore this if it's not necessary.
146 //
147 // It could be called multiple times concurrently.
148 ResolveNow(ResolveNowOption)
149 // Close closes the resolver.
150 Close()
151}
152
153// UnregisterForTesting removes the resolver builder with the given scheme from the
154// resolver map.
155// This function is for testing only.
156func UnregisterForTesting(scheme string) {
157 delete(m, scheme)
158}
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
new file mode 100644
index 0000000..50991ea
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -0,0 +1,155 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package grpc
20
21import (
22 "fmt"
23 "strings"
24
25 "google.golang.org/grpc/grpclog"
26 "google.golang.org/grpc/internal/channelz"
27 "google.golang.org/grpc/resolver"
28)
29
30// ccResolverWrapper is a wrapper on top of cc for resolvers.
31// It implements resolver.ClientConnection interface.
32type ccResolverWrapper struct {
33 cc *ClientConn
34 resolver resolver.Resolver
35 addrCh chan []resolver.Address
36 scCh chan string
37 done chan struct{}
38 lastAddressesCount int
39}
40
41// split2 returns the values from strings.SplitN(s, sep, 2).
42// If sep is not found, it returns ("", "", false) instead.
43func split2(s, sep string) (string, string, bool) {
44 spl := strings.SplitN(s, sep, 2)
45 if len(spl) < 2 {
46 return "", "", false
47 }
48 return spl[0], spl[1], true
49}
50
51// parseTarget splits target into a struct containing scheme, authority and
52// endpoint.
53//
54// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
55// target}.
56func parseTarget(target string) (ret resolver.Target) {
57 var ok bool
58 ret.Scheme, ret.Endpoint, ok = split2(target, "://")
59 if !ok {
60 return resolver.Target{Endpoint: target}
61 }
62 ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
63 if !ok {
64 return resolver.Target{Endpoint: target}
65 }
66 return ret
67}
68
69// newCCResolverWrapper parses cc.target for scheme and gets the resolver
70// builder for this scheme and builds the resolver. The monitoring goroutine
71// for it is not started yet and can be created by calling start().
72//
73// If withResolverBuilder dial option is set, the specified resolver will be
74// used instead.
75func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
76 rb := cc.dopts.resolverBuilder
77 if rb == nil {
78 return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
79 }
80
81 ccr := &ccResolverWrapper{
82 cc: cc,
83 addrCh: make(chan []resolver.Address, 1),
84 scCh: make(chan string, 1),
85 done: make(chan struct{}),
86 }
87
88 var err error
89 ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
90 if err != nil {
91 return nil, err
92 }
93 return ccr, nil
94}
95
96func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
97 ccr.resolver.ResolveNow(o)
98}
99
100func (ccr *ccResolverWrapper) close() {
101 ccr.resolver.Close()
102 close(ccr.done)
103}
104
105// NewAddress is called by the resolver implemenetion to send addresses to gRPC.
106func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
107 select {
108 case <-ccr.done:
109 return
110 default:
111 }
112 grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
113 if channelz.IsOn() {
114 ccr.addChannelzTraceEvent(addrs)
115 }
116 ccr.cc.handleResolvedAddrs(addrs, nil)
117}
118
119// NewServiceConfig is called by the resolver implemenetion to send service
120// configs to gRPC.
121func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
122 select {
123 case <-ccr.done:
124 return
125 default:
126 }
127 grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
128 ccr.cc.handleServiceConfig(sc)
129}
130
131func (ccr *ccResolverWrapper) addChannelzTraceEvent(addrs []resolver.Address) {
132 if len(addrs) == 0 && ccr.lastAddressesCount != 0 {
133 channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
134 Desc: "Resolver returns an empty address list",
135 Severity: channelz.CtWarning,
136 })
137 } else if len(addrs) != 0 && ccr.lastAddressesCount == 0 {
138 var s string
139 for i, a := range addrs {
140 if a.ServerName != "" {
141 s += a.Addr + "(" + a.ServerName + ")"
142 } else {
143 s += a.Addr
144 }
145 if i != len(addrs)-1 {
146 s += " "
147 }
148 }
149 channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
150 Desc: fmt.Sprintf("Resolver returns a non-empty address list (previous one was empty) %q", s),
151 Severity: channelz.CtINFO,
152 })
153 }
154 ccr.lastAddressesCount = len(addrs)
155}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 9b9d388..8d0d3dc 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -21,24 +21,31 @@ package grpc
21import ( 21import (
22 "bytes" 22 "bytes"
23 "compress/gzip" 23 "compress/gzip"
24 "context"
24 "encoding/binary" 25 "encoding/binary"
26 "fmt"
25 "io" 27 "io"
26 "io/ioutil" 28 "io/ioutil"
27 "math" 29 "math"
30 "net/url"
31 "strings"
28 "sync" 32 "sync"
29 "time" 33 "time"
30 34
31 "golang.org/x/net/context"
32 "google.golang.org/grpc/codes" 35 "google.golang.org/grpc/codes"
33 "google.golang.org/grpc/credentials" 36 "google.golang.org/grpc/credentials"
37 "google.golang.org/grpc/encoding"
38 "google.golang.org/grpc/encoding/proto"
39 "google.golang.org/grpc/internal/transport"
34 "google.golang.org/grpc/metadata" 40 "google.golang.org/grpc/metadata"
35 "google.golang.org/grpc/peer" 41 "google.golang.org/grpc/peer"
36 "google.golang.org/grpc/stats" 42 "google.golang.org/grpc/stats"
37 "google.golang.org/grpc/status" 43 "google.golang.org/grpc/status"
38 "google.golang.org/grpc/transport"
39) 44)
40 45
41// Compressor defines the interface gRPC uses to compress a message. 46// Compressor defines the interface gRPC uses to compress a message.
47//
48// Deprecated: use package encoding.
42type Compressor interface { 49type Compressor interface {
43 // Do compresses p into w. 50 // Do compresses p into w.
44 Do(w io.Writer, p []byte) error 51 Do(w io.Writer, p []byte) error
@@ -51,18 +58,39 @@ type gzipCompressor struct {
51} 58}
52 59
53// NewGZIPCompressor creates a Compressor based on GZIP. 60// NewGZIPCompressor creates a Compressor based on GZIP.
61//
62// Deprecated: use package encoding/gzip.
54func NewGZIPCompressor() Compressor { 63func NewGZIPCompressor() Compressor {
64 c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
65 return c
66}
67
68// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
69// of assuming DefaultCompression.
70//
71// The error returned will be nil if the level is valid.
72//
73// Deprecated: use package encoding/gzip.
74func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
75 if level < gzip.DefaultCompression || level > gzip.BestCompression {
76 return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
77 }
55 return &gzipCompressor{ 78 return &gzipCompressor{
56 pool: sync.Pool{ 79 pool: sync.Pool{
57 New: func() interface{} { 80 New: func() interface{} {
58 return gzip.NewWriter(ioutil.Discard) 81 w, err := gzip.NewWriterLevel(ioutil.Discard, level)
82 if err != nil {
83 panic(err)
84 }
85 return w
59 }, 86 },
60 }, 87 },
61 } 88 }, nil
62} 89}
63 90
64func (c *gzipCompressor) Do(w io.Writer, p []byte) error { 91func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
65 z := c.pool.Get().(*gzip.Writer) 92 z := c.pool.Get().(*gzip.Writer)
93 defer c.pool.Put(z)
66 z.Reset(w) 94 z.Reset(w)
67 if _, err := z.Write(p); err != nil { 95 if _, err := z.Write(p); err != nil {
68 return err 96 return err
@@ -75,6 +103,8 @@ func (c *gzipCompressor) Type() string {
75} 103}
76 104
77// Decompressor defines the interface gRPC uses to decompress a message. 105// Decompressor defines the interface gRPC uses to decompress a message.
106//
107// Deprecated: use package encoding.
78type Decompressor interface { 108type Decompressor interface {
79 // Do reads the data from r and uncompress them. 109 // Do reads the data from r and uncompress them.
80 Do(r io.Reader) ([]byte, error) 110 Do(r io.Reader) ([]byte, error)
@@ -87,6 +117,8 @@ type gzipDecompressor struct {
87} 117}
88 118
89// NewGZIPDecompressor creates a Decompressor based on GZIP. 119// NewGZIPDecompressor creates a Decompressor based on GZIP.
120//
121// Deprecated: use package encoding/gzip.
90func NewGZIPDecompressor() Decompressor { 122func NewGZIPDecompressor() Decompressor {
91 return &gzipDecompressor{} 123 return &gzipDecompressor{}
92} 124}
@@ -121,17 +153,23 @@ func (d *gzipDecompressor) Type() string {
121 153
122// callInfo contains all related configuration and information about an RPC. 154// callInfo contains all related configuration and information about an RPC.
123type callInfo struct { 155type callInfo struct {
156 compressorType string
124 failFast bool 157 failFast bool
125 headerMD metadata.MD 158 stream ClientStream
126 trailerMD metadata.MD
127 peer *peer.Peer
128 traceInfo traceInfo // in trace.go
129 maxReceiveMessageSize *int 159 maxReceiveMessageSize *int
130 maxSendMessageSize *int 160 maxSendMessageSize *int
131 creds credentials.PerRPCCredentials 161 creds credentials.PerRPCCredentials
162 contentSubtype string
163 codec baseCodec
164 maxRetryRPCBufferSize int
132} 165}
133 166
134var defaultCallInfo = callInfo{failFast: true} 167func defaultCallInfo() *callInfo {
168 return &callInfo{
169 failFast: true,
170 maxRetryRPCBufferSize: 256 * 1024, // 256KB
171 }
172}
135 173
136// CallOption configures a Call before it starts or extracts information from 174// CallOption configures a Call before it starts or extracts information from
137// a Call after it completes. 175// a Call after it completes.
@@ -153,87 +191,267 @@ type EmptyCallOption struct{}
153func (EmptyCallOption) before(*callInfo) error { return nil } 191func (EmptyCallOption) before(*callInfo) error { return nil }
154func (EmptyCallOption) after(*callInfo) {} 192func (EmptyCallOption) after(*callInfo) {}
155 193
156type beforeCall func(c *callInfo) error
157
158func (o beforeCall) before(c *callInfo) error { return o(c) }
159func (o beforeCall) after(c *callInfo) {}
160
161type afterCall func(c *callInfo)
162
163func (o afterCall) before(c *callInfo) error { return nil }
164func (o afterCall) after(c *callInfo) { o(c) }
165
166// Header returns a CallOptions that retrieves the header metadata 194// Header returns a CallOptions that retrieves the header metadata
167// for a unary RPC. 195// for a unary RPC.
168func Header(md *metadata.MD) CallOption { 196func Header(md *metadata.MD) CallOption {
169 return afterCall(func(c *callInfo) { 197 return HeaderCallOption{HeaderAddr: md}
170 *md = c.headerMD 198}
171 }) 199
200// HeaderCallOption is a CallOption for collecting response header metadata.
201// The metadata field will be populated *after* the RPC completes.
202// This is an EXPERIMENTAL API.
203type HeaderCallOption struct {
204 HeaderAddr *metadata.MD
205}
206
207func (o HeaderCallOption) before(c *callInfo) error { return nil }
208func (o HeaderCallOption) after(c *callInfo) {
209 if c.stream != nil {
210 *o.HeaderAddr, _ = c.stream.Header()
211 }
172} 212}
173 213
174// Trailer returns a CallOptions that retrieves the trailer metadata 214// Trailer returns a CallOptions that retrieves the trailer metadata
175// for a unary RPC. 215// for a unary RPC.
176func Trailer(md *metadata.MD) CallOption { 216func Trailer(md *metadata.MD) CallOption {
177 return afterCall(func(c *callInfo) { 217 return TrailerCallOption{TrailerAddr: md}
178 *md = c.trailerMD 218}
179 }) 219
220// TrailerCallOption is a CallOption for collecting response trailer metadata.
221// The metadata field will be populated *after* the RPC completes.
222// This is an EXPERIMENTAL API.
223type TrailerCallOption struct {
224 TrailerAddr *metadata.MD
225}
226
227func (o TrailerCallOption) before(c *callInfo) error { return nil }
228func (o TrailerCallOption) after(c *callInfo) {
229 if c.stream != nil {
230 *o.TrailerAddr = c.stream.Trailer()
231 }
232}
233
234// Peer returns a CallOption that retrieves peer information for a unary RPC.
235// The peer field will be populated *after* the RPC completes.
236func Peer(p *peer.Peer) CallOption {
237 return PeerCallOption{PeerAddr: p}
180} 238}
181 239
182// Peer returns a CallOption that retrieves peer information for a 240// PeerCallOption is a CallOption for collecting the identity of the remote
183// unary RPC. 241// peer. The peer field will be populated *after* the RPC completes.
184func Peer(peer *peer.Peer) CallOption { 242// This is an EXPERIMENTAL API.
185 return afterCall(func(c *callInfo) { 243type PeerCallOption struct {
186 if c.peer != nil { 244 PeerAddr *peer.Peer
187 *peer = *c.peer 245}
246
247func (o PeerCallOption) before(c *callInfo) error { return nil }
248func (o PeerCallOption) after(c *callInfo) {
249 if c.stream != nil {
250 if x, ok := peer.FromContext(c.stream.Context()); ok {
251 *o.PeerAddr = *x
188 } 252 }
189 }) 253 }
190} 254}
191 255
192// FailFast configures the action to take when an RPC is attempted on broken 256// WaitForReady configures the action to take when an RPC is attempted on broken
193// connections or unreachable servers. If failfast is true, the RPC will fail 257// connections or unreachable servers. If waitForReady is false, the RPC will fail
194// immediately. Otherwise, the RPC client will block the call until a 258// immediately. Otherwise, the RPC client will block the call until a
195// connection is available (or the call is canceled or times out) and will retry 259// connection is available (or the call is canceled or times out) and will
196// the call if it fails due to a transient error. Please refer to 260// retry the call if it fails due to a transient error. gRPC will not retry if
261// data was written to the wire unless the server indicates it did not process
262// the data. Please refer to
197// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. 263// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
198// Note: failFast is default to true. 264//
265// By default, RPCs don't "wait for ready".
266func WaitForReady(waitForReady bool) CallOption {
267 return FailFastCallOption{FailFast: !waitForReady}
268}
269
270// FailFast is the opposite of WaitForReady.
271//
272// Deprecated: use WaitForReady.
199func FailFast(failFast bool) CallOption { 273func FailFast(failFast bool) CallOption {
200 return beforeCall(func(c *callInfo) error { 274 return FailFastCallOption{FailFast: failFast}
201 c.failFast = failFast
202 return nil
203 })
204} 275}
205 276
277// FailFastCallOption is a CallOption for indicating whether an RPC should fail
278// fast or not.
279// This is an EXPERIMENTAL API.
280type FailFastCallOption struct {
281 FailFast bool
282}
283
284func (o FailFastCallOption) before(c *callInfo) error {
285 c.failFast = o.FailFast
286 return nil
287}
288func (o FailFastCallOption) after(c *callInfo) {}
289
206// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. 290// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
207func MaxCallRecvMsgSize(s int) CallOption { 291func MaxCallRecvMsgSize(s int) CallOption {
208 return beforeCall(func(o *callInfo) error { 292 return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
209 o.maxReceiveMessageSize = &s 293}
210 return nil 294
211 }) 295// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
296// size the client can receive.
297// This is an EXPERIMENTAL API.
298type MaxRecvMsgSizeCallOption struct {
299 MaxRecvMsgSize int
212} 300}
213 301
302func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
303 c.maxReceiveMessageSize = &o.MaxRecvMsgSize
304 return nil
305}
306func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
307
214// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. 308// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
215func MaxCallSendMsgSize(s int) CallOption { 309func MaxCallSendMsgSize(s int) CallOption {
216 return beforeCall(func(o *callInfo) error { 310 return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
217 o.maxSendMessageSize = &s 311}
218 return nil 312
219 }) 313// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
314// size the client can send.
315// This is an EXPERIMENTAL API.
316type MaxSendMsgSizeCallOption struct {
317 MaxSendMsgSize int
220} 318}
221 319
320func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
321 c.maxSendMessageSize = &o.MaxSendMsgSize
322 return nil
323}
324func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
325
222// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials 326// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
223// for a call. 327// for a call.
224func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { 328func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
225 return beforeCall(func(c *callInfo) error { 329 return PerRPCCredsCallOption{Creds: creds}
226 c.creds = creds 330}
227 return nil 331
228 }) 332// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
333// credentials to use for the call.
334// This is an EXPERIMENTAL API.
335type PerRPCCredsCallOption struct {
336 Creds credentials.PerRPCCredentials
337}
338
339func (o PerRPCCredsCallOption) before(c *callInfo) error {
340 c.creds = o.Creds
341 return nil
342}
343func (o PerRPCCredsCallOption) after(c *callInfo) {}
344
345// UseCompressor returns a CallOption which sets the compressor used when
346// sending the request. If WithCompressor is also set, UseCompressor has
347// higher priority.
348//
349// This API is EXPERIMENTAL.
350func UseCompressor(name string) CallOption {
351 return CompressorCallOption{CompressorType: name}
352}
353
354// CompressorCallOption is a CallOption that indicates the compressor to use.
355// This is an EXPERIMENTAL API.
356type CompressorCallOption struct {
357 CompressorType string
358}
359
360func (o CompressorCallOption) before(c *callInfo) error {
361 c.compressorType = o.CompressorType
362 return nil
363}
364func (o CompressorCallOption) after(c *callInfo) {}
365
366// CallContentSubtype returns a CallOption that will set the content-subtype
367// for a call. For example, if content-subtype is "json", the Content-Type over
368// the wire will be "application/grpc+json". The content-subtype is converted
369// to lowercase before being included in Content-Type. See Content-Type on
370// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
371// more details.
372//
373// If CallCustomCodec is not also used, the content-subtype will be used to
374// look up the Codec to use in the registry controlled by RegisterCodec. See
375// the documentation on RegisterCodec for details on registration. The lookup
376// of content-subtype is case-insensitive. If no such Codec is found, the call
377// will result in an error with code codes.Internal.
378//
379// If CallCustomCodec is also used, that Codec will be used for all request and
380// response messages, with the content-subtype set to the given contentSubtype
381// here for requests.
382func CallContentSubtype(contentSubtype string) CallOption {
383 return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
384}
385
386// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
387// used for marshaling messages.
388// This is an EXPERIMENTAL API.
389type ContentSubtypeCallOption struct {
390 ContentSubtype string
391}
392
393func (o ContentSubtypeCallOption) before(c *callInfo) error {
394 c.contentSubtype = o.ContentSubtype
395 return nil
396}
397func (o ContentSubtypeCallOption) after(c *callInfo) {}
398
399// CallCustomCodec returns a CallOption that will set the given Codec to be
400// used for all request and response messages for a call. The result of calling
401// String() will be used as the content-subtype in a case-insensitive manner.
402//
403// See Content-Type on
404// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
405// more details. Also see the documentation on RegisterCodec and
406// CallContentSubtype for more details on the interaction between Codec and
407// content-subtype.
408//
409// This function is provided for advanced users; prefer to use only
410// CallContentSubtype to select a registered codec instead.
411func CallCustomCodec(codec Codec) CallOption {
412 return CustomCodecCallOption{Codec: codec}
413}
414
415// CustomCodecCallOption is a CallOption that indicates the codec used for
416// marshaling messages.
417// This is an EXPERIMENTAL API.
418type CustomCodecCallOption struct {
419 Codec Codec
420}
421
422func (o CustomCodecCallOption) before(c *callInfo) error {
423 c.codec = o.Codec
424 return nil
425}
426func (o CustomCodecCallOption) after(c *callInfo) {}
427
428// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
429// used for buffering this RPC's requests for retry purposes.
430//
431// This API is EXPERIMENTAL.
432func MaxRetryRPCBufferSize(bytes int) CallOption {
433 return MaxRetryRPCBufferSizeCallOption{bytes}
434}
435
436// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
437// memory to be used for caching this RPC for retry purposes.
438// This is an EXPERIMENTAL API.
439type MaxRetryRPCBufferSizeCallOption struct {
440 MaxRetryRPCBufferSize int
441}
442
443func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
444 c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
445 return nil
229} 446}
447func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
230 448
231// The format of the payload: compressed or not? 449// The format of the payload: compressed or not?
232type payloadFormat uint8 450type payloadFormat uint8
233 451
234const ( 452const (
235 compressionNone payloadFormat = iota // no compression 453 compressionNone payloadFormat = 0 // no compression
236 compressionMade 454 compressionMade payloadFormat = 1 // compressed
237) 455)
238 456
239// parser reads complete gRPC messages from the underlying reader. 457// parser reads complete gRPC messages from the underlying reader.
@@ -243,8 +461,8 @@ type parser struct {
243 // error types. 461 // error types.
244 r io.Reader 462 r io.Reader
245 463
246 // The header of a gRPC message. Find more detail 464 // The header of a gRPC message. Find more detail at
247 // at https://grpc.io/docs/guides/wire.html. 465 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
248 header [5]byte 466 header [5]byte
249} 467}
250 468
@@ -257,7 +475,7 @@ type parser struct {
257// * io.EOF, when no messages remain 475// * io.EOF, when no messages remain
258// * io.ErrUnexpectedEOF 476// * io.ErrUnexpectedEOF
259// * of type transport.ConnectionError 477// * of type transport.ConnectionError
260// * of type transport.StreamError 478// * an error from the status package
261// No other error values or types must be returned, which also means 479// No other error values or types must be returned, which also means
262// that the underlying io.Reader must not return an incompatible 480// that the underlying io.Reader must not return an incompatible
263// error. 481// error.
@@ -272,8 +490,11 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
272 if length == 0 { 490 if length == 0 {
273 return pf, nil, nil 491 return pf, nil, nil
274 } 492 }
275 if length > uint32(maxReceiveMessageSize) { 493 if int64(length) > int64(maxInt) {
276 return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) 494 return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
495 }
496 if int(length) > maxReceiveMessageSize {
497 return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
277 } 498 }
278 // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead 499 // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
279 // of making it for each message: 500 // of making it for each message:
@@ -287,120 +508,173 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
287 return pf, msg, nil 508 return pf, msg, nil
288} 509}
289 510
290// encode serializes msg and prepends the message header. If msg is nil, it 511// encode serializes msg and returns a buffer containing the message, or an
291// generates the message header of 0 message length. 512// error if it is too large to be transmitted by grpc. If msg is nil, it
292func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) { 513// generates an empty message.
293 var ( 514func encode(c baseCodec, msg interface{}) ([]byte, error) {
294 b []byte 515 if msg == nil { // NOTE: typed nils will not be caught by this check
295 length uint 516 return nil, nil
296 ) 517 }
297 if msg != nil { 518 b, err := c.Marshal(msg)
298 var err error 519 if err != nil {
299 // TODO(zhaoq): optimize to reduce memory alloc and copying. 520 return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
300 b, err = c.Marshal(msg) 521 }
522 if uint(len(b)) > math.MaxUint32 {
523 return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
524 }
525 return b, nil
526}
527
528// compress returns the input bytes compressed by compressor or cp. If both
529// compressors are nil, returns nil.
530//
531// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
532func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
533 if compressor == nil && cp == nil {
534 return nil, nil
535 }
536 wrapErr := func(err error) error {
537 return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
538 }
539 cbuf := &bytes.Buffer{}
540 if compressor != nil {
541 z, err := compressor.Compress(cbuf)
301 if err != nil { 542 if err != nil {
302 return nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) 543 return nil, wrapErr(err)
303 } 544 }
304 if outPayload != nil { 545 if _, err := z.Write(in); err != nil {
305 outPayload.Payload = msg 546 return nil, wrapErr(err)
306 // TODO truncate large payload.
307 outPayload.Data = b
308 outPayload.Length = len(b)
309 } 547 }
310 if cp != nil { 548 if err := z.Close(); err != nil {
311 if err := cp.Do(cbuf, b); err != nil { 549 return nil, wrapErr(err)
312 return nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) 550 }
313 } 551 } else {
314 b = cbuf.Bytes() 552 if err := cp.Do(cbuf, in); err != nil {
553 return nil, wrapErr(err)
315 } 554 }
316 length = uint(len(b))
317 }
318 if length > math.MaxUint32 {
319 return nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", length)
320 } 555 }
556 return cbuf.Bytes(), nil
557}
321 558
322 const ( 559const (
323 payloadLen = 1 560 payloadLen = 1
324 sizeLen = 4 561 sizeLen = 4
325 ) 562 headerLen = payloadLen + sizeLen
326 563)
327 var buf = make([]byte, payloadLen+sizeLen+len(b))
328 564
329 // Write payload format 565// msgHeader returns a 5-byte header for the message being transmitted and the
330 if cp == nil { 566// payload, which is compData if non-nil or data otherwise.
331 buf[0] = byte(compressionNone) 567func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
568 hdr = make([]byte, headerLen)
569 if compData != nil {
570 hdr[0] = byte(compressionMade)
571 data = compData
332 } else { 572 } else {
333 buf[0] = byte(compressionMade) 573 hdr[0] = byte(compressionNone)
334 } 574 }
335 // Write length of b into buf
336 binary.BigEndian.PutUint32(buf[1:], uint32(length))
337 // Copy encoded msg to buf
338 copy(buf[5:], b)
339 575
340 if outPayload != nil { 576 // Write length of payload into buf
341 outPayload.WireLength = len(buf) 577 binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
342 } 578 return hdr, data
579}
343 580
344 return buf, nil 581func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
582 return &stats.OutPayload{
583 Client: client,
584 Payload: msg,
585 Data: data,
586 Length: len(data),
587 WireLength: len(payload) + headerLen,
588 SentTime: t,
589 }
345} 590}
346 591
347func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { 592func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
348 switch pf { 593 switch pf {
349 case compressionNone: 594 case compressionNone:
350 case compressionMade: 595 case compressionMade:
351 if dc == nil || recvCompress != dc.Type() { 596 if recvCompress == "" || recvCompress == encoding.Identity {
352 return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) 597 return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
598 }
599 if !haveCompressor {
600 return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
353 } 601 }
354 default: 602 default:
355 return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf) 603 return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
356 } 604 }
357 return nil 605 return nil
358} 606}
359 607
360func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { 608type payloadInfo struct {
609 wireLength int // The compressed length got from wire.
610 uncompressedBytes []byte
611}
612
613func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
361 pf, d, err := p.recvMsg(maxReceiveMessageSize) 614 pf, d, err := p.recvMsg(maxReceiveMessageSize)
362 if err != nil { 615 if err != nil {
363 return err 616 return nil, err
364 } 617 }
365 if inPayload != nil { 618 if payInfo != nil {
366 inPayload.WireLength = len(d) 619 payInfo.wireLength = len(d)
367 } 620 }
368 if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { 621
369 return err 622 if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
623 return nil, st.Err()
370 } 624 }
625
371 if pf == compressionMade { 626 if pf == compressionMade {
372 d, err = dc.Do(bytes.NewReader(d)) 627 // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
373 if err != nil { 628 // use this decompressor as the default.
374 return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) 629 if dc != nil {
630 d, err = dc.Do(bytes.NewReader(d))
631 if err != nil {
632 return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
633 }
634 } else {
635 dcReader, err := compressor.Decompress(bytes.NewReader(d))
636 if err != nil {
637 return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
638 }
639 d, err = ioutil.ReadAll(dcReader)
640 if err != nil {
641 return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
642 }
375 } 643 }
376 } 644 }
377 if len(d) > maxReceiveMessageSize { 645 if len(d) > maxReceiveMessageSize {
378 // TODO: Revisit the error code. Currently keep it consistent with java 646 // TODO: Revisit the error code. Currently keep it consistent with java
379 // implementation. 647 // implementation.
380 return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) 648 return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
649 }
650 return d, nil
651}
652
653// For the two compressor parameters, both should not be set, but if they are,
654// dc takes precedence over compressor.
655// TODO(dfawley): wrap the old compressor/decompressor using the new API?
656func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
657 d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
658 if err != nil {
659 return err
381 } 660 }
382 if err := c.Unmarshal(d, m); err != nil { 661 if err := c.Unmarshal(d, m); err != nil {
383 return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) 662 return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
384 } 663 }
385 if inPayload != nil { 664 if payInfo != nil {
386 inPayload.RecvTime = time.Now() 665 payInfo.uncompressedBytes = d
387 inPayload.Payload = m
388 // TODO truncate large payload.
389 inPayload.Data = d
390 inPayload.Length = len(d)
391 } 666 }
392 return nil 667 return nil
393} 668}
394 669
395type rpcInfo struct { 670type rpcInfo struct {
396 bytesSent bool 671 failfast bool
397 bytesReceived bool
398} 672}
399 673
400type rpcInfoContextKey struct{} 674type rpcInfoContextKey struct{}
401 675
402func newContextWithRPCInfo(ctx context.Context) context.Context { 676func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
403 return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{}) 677 return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
404} 678}
405 679
406func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { 680func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
@@ -408,117 +682,135 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
408 return 682 return
409} 683}
410 684
411func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
412 if ss, ok := rpcInfoFromContext(ctx); ok {
413 *ss = s
414 }
415 return
416}
417
418// Code returns the error code for err if it was produced by the rpc system. 685// Code returns the error code for err if it was produced by the rpc system.
419// Otherwise, it returns codes.Unknown. 686// Otherwise, it returns codes.Unknown.
420// 687//
421// Deprecated; use status.FromError and Code method instead. 688// Deprecated: use status.Code instead.
422func Code(err error) codes.Code { 689func Code(err error) codes.Code {
423 if s, ok := status.FromError(err); ok { 690 return status.Code(err)
424 return s.Code()
425 }
426 return codes.Unknown
427} 691}
428 692
429// ErrorDesc returns the error description of err if it was produced by the rpc system. 693// ErrorDesc returns the error description of err if it was produced by the rpc system.
430// Otherwise, it returns err.Error() or empty string when err is nil. 694// Otherwise, it returns err.Error() or empty string when err is nil.
431// 695//
432// Deprecated; use status.FromError and Message method instead. 696// Deprecated: use status.Convert and Message method instead.
433func ErrorDesc(err error) string { 697func ErrorDesc(err error) string {
434 if s, ok := status.FromError(err); ok { 698 return status.Convert(err).Message()
435 return s.Message()
436 }
437 return err.Error()
438} 699}
439 700
440// Errorf returns an error containing an error code and a description; 701// Errorf returns an error containing an error code and a description;
441// Errorf returns nil if c is OK. 702// Errorf returns nil if c is OK.
442// 703//
443// Deprecated; use status.Errorf instead. 704// Deprecated: use status.Errorf instead.
444func Errorf(c codes.Code, format string, a ...interface{}) error { 705func Errorf(c codes.Code, format string, a ...interface{}) error {
445 return status.Errorf(c, format, a...) 706 return status.Errorf(c, format, a...)
446} 707}
447 708
448// MethodConfig defines the configuration recommended by the service providers for a 709// toRPCErr converts an error into an error from the status package.
449// particular method. 710func toRPCErr(err error) error {
450// This is EXPERIMENTAL and subject to change. 711 if err == nil || err == io.EOF {
451type MethodConfig struct { 712 return err
452 // WaitForReady indicates whether RPCs sent to this method should wait until 713 }
453 // the connection is ready by default (!failfast). The value specified via the 714 if err == io.ErrUnexpectedEOF {
454 // gRPC client API will override the value set here. 715 return status.Error(codes.Internal, err.Error())
455 WaitForReady *bool 716 }
456 // Timeout is the default timeout for RPCs sent to this method. The actual 717 if _, ok := status.FromError(err); ok {
457 // deadline used will be the minimum of the value specified here and the value 718 return err
458 // set by the application via the gRPC client API. If either one is not set, 719 }
459 // then the other will be used. If neither is set, then the RPC has no deadline. 720 switch e := err.(type) {
460 Timeout *time.Duration 721 case transport.ConnectionError:
461 // MaxReqSize is the maximum allowed payload size for an individual request in a 722 return status.Error(codes.Unavailable, e.Desc)
462 // stream (client->server) in bytes. The size which is measured is the serialized 723 default:
463 // payload after per-message compression (but before stream compression) in bytes. 724 switch err {
464 // The actual value used is the minumum of the value specified here and the value set 725 case context.DeadlineExceeded:
465 // by the application via the gRPC client API. If either one is not set, then the other 726 return status.Error(codes.DeadlineExceeded, err.Error())
466 // will be used. If neither is set, then the built-in default is used. 727 case context.Canceled:
467 MaxReqSize *int 728 return status.Error(codes.Canceled, err.Error())
468 // MaxRespSize is the maximum allowed payload size for an individual response in a 729 }
469 // stream (server->client) in bytes. 730 }
470 MaxRespSize *int 731 return status.Error(codes.Unknown, err.Error())
471} 732}
472
473// ServiceConfig is provided by the service provider and contains parameters for how
474// clients that connect to the service should behave.
475// This is EXPERIMENTAL and subject to change.
476type ServiceConfig struct {
477 // LB is the load balancer the service providers recommends. The balancer specified
478 // via grpc.WithBalancer will override this.
479 LB Balancer
480 // Methods contains a map for the methods in this service.
481 // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
482 // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
483 // Otherwise, the method has no MethodConfig to use.
484 Methods map[string]MethodConfig
485}
486
487func min(a, b *int) *int {
488 if *a < *b {
489 return a
490 }
491 return b
492}
493
494func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
495 if mcMax == nil && doptMax == nil {
496 return &defaultVal
497 }
498 if mcMax != nil && doptMax != nil {
499 return min(mcMax, doptMax)
500 }
501 if mcMax != nil {
502 return mcMax
503 }
504 return doptMax
505}
506
507// SupportPackageIsVersion3 is referenced from generated protocol buffer files.
508// The latest support package version is 4.
509// SupportPackageIsVersion3 is kept for compability. It will be removed in the
510// next support package version update.
511const SupportPackageIsVersion3 = true
512
513// SupportPackageIsVersion4 is referenced from generated protocol buffer files
514// to assert that that code is compatible with this version of the grpc package.
515//
516// This constant may be renamed in the future if a change in the generated code
517// requires a synchronised update of grpc-go and protoc-gen-go. This constant
518// should not be referenced from any other code.
519const SupportPackageIsVersion4 = true
520 733
521// Version is the current grpc version. 734// setCallInfoCodec should only be called after CallOptions have been applied.
522const Version = "1.6.0-dev" 735func setCallInfoCodec(c *callInfo) error {
736 if c.codec != nil {
737 // codec was already set by a CallOption; use it.
738 return nil
739 }
740
741 if c.contentSubtype == "" {
742 // No codec specified in CallOptions; use proto by default.
743 c.codec = encoding.GetCodec(proto.Name)
744 return nil
745 }
746
747 // c.contentSubtype is already lowercased in CallContentSubtype
748 c.codec = encoding.GetCodec(c.contentSubtype)
749 if c.codec == nil {
750 return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
751 }
752 return nil
753}
754
755// parseDialTarget returns the network and address to pass to dialer
756func parseDialTarget(target string) (net string, addr string) {
757 net = "tcp"
758
759 m1 := strings.Index(target, ":")
760 m2 := strings.Index(target, ":/")
761
762 // handle unix:addr which will fail with url.Parse
763 if m1 >= 0 && m2 < 0 {
764 if n := target[0:m1]; n == "unix" {
765 net = n
766 addr = target[m1+1:]
767 return net, addr
768 }
769 }
770 if m2 >= 0 {
771 t, err := url.Parse(target)
772 if err != nil {
773 return net, target
774 }
775 scheme := t.Scheme
776 addr = t.Path
777 if scheme == "unix" {
778 net = scheme
779 if addr == "" {
780 addr = t.Host
781 }
782 return net, addr
783 }
784 }
785
786 return net, target
787}
788
789// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
790// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
791// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
792// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
793type channelzData struct {
794 callsStarted int64
795 callsFailed int64
796 callsSucceeded int64
797 // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
798 // time.Time since it's more costly to atomically update time.Time variable than int64 variable.
799 lastCallStartedTime int64
800}
801
802// The SupportPackageIsVersion variables are referenced from generated protocol
803// buffer files to ensure compatibility with the gRPC version used. The latest
804// support package version is 5.
805//
806// Older versions are kept for compatibility. They may be removed if
807// compatibility cannot be maintained.
808//
809// These constants should not be referenced from any other code.
810const (
811 SupportPackageIsVersion3 = true
812 SupportPackageIsVersion4 = true
813 SupportPackageIsVersion5 = true
814)
523 815
524const grpcUA = "grpc-go/" + Version 816const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 42733e2..d705d7a 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -19,36 +19,41 @@
19package grpc 19package grpc
20 20
21import ( 21import (
22 "bytes" 22 "context"
23 "errors" 23 "errors"
24 "fmt" 24 "fmt"
25 "io" 25 "io"
26 "math"
26 "net" 27 "net"
27 "net/http" 28 "net/http"
28 "reflect" 29 "reflect"
29 "runtime" 30 "runtime"
30 "strings" 31 "strings"
31 "sync" 32 "sync"
33 "sync/atomic"
32 "time" 34 "time"
33 35
34 "golang.org/x/net/context"
35 "golang.org/x/net/http2"
36 "golang.org/x/net/trace" 36 "golang.org/x/net/trace"
37
37 "google.golang.org/grpc/codes" 38 "google.golang.org/grpc/codes"
38 "google.golang.org/grpc/credentials" 39 "google.golang.org/grpc/credentials"
40 "google.golang.org/grpc/encoding"
41 "google.golang.org/grpc/encoding/proto"
39 "google.golang.org/grpc/grpclog" 42 "google.golang.org/grpc/grpclog"
40 "google.golang.org/grpc/internal" 43 "google.golang.org/grpc/internal/binarylog"
44 "google.golang.org/grpc/internal/channelz"
45 "google.golang.org/grpc/internal/transport"
41 "google.golang.org/grpc/keepalive" 46 "google.golang.org/grpc/keepalive"
42 "google.golang.org/grpc/metadata" 47 "google.golang.org/grpc/metadata"
48 "google.golang.org/grpc/peer"
43 "google.golang.org/grpc/stats" 49 "google.golang.org/grpc/stats"
44 "google.golang.org/grpc/status" 50 "google.golang.org/grpc/status"
45 "google.golang.org/grpc/tap" 51 "google.golang.org/grpc/tap"
46 "google.golang.org/grpc/transport"
47) 52)
48 53
49const ( 54const (
50 defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 55 defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
51 defaultServerMaxSendMessageSize = 1024 * 1024 * 4 56 defaultServerMaxSendMessageSize = math.MaxInt32
52) 57)
53 58
54type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) 59type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
@@ -88,18 +93,24 @@ type Server struct {
88 conns map[io.Closer]bool 93 conns map[io.Closer]bool
89 serve bool 94 serve bool
90 drain bool 95 drain bool
91 ctx context.Context 96 cv *sync.Cond // signaled when connections close for GracefulStop
92 cancel context.CancelFunc
93 // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished
94 // and all the transport goes away.
95 cv *sync.Cond
96 m map[string]*service // service name -> service info 97 m map[string]*service // service name -> service info
97 events trace.EventLog 98 events trace.EventLog
99
100 quit chan struct{}
101 done chan struct{}
102 quitOnce sync.Once
103 doneOnce sync.Once
104 channelzRemoveOnce sync.Once
105 serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
106
107 channelzID int64 // channelz unique identification number
108 czData *channelzData
98} 109}
99 110
100type options struct { 111type options struct {
101 creds credentials.TransportCredentials 112 creds credentials.TransportCredentials
102 codec Codec 113 codec baseCodec
103 cp Compressor 114 cp Compressor
104 dc Decompressor 115 dc Decompressor
105 unaryInt UnaryServerInterceptor 116 unaryInt UnaryServerInterceptor
@@ -109,22 +120,50 @@ type options struct {
109 maxConcurrentStreams uint32 120 maxConcurrentStreams uint32
110 maxReceiveMessageSize int 121 maxReceiveMessageSize int
111 maxSendMessageSize int 122 maxSendMessageSize int
112 useHandlerImpl bool // use http.Handler-based server
113 unknownStreamDesc *StreamDesc 123 unknownStreamDesc *StreamDesc
114 keepaliveParams keepalive.ServerParameters 124 keepaliveParams keepalive.ServerParameters
115 keepalivePolicy keepalive.EnforcementPolicy 125 keepalivePolicy keepalive.EnforcementPolicy
116 initialWindowSize int32 126 initialWindowSize int32
117 initialConnWindowSize int32 127 initialConnWindowSize int32
128 writeBufferSize int
129 readBufferSize int
130 connectionTimeout time.Duration
131 maxHeaderListSize *uint32
118} 132}
119 133
120var defaultServerOptions = options{ 134var defaultServerOptions = options{
121 maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, 135 maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
122 maxSendMessageSize: defaultServerMaxSendMessageSize, 136 maxSendMessageSize: defaultServerMaxSendMessageSize,
137 connectionTimeout: 120 * time.Second,
138 writeBufferSize: defaultWriteBufSize,
139 readBufferSize: defaultReadBufSize,
123} 140}
124 141
125// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. 142// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
126type ServerOption func(*options) 143type ServerOption func(*options)
127 144
145// WriteBufferSize determines how much data can be batched before doing a write on the wire.
146// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
147// The default value for this buffer is 32KB.
148// Zero will disable the write buffer such that each write will be on underlying connection.
149// Note: A Send call may not directly translate to a write.
150func WriteBufferSize(s int) ServerOption {
151 return func(o *options) {
152 o.writeBufferSize = s
153 }
154}
155
156// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
157// for one read syscall.
158// The default value for this buffer is 32KB.
159// Zero will disable read buffer for a connection so data framer can access the underlying
160// conn directly.
161func ReadBufferSize(s int) ServerOption {
162 return func(o *options) {
163 o.readBufferSize = s
164 }
165}
166
128// InitialWindowSize returns a ServerOption that sets window size for stream. 167// InitialWindowSize returns a ServerOption that sets window size for stream.
129// The lower bound for window size is 64K and any value smaller than that will be ignored. 168// The lower bound for window size is 64K and any value smaller than that will be ignored.
130func InitialWindowSize(s int32) ServerOption { 169func InitialWindowSize(s int32) ServerOption {
@@ -156,20 +195,32 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
156} 195}
157 196
158// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. 197// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
198//
199// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
159func CustomCodec(codec Codec) ServerOption { 200func CustomCodec(codec Codec) ServerOption {
160 return func(o *options) { 201 return func(o *options) {
161 o.codec = codec 202 o.codec = codec
162 } 203 }
163} 204}
164 205
165// RPCCompressor returns a ServerOption that sets a compressor for outbound messages. 206// RPCCompressor returns a ServerOption that sets a compressor for outbound
207// messages. For backward compatibility, all outbound messages will be sent
208// using this compressor, regardless of incoming message compression. By
209// default, server messages will be sent using the same compressor with which
210// request messages were sent.
211//
212// Deprecated: use encoding.RegisterCompressor instead.
166func RPCCompressor(cp Compressor) ServerOption { 213func RPCCompressor(cp Compressor) ServerOption {
167 return func(o *options) { 214 return func(o *options) {
168 o.cp = cp 215 o.cp = cp
169 } 216 }
170} 217}
171 218
172// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages. 219// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
220// messages. It has higher priority than decompressors registered via
221// encoding.RegisterCompressor.
222//
223// Deprecated: use encoding.RegisterCompressor instead.
173func RPCDecompressor(dc Decompressor) ServerOption { 224func RPCDecompressor(dc Decompressor) ServerOption {
174 return func(o *options) { 225 return func(o *options) {
175 o.dc = dc 226 o.dc = dc
@@ -177,7 +228,9 @@ func RPCDecompressor(dc Decompressor) ServerOption {
177} 228}
178 229
179// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. 230// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
180// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. 231// If this is not set, gRPC uses the default limit.
232//
233// Deprecated: use MaxRecvMsgSize instead.
181func MaxMsgSize(m int) ServerOption { 234func MaxMsgSize(m int) ServerOption {
182 return MaxRecvMsgSize(m) 235 return MaxRecvMsgSize(m)
183} 236}
@@ -259,7 +312,7 @@ func StatsHandler(h stats.Handler) ServerOption {
259// handler that will be invoked instead of returning the "unimplemented" gRPC 312// handler that will be invoked instead of returning the "unimplemented" gRPC
260// error whenever a request is received for an unregistered service or method. 313// error whenever a request is received for an unregistered service or method.
261// The handling function has full access to the Context of the request and the 314// The handling function has full access to the Context of the request and the
262// stream, and the invocation passes through interceptors. 315// stream, and the invocation bypasses interceptors.
263func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { 316func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
264 return func(o *options) { 317 return func(o *options) {
265 o.unknownStreamDesc = &StreamDesc{ 318 o.unknownStreamDesc = &StreamDesc{
@@ -272,6 +325,26 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
272 } 325 }
273} 326}
274 327
328// ConnectionTimeout returns a ServerOption that sets the timeout for
329// connection establishment (up to and including HTTP/2 handshaking) for all
330// new connections. If this is not set, the default is 120 seconds. A zero or
331// negative value will result in an immediate timeout.
332//
333// This API is EXPERIMENTAL.
334func ConnectionTimeout(d time.Duration) ServerOption {
335 return func(o *options) {
336 o.connectionTimeout = d
337 }
338}
339
340// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
341// of header list that the server is prepared to accept.
342func MaxHeaderListSize(s uint32) ServerOption {
343 return func(o *options) {
344 o.maxHeaderListSize = &s
345 }
346}
347
275// NewServer creates a gRPC server which has no service registered and has not 348// NewServer creates a gRPC server which has no service registered and has not
276// started to accept requests yet. 349// started to accept requests yet.
277func NewServer(opt ...ServerOption) *Server { 350func NewServer(opt ...ServerOption) *Server {
@@ -279,22 +352,24 @@ func NewServer(opt ...ServerOption) *Server {
279 for _, o := range opt { 352 for _, o := range opt {
280 o(&opts) 353 o(&opts)
281 } 354 }
282 if opts.codec == nil {
283 // Set the default codec.
284 opts.codec = protoCodec{}
285 }
286 s := &Server{ 355 s := &Server{
287 lis: make(map[net.Listener]bool), 356 lis: make(map[net.Listener]bool),
288 opts: opts, 357 opts: opts,
289 conns: make(map[io.Closer]bool), 358 conns: make(map[io.Closer]bool),
290 m: make(map[string]*service), 359 m: make(map[string]*service),
360 quit: make(chan struct{}),
361 done: make(chan struct{}),
362 czData: new(channelzData),
291 } 363 }
292 s.cv = sync.NewCond(&s.mu) 364 s.cv = sync.NewCond(&s.mu)
293 s.ctx, s.cancel = context.WithCancel(context.Background())
294 if EnableTracing { 365 if EnableTracing {
295 _, file, line, _ := runtime.Caller(1) 366 _, file, line, _ := runtime.Caller(1)
296 s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) 367 s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
297 } 368 }
369
370 if channelz.IsOn() {
371 s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
372 }
298 return s 373 return s
299} 374}
300 375
@@ -399,11 +474,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo {
399 return ret 474 return ret
400} 475}
401 476
402var ( 477// ErrServerStopped indicates that the operation is now illegal because of
403 // ErrServerStopped indicates that the operation is now illegal because of 478// the server being stopped.
404 // the server being stopped. 479var ErrServerStopped = errors.New("grpc: the server has been stopped")
405 ErrServerStopped = errors.New("grpc: the server has been stopped")
406)
407 480
408func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { 481func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
409 if s.opts.creds == nil { 482 if s.opts.creds == nil {
@@ -412,28 +485,67 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
412 return s.opts.creds.ServerHandshake(rawConn) 485 return s.opts.creds.ServerHandshake(rawConn)
413} 486}
414 487
488type listenSocket struct {
489 net.Listener
490 channelzID int64
491}
492
493func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
494 return &channelz.SocketInternalMetric{
495 SocketOptions: channelz.GetSocketOption(l.Listener),
496 LocalAddr: l.Listener.Addr(),
497 }
498}
499
500func (l *listenSocket) Close() error {
501 err := l.Listener.Close()
502 if channelz.IsOn() {
503 channelz.RemoveEntry(l.channelzID)
504 }
505 return err
506}
507
415// Serve accepts incoming connections on the listener lis, creating a new 508// Serve accepts incoming connections on the listener lis, creating a new
416// ServerTransport and service goroutine for each. The service goroutines 509// ServerTransport and service goroutine for each. The service goroutines
417// read gRPC requests and then call the registered handlers to reply to them. 510// read gRPC requests and then call the registered handlers to reply to them.
418// Serve returns when lis.Accept fails with fatal errors. lis will be closed when 511// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
419// this method returns. 512// this method returns.
420// Serve always returns non-nil error. 513// Serve will return a non-nil error unless Stop or GracefulStop is called.
421func (s *Server) Serve(lis net.Listener) error { 514func (s *Server) Serve(lis net.Listener) error {
422 s.mu.Lock() 515 s.mu.Lock()
423 s.printf("serving") 516 s.printf("serving")
424 s.serve = true 517 s.serve = true
425 if s.lis == nil { 518 if s.lis == nil {
519 // Serve called after Stop or GracefulStop.
426 s.mu.Unlock() 520 s.mu.Unlock()
427 lis.Close() 521 lis.Close()
428 return ErrServerStopped 522 return ErrServerStopped
429 } 523 }
430 s.lis[lis] = true 524
525 s.serveWG.Add(1)
526 defer func() {
527 s.serveWG.Done()
528 select {
529 // Stop or GracefulStop called; block until done and return nil.
530 case <-s.quit:
531 <-s.done
532 default:
533 }
534 }()
535
536 ls := &listenSocket{Listener: lis}
537 s.lis[ls] = true
538
539 if channelz.IsOn() {
540 ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
541 }
431 s.mu.Unlock() 542 s.mu.Unlock()
543
432 defer func() { 544 defer func() {
433 s.mu.Lock() 545 s.mu.Lock()
434 if s.lis != nil && s.lis[lis] { 546 if s.lis != nil && s.lis[ls] {
435 lis.Close() 547 ls.Close()
436 delete(s.lis, lis) 548 delete(s.lis, ls)
437 } 549 }
438 s.mu.Unlock() 550 s.mu.Unlock()
439 }() 551 }()
@@ -460,36 +572,52 @@ func (s *Server) Serve(lis net.Listener) error {
460 timer := time.NewTimer(tempDelay) 572 timer := time.NewTimer(tempDelay)
461 select { 573 select {
462 case <-timer.C: 574 case <-timer.C:
463 case <-s.ctx.Done(): 575 case <-s.quit:
576 timer.Stop()
577 return nil
464 } 578 }
465 timer.Stop()
466 continue 579 continue
467 } 580 }
468 s.mu.Lock() 581 s.mu.Lock()
469 s.printf("done serving; Accept = %v", err) 582 s.printf("done serving; Accept = %v", err)
470 s.mu.Unlock() 583 s.mu.Unlock()
584
585 select {
586 case <-s.quit:
587 return nil
588 default:
589 }
471 return err 590 return err
472 } 591 }
473 tempDelay = 0 592 tempDelay = 0
474 // Start a new goroutine to deal with rawConn 593 // Start a new goroutine to deal with rawConn so we don't stall this Accept
475 // so we don't stall this Accept loop goroutine. 594 // loop goroutine.
476 go s.handleRawConn(rawConn) 595 //
596 // Make sure we account for the goroutine so GracefulStop doesn't nil out
597 // s.conns before this conn can be added.
598 s.serveWG.Add(1)
599 go func() {
600 s.handleRawConn(rawConn)
601 s.serveWG.Done()
602 }()
477 } 603 }
478} 604}
479 605
480// handleRawConn is run in its own goroutine and handles a just-accepted 606// handleRawConn forks a goroutine to handle a just-accepted connection that
481// connection that has not had any I/O performed on it yet. 607// has not had any I/O performed on it yet.
482func (s *Server) handleRawConn(rawConn net.Conn) { 608func (s *Server) handleRawConn(rawConn net.Conn) {
609 rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
483 conn, authInfo, err := s.useTransportAuthenticator(rawConn) 610 conn, authInfo, err := s.useTransportAuthenticator(rawConn)
484 if err != nil { 611 if err != nil {
485 s.mu.Lock() 612 s.mu.Lock()
486 s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) 613 s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
487 s.mu.Unlock() 614 s.mu.Unlock()
488 grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) 615 grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
489 // If serverHandShake returns ErrConnDispatched, keep rawConn open. 616 // If serverHandshake returns ErrConnDispatched, keep rawConn open.
490 if err != credentials.ErrConnDispatched { 617 if err != credentials.ErrConnDispatched {
491 rawConn.Close() 618 rawConn.Close()
492 } 619 }
620 rawConn.SetDeadline(time.Time{})
493 return 621 return
494 } 622 }
495 623
@@ -501,19 +629,25 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
501 } 629 }
502 s.mu.Unlock() 630 s.mu.Unlock()
503 631
504 if s.opts.useHandlerImpl { 632 // Finish handshaking (HTTP2)
505 s.serveUsingHandler(conn) 633 st := s.newHTTP2Transport(conn, authInfo)
506 } else { 634 if st == nil {
507 s.serveHTTP2Transport(conn, authInfo) 635 return
636 }
637
638 rawConn.SetDeadline(time.Time{})
639 if !s.addConn(st) {
640 return
508 } 641 }
642 go func() {
643 s.serveStreams(st)
644 s.removeConn(st)
645 }()
509} 646}
510 647
511// serveHTTP2Transport sets up a http/2 transport (using the 648// newHTTP2Transport sets up a http/2 transport (using the
512// gRPC http2 server transport in transport/http2_server.go) and 649// gRPC http2 server transport in transport/http2_server.go).
513// serves streams on it. 650func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
514// This is run in its own goroutine (it does network I/O in
515// transport.NewServerTransport).
516func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
517 config := &transport.ServerConfig{ 651 config := &transport.ServerConfig{
518 MaxStreams: s.opts.maxConcurrentStreams, 652 MaxStreams: s.opts.maxConcurrentStreams,
519 AuthInfo: authInfo, 653 AuthInfo: authInfo,
@@ -523,6 +657,10 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo)
523 KeepalivePolicy: s.opts.keepalivePolicy, 657 KeepalivePolicy: s.opts.keepalivePolicy,
524 InitialWindowSize: s.opts.initialWindowSize, 658 InitialWindowSize: s.opts.initialWindowSize,
525 InitialConnWindowSize: s.opts.initialConnWindowSize, 659 InitialConnWindowSize: s.opts.initialConnWindowSize,
660 WriteBufferSize: s.opts.writeBufferSize,
661 ReadBufferSize: s.opts.readBufferSize,
662 ChannelzParentID: s.channelzID,
663 MaxHeaderListSize: s.opts.maxHeaderListSize,
526 } 664 }
527 st, err := transport.NewServerTransport("http2", c, config) 665 st, err := transport.NewServerTransport("http2", c, config)
528 if err != nil { 666 if err != nil {
@@ -531,17 +669,13 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo)
531 s.mu.Unlock() 669 s.mu.Unlock()
532 c.Close() 670 c.Close()
533 grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) 671 grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
534 return 672 return nil
535 }
536 if !s.addConn(st) {
537 st.Close()
538 return
539 } 673 }
540 s.serveStreams(st) 674
675 return st
541} 676}
542 677
543func (s *Server) serveStreams(st transport.ServerTransport) { 678func (s *Server) serveStreams(st transport.ServerTransport) {
544 defer s.removeConn(st)
545 defer st.Close() 679 defer st.Close()
546 var wg sync.WaitGroup 680 var wg sync.WaitGroup
547 st.HandleStreams(func(stream *transport.Stream) { 681 st.HandleStreams(func(stream *transport.Stream) {
@@ -562,32 +696,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
562 696
563var _ http.Handler = (*Server)(nil) 697var _ http.Handler = (*Server)(nil)
564 698
565// serveUsingHandler is called from handleRawConn when s is configured
566// to handle requests via the http.Handler interface. It sets up a
567// net/http.Server to handle the just-accepted conn. The http.Server
568// is configured to route all incoming requests (all HTTP/2 streams)
569// to ServeHTTP, which creates a new ServerTransport for each stream.
570// serveUsingHandler blocks until conn closes.
571//
572// This codepath is only used when Server.TestingUseHandlerImpl has
573// been configured. This lets the end2end tests exercise the ServeHTTP
574// method as one of the environment types.
575//
576// conn is the *tls.Conn that's already been authenticated.
577func (s *Server) serveUsingHandler(conn net.Conn) {
578 if !s.addConn(conn) {
579 conn.Close()
580 return
581 }
582 defer s.removeConn(conn)
583 h2s := &http2.Server{
584 MaxConcurrentStreams: s.opts.maxConcurrentStreams,
585 }
586 h2s.ServeConn(conn, &http2.ServeConnOpts{
587 Handler: s,
588 })
589}
590
591// ServeHTTP implements the Go standard library's http.Handler 699// ServeHTTP implements the Go standard library's http.Handler
592// interface by responding to the gRPC request r, by looking up 700// interface by responding to the gRPC request r, by looking up
593// the requested gRPC method in the gRPC server s. 701// the requested gRPC method in the gRPC server s.
@@ -613,13 +721,12 @@ func (s *Server) serveUsingHandler(conn net.Conn) {
613// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL 721// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
614// and subject to change. 722// and subject to change.
615func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { 723func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
616 st, err := transport.NewServerHandlerTransport(w, r) 724 st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
617 if err != nil { 725 if err != nil {
618 http.Error(w, err.Error(), http.StatusInternalServerError) 726 http.Error(w, err.Error(), http.StatusInternalServerError)
619 return 727 return
620 } 728 }
621 if !s.addConn(st) { 729 if !s.addConn(st) {
622 st.Close()
623 return 730 return
624 } 731 }
625 defer s.removeConn(st) 732 defer s.removeConn(st)
@@ -649,9 +756,15 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
649func (s *Server) addConn(c io.Closer) bool { 756func (s *Server) addConn(c io.Closer) bool {
650 s.mu.Lock() 757 s.mu.Lock()
651 defer s.mu.Unlock() 758 defer s.mu.Unlock()
652 if s.conns == nil || s.drain { 759 if s.conns == nil {
760 c.Close()
653 return false 761 return false
654 } 762 }
763 if s.drain {
764 // Transport added after we drained our existing conns: drain it
765 // immediately.
766 c.(transport.ServerTransport).Drain()
767 }
655 s.conns[c] = true 768 s.conns[c] = true
656 return true 769 return true
657} 770}
@@ -665,43 +778,73 @@ func (s *Server) removeConn(c io.Closer) {
665 } 778 }
666} 779}
667 780
668func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { 781func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
669 var ( 782 return &channelz.ServerInternalMetric{
670 cbuf *bytes.Buffer 783 CallsStarted: atomic.LoadInt64(&s.czData.callsStarted),
671 outPayload *stats.OutPayload 784 CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded),
672 ) 785 CallsFailed: atomic.LoadInt64(&s.czData.callsFailed),
673 if cp != nil { 786 LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
674 cbuf = new(bytes.Buffer)
675 } 787 }
676 if s.opts.statsHandler != nil { 788}
677 outPayload = &stats.OutPayload{} 789
678 } 790func (s *Server) incrCallsStarted() {
679 p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) 791 atomic.AddInt64(&s.czData.callsStarted, 1)
792 atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
793}
794
795func (s *Server) incrCallsSucceeded() {
796 atomic.AddInt64(&s.czData.callsSucceeded, 1)
797}
798
799func (s *Server) incrCallsFailed() {
800 atomic.AddInt64(&s.czData.callsFailed, 1)
801}
802
803func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
804 data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
680 if err != nil { 805 if err != nil {
681 grpclog.Errorln("grpc: server failed to encode response: ", err) 806 grpclog.Errorln("grpc: server failed to encode response: ", err)
682 return err 807 return err
683 } 808 }
684 if len(p) > s.opts.maxSendMessageSize { 809 compData, err := compress(data, cp, comp)
685 return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(p), s.opts.maxSendMessageSize) 810 if err != nil {
811 grpclog.Errorln("grpc: server failed to compress response: ", err)
812 return err
813 }
814 hdr, payload := msgHeader(data, compData)
815 // TODO(dfawley): should we be checking len(data) instead?
816 if len(payload) > s.opts.maxSendMessageSize {
817 return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
686 } 818 }
687 err = t.Write(stream, p, opts) 819 err = t.Write(stream, hdr, payload, opts)
688 if err == nil && outPayload != nil { 820 if err == nil && s.opts.statsHandler != nil {
689 outPayload.SentTime = time.Now() 821 s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
690 s.opts.statsHandler.HandleRPC(stream.Context(), outPayload)
691 } 822 }
692 return err 823 return err
693} 824}
694 825
695func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { 826func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
827 if channelz.IsOn() {
828 s.incrCallsStarted()
829 defer func() {
830 if err != nil && err != io.EOF {
831 s.incrCallsFailed()
832 } else {
833 s.incrCallsSucceeded()
834 }
835 }()
836 }
696 sh := s.opts.statsHandler 837 sh := s.opts.statsHandler
697 if sh != nil { 838 if sh != nil {
839 beginTime := time.Now()
698 begin := &stats.Begin{ 840 begin := &stats.Begin{
699 BeginTime: time.Now(), 841 BeginTime: beginTime,
700 } 842 }
701 sh.HandleRPC(stream.Context(), begin) 843 sh.HandleRPC(stream.Context(), begin)
702 defer func() { 844 defer func() {
703 end := &stats.End{ 845 end := &stats.End{
704 EndTime: time.Now(), 846 BeginTime: beginTime,
847 EndTime: time.Now(),
705 } 848 }
706 if err != nil && err != io.EOF { 849 if err != nil && err != io.EOF {
707 end.Error = toRPCErr(err) 850 end.Error = toRPCErr(err)
@@ -720,94 +863,112 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
720 } 863 }
721 }() 864 }()
722 } 865 }
723 if s.opts.cp != nil { 866
724 // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. 867 binlog := binarylog.GetMethodLogger(stream.Method())
725 stream.SetSendCompress(s.opts.cp.Type()) 868 if binlog != nil {
869 ctx := stream.Context()
870 md, _ := metadata.FromIncomingContext(ctx)
871 logEntry := &binarylog.ClientHeader{
872 Header: md,
873 MethodName: stream.Method(),
874 PeerAddr: nil,
875 }
876 if deadline, ok := ctx.Deadline(); ok {
877 logEntry.Timeout = deadline.Sub(time.Now())
878 if logEntry.Timeout < 0 {
879 logEntry.Timeout = 0
880 }
881 }
882 if a := md[":authority"]; len(a) > 0 {
883 logEntry.Authority = a[0]
884 }
885 if peer, ok := peer.FromContext(ctx); ok {
886 logEntry.PeerAddr = peer.Addr
887 }
888 binlog.Log(logEntry)
889 }
890
891 // comp and cp are used for compression. decomp and dc are used for
892 // decompression. If comp and decomp are both set, they are the same;
893 // however they are kept separate to ensure that at most one of the
894 // compressor/decompressor variable pairs are set for use later.
895 var comp, decomp encoding.Compressor
896 var cp Compressor
897 var dc Decompressor
898
899 // If dc is set and matches the stream's compression, use it. Otherwise, try
900 // to find a matching registered compressor for decomp.
901 if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
902 dc = s.opts.dc
903 } else if rc != "" && rc != encoding.Identity {
904 decomp = encoding.GetCompressor(rc)
905 if decomp == nil {
906 st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
907 t.WriteStatus(stream, st)
908 return st.Err()
909 }
726 } 910 }
727 p := &parser{r: stream} 911
728 pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) 912 // If cp is set, use it. Otherwise, attempt to compress the response using
729 if err == io.EOF { 913 // the incoming message compression method.
730 // The entire stream is done (for unary RPC only). 914 //
731 return err 915 // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
916 if s.opts.cp != nil {
917 cp = s.opts.cp
918 stream.SetSendCompress(cp.Type())
919 } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
920 // Legacy compressor not specified; attempt to respond with same encoding.
921 comp = encoding.GetCompressor(rc)
922 if comp != nil {
923 stream.SetSendCompress(rc)
924 }
732 } 925 }
733 if err == io.ErrUnexpectedEOF { 926
734 err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) 927 var payInfo *payloadInfo
928 if sh != nil || binlog != nil {
929 payInfo = &payloadInfo{}
735 } 930 }
931 d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
736 if err != nil { 932 if err != nil {
737 if st, ok := status.FromError(err); ok { 933 if st, ok := status.FromError(err); ok {
738 if e := t.WriteStatus(stream, st); e != nil { 934 if e := t.WriteStatus(stream, st); e != nil {
739 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) 935 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
740 } 936 }
741 } else {
742 switch st := err.(type) {
743 case transport.ConnectionError:
744 // Nothing to do here.
745 case transport.StreamError:
746 if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
747 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
748 }
749 default:
750 panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
751 }
752 } 937 }
753 return err 938 return err
754 } 939 }
755 940 if channelz.IsOn() {
756 if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { 941 t.IncrMsgRecv()
757 if st, ok := status.FromError(err); ok {
758 if e := t.WriteStatus(stream, st); e != nil {
759 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
760 }
761 return err
762 }
763 if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil {
764 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
765 }
766
767 // TODO checkRecvPayload always return RPC error. Add a return here if necessary.
768 }
769 var inPayload *stats.InPayload
770 if sh != nil {
771 inPayload = &stats.InPayload{
772 RecvTime: time.Now(),
773 }
774 } 942 }
775 df := func(v interface{}) error { 943 df := func(v interface{}) error {
776 if inPayload != nil { 944 if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
777 inPayload.WireLength = len(req)
778 }
779 if pf == compressionMade {
780 var err error
781 req, err = s.opts.dc.Do(bytes.NewReader(req))
782 if err != nil {
783 return Errorf(codes.Internal, err.Error())
784 }
785 }
786 if len(req) > s.opts.maxReceiveMessageSize {
787 // TODO: Revisit the error code. Currently keep it consistent with
788 // java implementation.
789 return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
790 }
791 if err := s.opts.codec.Unmarshal(req, v); err != nil {
792 return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) 945 return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
793 } 946 }
794 if inPayload != nil { 947 if sh != nil {
795 inPayload.Payload = v 948 sh.HandleRPC(stream.Context(), &stats.InPayload{
796 inPayload.Data = req 949 RecvTime: time.Now(),
797 inPayload.Length = len(req) 950 Payload: v,
798 sh.HandleRPC(stream.Context(), inPayload) 951 Data: d,
952 Length: len(d),
953 })
954 }
955 if binlog != nil {
956 binlog.Log(&binarylog.ClientMessage{
957 Message: d,
958 })
799 } 959 }
800 if trInfo != nil { 960 if trInfo != nil {
801 trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) 961 trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
802 } 962 }
803 return nil 963 return nil
804 } 964 }
805 reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) 965 ctx := NewContextWithServerTransportStream(stream.Context(), stream)
966 reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
806 if appErr != nil { 967 if appErr != nil {
807 appStatus, ok := status.FromError(appErr) 968 appStatus, ok := status.FromError(appErr)
808 if !ok { 969 if !ok {
809 // Convert appErr if it is not a grpc status error. 970 // Convert appErr if it is not a grpc status error.
810 appErr = status.Error(convertCode(appErr), appErr.Error()) 971 appErr = status.Error(codes.Unknown, appErr.Error())
811 appStatus, _ = status.FromError(appErr) 972 appStatus, _ = status.FromError(appErr)
812 } 973 }
813 if trInfo != nil { 974 if trInfo != nil {
@@ -817,16 +978,27 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
817 if e := t.WriteStatus(stream, appStatus); e != nil { 978 if e := t.WriteStatus(stream, appStatus); e != nil {
818 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) 979 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
819 } 980 }
981 if binlog != nil {
982 if h, _ := stream.Header(); h.Len() > 0 {
983 // Only log serverHeader if there was header. Otherwise it can
984 // be trailer only.
985 binlog.Log(&binarylog.ServerHeader{
986 Header: h,
987 })
988 }
989 binlog.Log(&binarylog.ServerTrailer{
990 Trailer: stream.Trailer(),
991 Err: appErr,
992 })
993 }
820 return appErr 994 return appErr
821 } 995 }
822 if trInfo != nil { 996 if trInfo != nil {
823 trInfo.tr.LazyLog(stringer("OK"), false) 997 trInfo.tr.LazyLog(stringer("OK"), false)
824 } 998 }
825 opts := &transport.Options{ 999 opts := &transport.Options{Last: true}
826 Last: true, 1000
827 Delay: false, 1001 if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
828 }
829 if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
830 if err == io.EOF { 1002 if err == io.EOF {
831 // The entire stream is done (for unary RPC only). 1003 // The entire stream is done (for unary RPC only).
832 return err 1004 return err
@@ -839,35 +1011,72 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
839 switch st := err.(type) { 1011 switch st := err.(type) {
840 case transport.ConnectionError: 1012 case transport.ConnectionError:
841 // Nothing to do here. 1013 // Nothing to do here.
842 case transport.StreamError:
843 if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
844 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
845 }
846 default: 1014 default:
847 panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) 1015 panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
848 } 1016 }
849 } 1017 }
1018 if binlog != nil {
1019 h, _ := stream.Header()
1020 binlog.Log(&binarylog.ServerHeader{
1021 Header: h,
1022 })
1023 binlog.Log(&binarylog.ServerTrailer{
1024 Trailer: stream.Trailer(),
1025 Err: appErr,
1026 })
1027 }
850 return err 1028 return err
851 } 1029 }
1030 if binlog != nil {
1031 h, _ := stream.Header()
1032 binlog.Log(&binarylog.ServerHeader{
1033 Header: h,
1034 })
1035 binlog.Log(&binarylog.ServerMessage{
1036 Message: reply,
1037 })
1038 }
1039 if channelz.IsOn() {
1040 t.IncrMsgSent()
1041 }
852 if trInfo != nil { 1042 if trInfo != nil {
853 trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) 1043 trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
854 } 1044 }
855 // TODO: Should we be logging if writing status failed here, like above? 1045 // TODO: Should we be logging if writing status failed here, like above?
856 // Should the logging be in WriteStatus? Should we ignore the WriteStatus 1046 // Should the logging be in WriteStatus? Should we ignore the WriteStatus
857 // error or allow the stats handler to see it? 1047 // error or allow the stats handler to see it?
858 return t.WriteStatus(stream, status.New(codes.OK, "")) 1048 err = t.WriteStatus(stream, status.New(codes.OK, ""))
1049 if binlog != nil {
1050 binlog.Log(&binarylog.ServerTrailer{
1051 Trailer: stream.Trailer(),
1052 Err: appErr,
1053 })
1054 }
1055 return err
859} 1056}
860 1057
861func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { 1058func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
1059 if channelz.IsOn() {
1060 s.incrCallsStarted()
1061 defer func() {
1062 if err != nil && err != io.EOF {
1063 s.incrCallsFailed()
1064 } else {
1065 s.incrCallsSucceeded()
1066 }
1067 }()
1068 }
862 sh := s.opts.statsHandler 1069 sh := s.opts.statsHandler
863 if sh != nil { 1070 if sh != nil {
1071 beginTime := time.Now()
864 begin := &stats.Begin{ 1072 begin := &stats.Begin{
865 BeginTime: time.Now(), 1073 BeginTime: beginTime,
866 } 1074 }
867 sh.HandleRPC(stream.Context(), begin) 1075 sh.HandleRPC(stream.Context(), begin)
868 defer func() { 1076 defer func() {
869 end := &stats.End{ 1077 end := &stats.End{
870 EndTime: time.Now(), 1078 BeginTime: beginTime,
1079 EndTime: time.Now(),
871 } 1080 }
872 if err != nil && err != io.EOF { 1081 if err != nil && err != io.EOF {
873 end.Error = toRPCErr(err) 1082 end.Error = toRPCErr(err)
@@ -875,24 +1084,70 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
875 sh.HandleRPC(stream.Context(), end) 1084 sh.HandleRPC(stream.Context(), end)
876 }() 1085 }()
877 } 1086 }
878 if s.opts.cp != nil { 1087 ctx := NewContextWithServerTransportStream(stream.Context(), stream)
879 stream.SetSendCompress(s.opts.cp.Type())
880 }
881 ss := &serverStream{ 1088 ss := &serverStream{
882 t: t, 1089 ctx: ctx,
883 s: stream, 1090 t: t,
884 p: &parser{r: stream}, 1091 s: stream,
885 codec: s.opts.codec, 1092 p: &parser{r: stream},
886 cp: s.opts.cp, 1093 codec: s.getCodec(stream.ContentSubtype()),
887 dc: s.opts.dc,
888 maxReceiveMessageSize: s.opts.maxReceiveMessageSize, 1094 maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
889 maxSendMessageSize: s.opts.maxSendMessageSize, 1095 maxSendMessageSize: s.opts.maxSendMessageSize,
890 trInfo: trInfo, 1096 trInfo: trInfo,
891 statsHandler: sh, 1097 statsHandler: sh,
892 } 1098 }
893 if ss.cp != nil { 1099
894 ss.cbuf = new(bytes.Buffer) 1100 ss.binlog = binarylog.GetMethodLogger(stream.Method())
1101 if ss.binlog != nil {
1102 md, _ := metadata.FromIncomingContext(ctx)
1103 logEntry := &binarylog.ClientHeader{
1104 Header: md,
1105 MethodName: stream.Method(),
1106 PeerAddr: nil,
1107 }
1108 if deadline, ok := ctx.Deadline(); ok {
1109 logEntry.Timeout = deadline.Sub(time.Now())
1110 if logEntry.Timeout < 0 {
1111 logEntry.Timeout = 0
1112 }
1113 }
1114 if a := md[":authority"]; len(a) > 0 {
1115 logEntry.Authority = a[0]
1116 }
1117 if peer, ok := peer.FromContext(ss.Context()); ok {
1118 logEntry.PeerAddr = peer.Addr
1119 }
1120 ss.binlog.Log(logEntry)
1121 }
1122
1123 // If dc is set and matches the stream's compression, use it. Otherwise, try
1124 // to find a matching registered compressor for decomp.
1125 if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
1126 ss.dc = s.opts.dc
1127 } else if rc != "" && rc != encoding.Identity {
1128 ss.decomp = encoding.GetCompressor(rc)
1129 if ss.decomp == nil {
1130 st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
1131 t.WriteStatus(ss.s, st)
1132 return st.Err()
1133 }
1134 }
1135
1136 // If cp is set, use it. Otherwise, attempt to compress the response using
1137 // the incoming message compression method.
1138 //
1139 // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
1140 if s.opts.cp != nil {
1141 ss.cp = s.opts.cp
1142 stream.SetSendCompress(s.opts.cp.Type())
1143 } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
1144 // Legacy compressor not specified; attempt to respond with same encoding.
1145 ss.comp = encoding.GetCompressor(rc)
1146 if ss.comp != nil {
1147 stream.SetSendCompress(rc)
1148 }
895 } 1149 }
1150
896 if trInfo != nil { 1151 if trInfo != nil {
897 trInfo.tr.LazyLog(&trInfo.firstLine, false) 1152 trInfo.tr.LazyLog(&trInfo.firstLine, false)
898 defer func() { 1153 defer func() {
@@ -924,12 +1179,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
924 if appErr != nil { 1179 if appErr != nil {
925 appStatus, ok := status.FromError(appErr) 1180 appStatus, ok := status.FromError(appErr)
926 if !ok { 1181 if !ok {
927 switch err := appErr.(type) { 1182 appStatus = status.New(codes.Unknown, appErr.Error())
928 case transport.StreamError:
929 appStatus = status.New(err.Code, err.Desc)
930 default:
931 appStatus = status.New(convertCode(appErr), appErr.Error())
932 }
933 appErr = appStatus.Err() 1183 appErr = appStatus.Err()
934 } 1184 }
935 if trInfo != nil { 1185 if trInfo != nil {
@@ -939,6 +1189,12 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
939 ss.mu.Unlock() 1189 ss.mu.Unlock()
940 } 1190 }
941 t.WriteStatus(ss.s, appStatus) 1191 t.WriteStatus(ss.s, appStatus)
1192 if ss.binlog != nil {
1193 ss.binlog.Log(&binarylog.ServerTrailer{
1194 Trailer: ss.s.Trailer(),
1195 Err: appErr,
1196 })
1197 }
942 // TODO: Should we log an error from WriteStatus here and below? 1198 // TODO: Should we log an error from WriteStatus here and below?
943 return appErr 1199 return appErr
944 } 1200 }
@@ -947,8 +1203,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
947 ss.trInfo.tr.LazyLog(stringer("OK"), false) 1203 ss.trInfo.tr.LazyLog(stringer("OK"), false)
948 ss.mu.Unlock() 1204 ss.mu.Unlock()
949 } 1205 }
950 return t.WriteStatus(ss.s, status.New(codes.OK, "")) 1206 err = t.WriteStatus(ss.s, status.New(codes.OK, ""))
951 1207 if ss.binlog != nil {
1208 ss.binlog.Log(&binarylog.ServerTrailer{
1209 Trailer: ss.s.Trailer(),
1210 Err: appErr,
1211 })
1212 }
1213 return err
952} 1214}
953 1215
954func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { 1216func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
@@ -977,47 +1239,27 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
977 } 1239 }
978 service := sm[:pos] 1240 service := sm[:pos]
979 method := sm[pos+1:] 1241 method := sm[pos+1:]
980 srv, ok := s.m[service] 1242
981 if !ok { 1243 if srv, ok := s.m[service]; ok {
982 if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { 1244 if md, ok := srv.md[method]; ok {
983 s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) 1245 s.processUnaryRPC(t, stream, srv, md, trInfo)
984 return 1246 return
985 } 1247 }
986 if trInfo != nil { 1248 if sd, ok := srv.sd[method]; ok {
987 trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) 1249 s.processStreamingRPC(t, stream, srv, sd, trInfo)
988 trInfo.tr.SetError() 1250 return
989 }
990 errDesc := fmt.Sprintf("unknown service %v", service)
991 if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
992 if trInfo != nil {
993 trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
994 trInfo.tr.SetError()
995 }
996 grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
997 }
998 if trInfo != nil {
999 trInfo.tr.Finish()
1000 } 1251 }
1001 return
1002 }
1003 // Unary RPC or Streaming RPC?
1004 if md, ok := srv.md[method]; ok {
1005 s.processUnaryRPC(t, stream, srv, md, trInfo)
1006 return
1007 } 1252 }
1008 if sd, ok := srv.sd[method]; ok { 1253 // Unknown service, or known server unknown method.
1009 s.processStreamingRPC(t, stream, srv, sd, trInfo) 1254 if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
1255 s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
1010 return 1256 return
1011 } 1257 }
1012 if trInfo != nil { 1258 if trInfo != nil {
1013 trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) 1259 trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
1014 trInfo.tr.SetError() 1260 trInfo.tr.SetError()
1015 } 1261 }
1016 if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { 1262 errDesc := fmt.Sprintf("unknown service %v", service)
1017 s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
1018 return
1019 }
1020 errDesc := fmt.Sprintf("unknown method %v", method)
1021 if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { 1263 if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
1022 if trInfo != nil { 1264 if trInfo != nil {
1023 trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) 1265 trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
@@ -1030,12 +1272,65 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
1030 } 1272 }
1031} 1273}
1032 1274
1275// The key to save ServerTransportStream in the context.
1276type streamKey struct{}
1277
1278// NewContextWithServerTransportStream creates a new context from ctx and
1279// attaches stream to it.
1280//
1281// This API is EXPERIMENTAL.
1282func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
1283 return context.WithValue(ctx, streamKey{}, stream)
1284}
1285
1286// ServerTransportStream is a minimal interface that a transport stream must
1287// implement. This can be used to mock an actual transport stream for tests of
1288// handler code that use, for example, grpc.SetHeader (which requires some
1289// stream to be in context).
1290//
1291// See also NewContextWithServerTransportStream.
1292//
1293// This API is EXPERIMENTAL.
1294type ServerTransportStream interface {
1295 Method() string
1296 SetHeader(md metadata.MD) error
1297 SendHeader(md metadata.MD) error
1298 SetTrailer(md metadata.MD) error
1299}
1300
1301// ServerTransportStreamFromContext returns the ServerTransportStream saved in
1302// ctx. Returns nil if the given context has no stream associated with it
1303// (which implies it is not an RPC invocation context).
1304//
1305// This API is EXPERIMENTAL.
1306func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
1307 s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
1308 return s
1309}
1310
1033// Stop stops the gRPC server. It immediately closes all open 1311// Stop stops the gRPC server. It immediately closes all open
1034// connections and listeners. 1312// connections and listeners.
1035// It cancels all active RPCs on the server side and the corresponding 1313// It cancels all active RPCs on the server side and the corresponding
1036// pending RPCs on the client side will get notified by connection 1314// pending RPCs on the client side will get notified by connection
1037// errors. 1315// errors.
1038func (s *Server) Stop() { 1316func (s *Server) Stop() {
1317 s.quitOnce.Do(func() {
1318 close(s.quit)
1319 })
1320
1321 defer func() {
1322 s.serveWG.Wait()
1323 s.doneOnce.Do(func() {
1324 close(s.done)
1325 })
1326 }()
1327
1328 s.channelzRemoveOnce.Do(func() {
1329 if channelz.IsOn() {
1330 channelz.RemoveEntry(s.channelzID)
1331 }
1332 })
1333
1039 s.mu.Lock() 1334 s.mu.Lock()
1040 listeners := s.lis 1335 listeners := s.lis
1041 s.lis = nil 1336 s.lis = nil
@@ -1053,7 +1348,6 @@ func (s *Server) Stop() {
1053 } 1348 }
1054 1349
1055 s.mu.Lock() 1350 s.mu.Lock()
1056 s.cancel()
1057 if s.events != nil { 1351 if s.events != nil {
1058 s.events.Finish() 1352 s.events.Finish()
1059 s.events = nil 1353 s.events = nil
@@ -1065,22 +1359,44 @@ func (s *Server) Stop() {
1065// accepting new connections and RPCs and blocks until all the pending RPCs are 1359// accepting new connections and RPCs and blocks until all the pending RPCs are
1066// finished. 1360// finished.
1067func (s *Server) GracefulStop() { 1361func (s *Server) GracefulStop() {
1362 s.quitOnce.Do(func() {
1363 close(s.quit)
1364 })
1365
1366 defer func() {
1367 s.doneOnce.Do(func() {
1368 close(s.done)
1369 })
1370 }()
1371
1372 s.channelzRemoveOnce.Do(func() {
1373 if channelz.IsOn() {
1374 channelz.RemoveEntry(s.channelzID)
1375 }
1376 })
1068 s.mu.Lock() 1377 s.mu.Lock()
1069 defer s.mu.Unlock()
1070 if s.conns == nil { 1378 if s.conns == nil {
1379 s.mu.Unlock()
1071 return 1380 return
1072 } 1381 }
1382
1073 for lis := range s.lis { 1383 for lis := range s.lis {
1074 lis.Close() 1384 lis.Close()
1075 } 1385 }
1076 s.lis = nil 1386 s.lis = nil
1077 s.cancel()
1078 if !s.drain { 1387 if !s.drain {
1079 for c := range s.conns { 1388 for c := range s.conns {
1080 c.(transport.ServerTransport).Drain() 1389 c.(transport.ServerTransport).Drain()
1081 } 1390 }
1082 s.drain = true 1391 s.drain = true
1083 } 1392 }
1393
1394 // Wait for serving threads to be ready to exit. Only then can we be sure no
1395 // new conns will be created.
1396 s.mu.Unlock()
1397 s.serveWG.Wait()
1398 s.mu.Lock()
1399
1084 for len(s.conns) != 0 { 1400 for len(s.conns) != 0 {
1085 s.cv.Wait() 1401 s.cv.Wait()
1086 } 1402 }
@@ -1089,26 +1405,23 @@ func (s *Server) GracefulStop() {
1089 s.events.Finish() 1405 s.events.Finish()
1090 s.events = nil 1406 s.events = nil
1091 } 1407 }
1408 s.mu.Unlock()
1092} 1409}
1093 1410
1094func init() { 1411// contentSubtype must be lowercase
1095 internal.TestingCloseConns = func(arg interface{}) { 1412// cannot return nil
1096 arg.(*Server).testingCloseConns() 1413func (s *Server) getCodec(contentSubtype string) baseCodec {
1414 if s.opts.codec != nil {
1415 return s.opts.codec
1097 } 1416 }
1098 internal.TestingUseHandlerImpl = func(arg interface{}) { 1417 if contentSubtype == "" {
1099 arg.(*Server).opts.useHandlerImpl = true 1418 return encoding.GetCodec(proto.Name)
1100 } 1419 }
1101} 1420 codec := encoding.GetCodec(contentSubtype)
1102 1421 if codec == nil {
1103// testingCloseConns closes all existing transports but keeps s.lis 1422 return encoding.GetCodec(proto.Name)
1104// accepting new connections.
1105func (s *Server) testingCloseConns() {
1106 s.mu.Lock()
1107 for c := range s.conns {
1108 c.Close()
1109 delete(s.conns, c)
1110 } 1423 }
1111 s.mu.Unlock() 1424 return codec
1112} 1425}
1113 1426
1114// SetHeader sets the header metadata. 1427// SetHeader sets the header metadata.
@@ -1121,9 +1434,9 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
1121 if md.Len() == 0 { 1434 if md.Len() == 0 {
1122 return nil 1435 return nil
1123 } 1436 }
1124 stream, ok := transport.StreamFromContext(ctx) 1437 stream := ServerTransportStreamFromContext(ctx)
1125 if !ok { 1438 if stream == nil {
1126 return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) 1439 return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
1127 } 1440 }
1128 return stream.SetHeader(md) 1441 return stream.SetHeader(md)
1129} 1442}
@@ -1131,15 +1444,11 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
1131// SendHeader sends header metadata. It may be called at most once. 1444// SendHeader sends header metadata. It may be called at most once.
1132// The provided md and headers set by SetHeader() will be sent. 1445// The provided md and headers set by SetHeader() will be sent.
1133func SendHeader(ctx context.Context, md metadata.MD) error { 1446func SendHeader(ctx context.Context, md metadata.MD) error {
1134 stream, ok := transport.StreamFromContext(ctx) 1447 stream := ServerTransportStreamFromContext(ctx)
1135 if !ok { 1448 if stream == nil {
1136 return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) 1449 return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
1137 } 1450 }
1138 t := stream.ServerTransport() 1451 if err := stream.SendHeader(md); err != nil {
1139 if t == nil {
1140 grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
1141 }
1142 if err := t.WriteHeader(stream, md); err != nil {
1143 return toRPCErr(err) 1452 return toRPCErr(err)
1144 } 1453 }
1145 return nil 1454 return nil
@@ -1151,9 +1460,27 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
1151 if md.Len() == 0 { 1460 if md.Len() == 0 {
1152 return nil 1461 return nil
1153 } 1462 }
1154 stream, ok := transport.StreamFromContext(ctx) 1463 stream := ServerTransportStreamFromContext(ctx)
1155 if !ok { 1464 if stream == nil {
1156 return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) 1465 return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
1157 } 1466 }
1158 return stream.SetTrailer(md) 1467 return stream.SetTrailer(md)
1159} 1468}
1469
1470// Method returns the method string for the server context. The returned
1471// string is in the format of "/service/method".
1472func Method(ctx context.Context) (string, bool) {
1473 s := ServerTransportStreamFromContext(ctx)
1474 if s == nil {
1475 return "", false
1476 }
1477 return s.Method(), true
1478}
1479
1480type channelzServer struct {
1481 s *Server
1482}
1483
1484func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
1485 return c.s.channelzMetric()
1486}
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
new file mode 100644
index 0000000..162857e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -0,0 +1,372 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package grpc
20
21import (
22 "encoding/json"
23 "fmt"
24 "strconv"
25 "strings"
26 "time"
27
28 "google.golang.org/grpc/codes"
29 "google.golang.org/grpc/grpclog"
30)
31
32const maxInt = int(^uint(0) >> 1)
33
34// MethodConfig defines the configuration recommended by the service providers for a
35// particular method.
36//
37// Deprecated: Users should not use this struct. Service config should be received
38// through name resolver, as specified here
39// https://github.com/grpc/grpc/blob/master/doc/service_config.md
40type MethodConfig struct {
41 // WaitForReady indicates whether RPCs sent to this method should wait until
42 // the connection is ready by default (!failfast). The value specified via the
43 // gRPC client API will override the value set here.
44 WaitForReady *bool
45 // Timeout is the default timeout for RPCs sent to this method. The actual
46 // deadline used will be the minimum of the value specified here and the value
47 // set by the application via the gRPC client API. If either one is not set,
48 // then the other will be used. If neither is set, then the RPC has no deadline.
49 Timeout *time.Duration
50 // MaxReqSize is the maximum allowed payload size for an individual request in a
51 // stream (client->server) in bytes. The size which is measured is the serialized
52 // payload after per-message compression (but before stream compression) in bytes.
53 // The actual value used is the minimum of the value specified here and the value set
54 // by the application via the gRPC client API. If either one is not set, then the other
55 // will be used. If neither is set, then the built-in default is used.
56 MaxReqSize *int
57 // MaxRespSize is the maximum allowed payload size for an individual response in a
58 // stream (server->client) in bytes.
59 MaxRespSize *int
60 // RetryPolicy configures retry options for the method.
61 retryPolicy *retryPolicy
62}
63
64// ServiceConfig is provided by the service provider and contains parameters for how
65// clients that connect to the service should behave.
66//
67// Deprecated: Users should not use this struct. Service config should be received
68// through name resolver, as specified here
69// https://github.com/grpc/grpc/blob/master/doc/service_config.md
70type ServiceConfig struct {
71 // LB is the load balancer the service providers recommends. The balancer specified
72 // via grpc.WithBalancer will override this.
73 LB *string
74
75 // Methods contains a map for the methods in this service. If there is an
76 // exact match for a method (i.e. /service/method) in the map, use the
77 // corresponding MethodConfig. If there's no exact match, look for the
78 // default config for the service (/service/) and use the corresponding
79 // MethodConfig if it exists. Otherwise, the method has no MethodConfig to
80 // use.
81 Methods map[string]MethodConfig
82
83 // If a retryThrottlingPolicy is provided, gRPC will automatically throttle
84 // retry attempts and hedged RPCs when the client’s ratio of failures to
85 // successes exceeds a threshold.
86 //
87 // For each server name, the gRPC client will maintain a token_count which is
88 // initially set to maxTokens, and can take values between 0 and maxTokens.
89 //
90 // Every outgoing RPC (regardless of service or method invoked) will change
91 // token_count as follows:
92 //
93 // - Every failed RPC will decrement the token_count by 1.
94 // - Every successful RPC will increment the token_count by tokenRatio.
95 //
96 // If token_count is less than or equal to maxTokens / 2, then RPCs will not
97 // be retried and hedged RPCs will not be sent.
98 retryThrottling *retryThrottlingPolicy
99 // healthCheckConfig must be set as one of the requirement to enable LB channel
100 // health check.
101 healthCheckConfig *healthCheckConfig
102}
103
104// healthCheckConfig defines the go-native version of the LB channel health check config.
105type healthCheckConfig struct {
106 // serviceName is the service name to use in the health-checking request.
107 ServiceName string
108}
109
110// retryPolicy defines the go-native version of the retry policy defined by the
111// service config here:
112// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
113type retryPolicy struct {
114 // MaxAttempts is the maximum number of attempts, including the original RPC.
115 //
116 // This field is required and must be two or greater.
117 maxAttempts int
118
119 // Exponential backoff parameters. The initial retry attempt will occur at
120 // random(0, initialBackoffMS). In general, the nth attempt will occur at
121 // random(0,
122 // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
123 //
124 // These fields are required and must be greater than zero.
125 initialBackoff time.Duration
126 maxBackoff time.Duration
127 backoffMultiplier float64
128
129 // The set of status codes which may be retried.
130 //
131 // Status codes are specified as strings, e.g., "UNAVAILABLE".
132 //
133 // This field is required and must be non-empty.
134 // Note: a set is used to store this for easy lookup.
135 retryableStatusCodes map[codes.Code]bool
136}
137
138type jsonRetryPolicy struct {
139 MaxAttempts int
140 InitialBackoff string
141 MaxBackoff string
142 BackoffMultiplier float64
143 RetryableStatusCodes []codes.Code
144}
145
146// retryThrottlingPolicy defines the go-native version of the retry throttling
147// policy defined by the service config here:
148// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
149type retryThrottlingPolicy struct {
150 // The number of tokens starts at maxTokens. The token_count will always be
151 // between 0 and maxTokens.
152 //
153 // This field is required and must be greater than zero.
154 MaxTokens float64
155 // The amount of tokens to add on each successful RPC. Typically this will
156 // be some number between 0 and 1, e.g., 0.1.
157 //
158 // This field is required and must be greater than zero. Up to 3 decimal
159 // places are supported.
160 TokenRatio float64
161}
162
163func parseDuration(s *string) (*time.Duration, error) {
164 if s == nil {
165 return nil, nil
166 }
167 if !strings.HasSuffix(*s, "s") {
168 return nil, fmt.Errorf("malformed duration %q", *s)
169 }
170 ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
171 if len(ss) > 2 {
172 return nil, fmt.Errorf("malformed duration %q", *s)
173 }
174 // hasDigits is set if either the whole or fractional part of the number is
175 // present, since both are optional but one is required.
176 hasDigits := false
177 var d time.Duration
178 if len(ss[0]) > 0 {
179 i, err := strconv.ParseInt(ss[0], 10, 32)
180 if err != nil {
181 return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
182 }
183 d = time.Duration(i) * time.Second
184 hasDigits = true
185 }
186 if len(ss) == 2 && len(ss[1]) > 0 {
187 if len(ss[1]) > 9 {
188 return nil, fmt.Errorf("malformed duration %q", *s)
189 }
190 f, err := strconv.ParseInt(ss[1], 10, 64)
191 if err != nil {
192 return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
193 }
194 for i := 9; i > len(ss[1]); i-- {
195 f *= 10
196 }
197 d += time.Duration(f)
198 hasDigits = true
199 }
200 if !hasDigits {
201 return nil, fmt.Errorf("malformed duration %q", *s)
202 }
203
204 return &d, nil
205}
206
207type jsonName struct {
208 Service *string
209 Method *string
210}
211
212func (j jsonName) generatePath() (string, bool) {
213 if j.Service == nil {
214 return "", false
215 }
216 res := "/" + *j.Service + "/"
217 if j.Method != nil {
218 res += *j.Method
219 }
220 return res, true
221}
222
223// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
224type jsonMC struct {
225 Name *[]jsonName
226 WaitForReady *bool
227 Timeout *string
228 MaxRequestMessageBytes *int64
229 MaxResponseMessageBytes *int64
230 RetryPolicy *jsonRetryPolicy
231}
232
233// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
234type jsonSC struct {
235 LoadBalancingPolicy *string
236 MethodConfig *[]jsonMC
237 RetryThrottling *retryThrottlingPolicy
238 HealthCheckConfig *healthCheckConfig
239}
240
241func parseServiceConfig(js string) (ServiceConfig, error) {
242 if len(js) == 0 {
243 return ServiceConfig{}, fmt.Errorf("no JSON service config provided")
244 }
245 var rsc jsonSC
246 err := json.Unmarshal([]byte(js), &rsc)
247 if err != nil {
248 grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
249 return ServiceConfig{}, err
250 }
251 sc := ServiceConfig{
252 LB: rsc.LoadBalancingPolicy,
253 Methods: make(map[string]MethodConfig),
254 retryThrottling: rsc.RetryThrottling,
255 healthCheckConfig: rsc.HealthCheckConfig,
256 }
257 if rsc.MethodConfig == nil {
258 return sc, nil
259 }
260
261 for _, m := range *rsc.MethodConfig {
262 if m.Name == nil {
263 continue
264 }
265 d, err := parseDuration(m.Timeout)
266 if err != nil {
267 grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
268 return ServiceConfig{}, err
269 }
270
271 mc := MethodConfig{
272 WaitForReady: m.WaitForReady,
273 Timeout: d,
274 }
275 if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
276 grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
277 return ServiceConfig{}, err
278 }
279 if m.MaxRequestMessageBytes != nil {
280 if *m.MaxRequestMessageBytes > int64(maxInt) {
281 mc.MaxReqSize = newInt(maxInt)
282 } else {
283 mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
284 }
285 }
286 if m.MaxResponseMessageBytes != nil {
287 if *m.MaxResponseMessageBytes > int64(maxInt) {
288 mc.MaxRespSize = newInt(maxInt)
289 } else {
290 mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
291 }
292 }
293 for _, n := range *m.Name {
294 if path, valid := n.generatePath(); valid {
295 sc.Methods[path] = mc
296 }
297 }
298 }
299
300 if sc.retryThrottling != nil {
301 if sc.retryThrottling.MaxTokens <= 0 ||
302 sc.retryThrottling.MaxTokens >= 1000 ||
303 sc.retryThrottling.TokenRatio <= 0 {
304 // Illegal throttling config; disable throttling.
305 sc.retryThrottling = nil
306 }
307 }
308 return sc, nil
309}
310
311func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
312 if jrp == nil {
313 return nil, nil
314 }
315 ib, err := parseDuration(&jrp.InitialBackoff)
316 if err != nil {
317 return nil, err
318 }
319 mb, err := parseDuration(&jrp.MaxBackoff)
320 if err != nil {
321 return nil, err
322 }
323
324 if jrp.MaxAttempts <= 1 ||
325 *ib <= 0 ||
326 *mb <= 0 ||
327 jrp.BackoffMultiplier <= 0 ||
328 len(jrp.RetryableStatusCodes) == 0 {
329 grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
330 return nil, nil
331 }
332
333 rp := &retryPolicy{
334 maxAttempts: jrp.MaxAttempts,
335 initialBackoff: *ib,
336 maxBackoff: *mb,
337 backoffMultiplier: jrp.BackoffMultiplier,
338 retryableStatusCodes: make(map[codes.Code]bool),
339 }
340 if rp.maxAttempts > 5 {
341 // TODO(retry): Make the max maxAttempts configurable.
342 rp.maxAttempts = 5
343 }
344 for _, code := range jrp.RetryableStatusCodes {
345 rp.retryableStatusCodes[code] = true
346 }
347 return rp, nil
348}
349
350func min(a, b *int) *int {
351 if *a < *b {
352 return a
353 }
354 return b
355}
356
357func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
358 if mcMax == nil && doptMax == nil {
359 return &defaultVal
360 }
361 if mcMax != nil && doptMax != nil {
362 return min(mcMax, doptMax)
363 }
364 if mcMax != nil {
365 return mcMax
366 }
367 return doptMax
368}
369
370func newInt(b int) *int {
371 return &b
372}
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
index 05b384c..dc03731 100644
--- a/vendor/google.golang.org/grpc/stats/handlers.go
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -19,9 +19,8 @@
19package stats 19package stats
20 20
21import ( 21import (
22 "context"
22 "net" 23 "net"
23
24 "golang.org/x/net/context"
25) 24)
26 25
27// ConnTagInfo defines the relevant information needed by connection context tagger. 26// ConnTagInfo defines the relevant information needed by connection context tagger.
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 338a3a7..84f77da 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -16,12 +16,15 @@
16 * 16 *
17 */ 17 */
18 18
19//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
20
19// Package stats is for collecting and reporting various network and RPC stats. 21// Package stats is for collecting and reporting various network and RPC stats.
20// This package is for monitoring purpose only. All fields are read-only. 22// This package is for monitoring purpose only. All fields are read-only.
21// All APIs are experimental. 23// All APIs are experimental.
22package stats // import "google.golang.org/grpc/stats" 24package stats // import "google.golang.org/grpc/stats"
23 25
24import ( 26import (
27 "context"
25 "net" 28 "net"
26 "time" 29 "time"
27) 30)
@@ -131,8 +134,6 @@ func (s *OutPayload) isRPCStats() {}
131type OutHeader struct { 134type OutHeader struct {
132 // Client is true if this OutHeader is from client side. 135 // Client is true if this OutHeader is from client side.
133 Client bool 136 Client bool
134 // WireLength is the wire length of header.
135 WireLength int
136 137
137 // The following fields are valid only if Client is true. 138 // The following fields are valid only if Client is true.
138 // FullMethod is the full RPC method string, i.e., /package.service/method. 139 // FullMethod is the full RPC method string, i.e., /package.service/method.
@@ -167,6 +168,8 @@ func (s *OutTrailer) isRPCStats() {}
167type End struct { 168type End struct {
168 // Client is true if this End is from client side. 169 // Client is true if this End is from client side.
169 Client bool 170 Client bool
171 // BeginTime is the time when the RPC began.
172 BeginTime time.Time
170 // EndTime is the time when the RPC ends. 173 // EndTime is the time when the RPC ends.
171 EndTime time.Time 174 EndTime time.Time
172 // Error is the error the RPC ended with. It is an error generated from 175 // Error is the error the RPC ended with. It is an error generated from
@@ -208,3 +211,85 @@ type ConnEnd struct {
208func (s *ConnEnd) IsClient() bool { return s.Client } 211func (s *ConnEnd) IsClient() bool { return s.Client }
209 212
210func (s *ConnEnd) isConnStats() {} 213func (s *ConnEnd) isConnStats() {}
214
215type incomingTagsKey struct{}
216type outgoingTagsKey struct{}
217
218// SetTags attaches stats tagging data to the context, which will be sent in
219// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
220// SetTags will overwrite the values from earlier calls.
221//
222// NOTE: this is provided only for backward compatibility with existing clients
223// and will likely be removed in an upcoming release. New uses should transmit
224// this type of data using metadata with a different, non-reserved (i.e. does
225// not begin with "grpc-") header name.
226func SetTags(ctx context.Context, b []byte) context.Context {
227 return context.WithValue(ctx, outgoingTagsKey{}, b)
228}
229
230// Tags returns the tags from the context for the inbound RPC.
231//
232// NOTE: this is provided only for backward compatibility with existing clients
233// and will likely be removed in an upcoming release. New uses should transmit
234// this type of data using metadata with a different, non-reserved (i.e. does
235// not begin with "grpc-") header name.
236func Tags(ctx context.Context) []byte {
237 b, _ := ctx.Value(incomingTagsKey{}).([]byte)
238 return b
239}
240
241// SetIncomingTags attaches stats tagging data to the context, to be read by
242// the application (not sent in outgoing RPCs).
243//
244// This is intended for gRPC-internal use ONLY.
245func SetIncomingTags(ctx context.Context, b []byte) context.Context {
246 return context.WithValue(ctx, incomingTagsKey{}, b)
247}
248
249// OutgoingTags returns the tags from the context for the outbound RPC.
250//
251// This is intended for gRPC-internal use ONLY.
252func OutgoingTags(ctx context.Context) []byte {
253 b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
254 return b
255}
256
257type incomingTraceKey struct{}
258type outgoingTraceKey struct{}
259
260// SetTrace attaches stats tagging data to the context, which will be sent in
261// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
262// SetTrace will overwrite the values from earlier calls.
263//
264// NOTE: this is provided only for backward compatibility with existing clients
265// and will likely be removed in an upcoming release. New uses should transmit
266// this type of data using metadata with a different, non-reserved (i.e. does
267// not begin with "grpc-") header name.
268func SetTrace(ctx context.Context, b []byte) context.Context {
269 return context.WithValue(ctx, outgoingTraceKey{}, b)
270}
271
272// Trace returns the trace from the context for the inbound RPC.
273//
274// NOTE: this is provided only for backward compatibility with existing clients
275// and will likely be removed in an upcoming release. New uses should transmit
276// this type of data using metadata with a different, non-reserved (i.e. does
277// not begin with "grpc-") header name.
278func Trace(ctx context.Context) []byte {
279 b, _ := ctx.Value(incomingTraceKey{}).([]byte)
280 return b
281}
282
283// SetIncomingTrace attaches stats tagging data to the context, to be read by
284// the application (not sent in outgoing RPCs). It is intended for
285// gRPC-internal use.
286func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
287 return context.WithValue(ctx, incomingTraceKey{}, b)
288}
289
290// OutgoingTrace returns the trace from the context for the outbound RPC. It is
291// intended for gRPC-internal use.
292func OutgoingTrace(ctx context.Context) []byte {
293 b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
294 return b
295}
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 871dc4b..ed36681 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -28,6 +28,7 @@
28package status 28package status
29 29
30import ( 30import (
31 "context"
31 "errors" 32 "errors"
32 "fmt" 33 "fmt"
33 34
@@ -46,7 +47,7 @@ func (se *statusError) Error() string {
46 return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) 47 return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
47} 48}
48 49
49func (se *statusError) status() *Status { 50func (se *statusError) GRPCStatus() *Status {
50 return &Status{s: (*spb.Status)(se)} 51 return &Status{s: (*spb.Status)(se)}
51} 52}
52 53
@@ -120,15 +121,25 @@ func FromProto(s *spb.Status) *Status {
120} 121}
121 122
122// FromError returns a Status representing err if it was produced from this 123// FromError returns a Status representing err if it was produced from this
123// package, otherwise it returns nil, false. 124// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
125// Status is returned with codes.Unknown and the original error message.
124func FromError(err error) (s *Status, ok bool) { 126func FromError(err error) (s *Status, ok bool) {
125 if err == nil { 127 if err == nil {
126 return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true 128 return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
127 } 129 }
128 if s, ok := err.(*statusError); ok { 130 if se, ok := err.(interface {
129 return s.status(), true 131 GRPCStatus() *Status
132 }); ok {
133 return se.GRPCStatus(), true
130 } 134 }
131 return nil, false 135 return New(codes.Unknown, err.Error()), false
136}
137
138// Convert is a convenience function which removes the need to handle the
139// boolean return value from FromError.
140func Convert(err error) *Status {
141 s, _ := FromError(err)
142 return s
132} 143}
133 144
134// WithDetails returns a new status with the provided details messages appended to the status. 145// WithDetails returns a new status with the provided details messages appended to the status.
@@ -166,3 +177,34 @@ func (s *Status) Details() []interface{} {
166 } 177 }
167 return details 178 return details
168} 179}
180
181// Code returns the Code of the error if it is a Status error, codes.OK if err
182// is nil, or codes.Unknown otherwise.
183func Code(err error) codes.Code {
184 // Don't use FromError to avoid allocation of OK status.
185 if err == nil {
186 return codes.OK
187 }
188 if se, ok := err.(interface {
189 GRPCStatus() *Status
190 }); ok {
191 return se.GRPCStatus().Code()
192 }
193 return codes.Unknown
194}
195
196// FromContextError converts a context error into a Status. It returns a
197// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
198// non-nil and not a context error.
199func FromContextError(err error) *Status {
200 switch err {
201 case nil:
202 return New(codes.OK, "")
203 case context.DeadlineExceeded:
204 return New(codes.DeadlineExceeded, err.Error())
205 case context.Canceled:
206 return New(codes.Canceled, err.Error())
207 default:
208 return New(codes.Unknown, err.Error())
209 }
210}
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 1c621ba..d06279a 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -19,24 +19,35 @@
19package grpc 19package grpc
20 20
21import ( 21import (
22 "bytes" 22 "context"
23 "errors" 23 "errors"
24 "io" 24 "io"
25 "math"
26 "strconv"
25 "sync" 27 "sync"
26 "time" 28 "time"
27 29
28 "golang.org/x/net/context"
29 "golang.org/x/net/trace" 30 "golang.org/x/net/trace"
31 "google.golang.org/grpc/balancer"
30 "google.golang.org/grpc/codes" 32 "google.golang.org/grpc/codes"
33 "google.golang.org/grpc/connectivity"
34 "google.golang.org/grpc/encoding"
35 "google.golang.org/grpc/grpclog"
36 "google.golang.org/grpc/internal/binarylog"
37 "google.golang.org/grpc/internal/channelz"
38 "google.golang.org/grpc/internal/grpcrand"
39 "google.golang.org/grpc/internal/transport"
31 "google.golang.org/grpc/metadata" 40 "google.golang.org/grpc/metadata"
32 "google.golang.org/grpc/peer" 41 "google.golang.org/grpc/peer"
33 "google.golang.org/grpc/stats" 42 "google.golang.org/grpc/stats"
34 "google.golang.org/grpc/status" 43 "google.golang.org/grpc/status"
35 "google.golang.org/grpc/transport"
36) 44)
37 45
38// StreamHandler defines the handler called by gRPC server to complete the 46// StreamHandler defines the handler called by gRPC server to complete the
39// execution of a streaming RPC. 47// execution of a streaming RPC. If a StreamHandler returns an error, it
48// should be produced by the status package, or else gRPC will use
49// codes.Unknown as the status code and err.Error() as the status message
50// of the RPC.
40type StreamHandler func(srv interface{}, stream ServerStream) error 51type StreamHandler func(srv interface{}, stream ServerStream) error
41 52
42// StreamDesc represents a streaming RPC service's method specification. 53// StreamDesc represents a streaming RPC service's method specification.
@@ -50,30 +61,21 @@ type StreamDesc struct {
50} 61}
51 62
52// Stream defines the common interface a client or server stream has to satisfy. 63// Stream defines the common interface a client or server stream has to satisfy.
64//
65// Deprecated: See ClientStream and ServerStream documentation instead.
53type Stream interface { 66type Stream interface {
54 // Context returns the context for this stream. 67 // Deprecated: See ClientStream and ServerStream documentation instead.
55 Context() context.Context 68 Context() context.Context
56 // SendMsg blocks until it sends m, the stream is done or the stream 69 // Deprecated: See ClientStream and ServerStream documentation instead.
57 // breaks.
58 // On error, it aborts the stream and returns an RPC status on client
59 // side. On server side, it simply returns the error to the caller.
60 // SendMsg is called by generated code. Also Users can call SendMsg
61 // directly when it is really needed in their use cases.
62 // It's safe to have a goroutine calling SendMsg and another goroutine calling
63 // recvMsg on the same stream at the same time.
64 // But it is not safe to call SendMsg on the same stream in different goroutines.
65 SendMsg(m interface{}) error 70 SendMsg(m interface{}) error
66 // RecvMsg blocks until it receives a message or the stream is 71 // Deprecated: See ClientStream and ServerStream documentation instead.
67 // done. On client side, it returns io.EOF when the stream is done. On
68 // any other error, it aborts the stream and returns an RPC status. On
69 // server side, it simply returns the error to the caller.
70 // It's safe to have a goroutine calling SendMsg and another goroutine calling
71 // recvMsg on the same stream at the same time.
72 // But it is not safe to call RecvMsg on the same stream in different goroutines.
73 RecvMsg(m interface{}) error 72 RecvMsg(m interface{}) error
74} 73}
75 74
76// ClientStream defines the interface a client stream has to satisfy. 75// ClientStream defines the client-side behavior of a streaming RPC.
76//
77// All errors returned from ClientStream methods are compatible with the
78// status package.
77type ClientStream interface { 79type ClientStream interface {
78 // Header returns the header metadata received from the server if there 80 // Header returns the header metadata received from the server if there
79 // is any. It blocks if the metadata is not ready to read. 81 // is any. It blocks if the metadata is not ready to read.
@@ -83,62 +85,147 @@ type ClientStream interface {
83 // stream.Recv has returned a non-nil error (including io.EOF). 85 // stream.Recv has returned a non-nil error (including io.EOF).
84 Trailer() metadata.MD 86 Trailer() metadata.MD
85 // CloseSend closes the send direction of the stream. It closes the stream 87 // CloseSend closes the send direction of the stream. It closes the stream
86 // when non-nil error is met. 88 // when non-nil error is met. It is also not safe to call CloseSend
89 // concurrently with SendMsg.
87 CloseSend() error 90 CloseSend() error
88 // Stream.SendMsg() may return a non-nil error when something wrong happens sending 91 // Context returns the context for this stream.
89 // the request. The returned error indicates the status of this sending, not the final 92 //
90 // status of the RPC. 93 // It should not be called until after Header or RecvMsg has returned. Once
91 // Always call Stream.RecvMsg() to get the final status if you care about the status of 94 // called, subsequent client-side retries are disabled.
92 // the RPC. 95 Context() context.Context
93 Stream 96 // SendMsg is generally called by generated code. On error, SendMsg aborts
97 // the stream. If the error was generated by the client, the status is
98 // returned directly; otherwise, io.EOF is returned and the status of
99 // the stream may be discovered using RecvMsg.
100 //
101 // SendMsg blocks until:
102 // - There is sufficient flow control to schedule m with the transport, or
103 // - The stream is done, or
104 // - The stream breaks.
105 //
106 // SendMsg does not wait until the message is received by the server. An
107 // untimely stream closure may result in lost messages. To ensure delivery,
108 // users should ensure the RPC completed successfully using RecvMsg.
109 //
110 // It is safe to have a goroutine calling SendMsg and another goroutine
111 // calling RecvMsg on the same stream at the same time, but it is not safe
112 // to call SendMsg on the same stream in different goroutines. It is also
113 // not safe to call CloseSend concurrently with SendMsg.
114 SendMsg(m interface{}) error
115 // RecvMsg blocks until it receives a message into m or the stream is
116 // done. It returns io.EOF when the stream completes successfully. On
117 // any other error, the stream is aborted and the error contains the RPC
118 // status.
119 //
120 // It is safe to have a goroutine calling SendMsg and another goroutine
121 // calling RecvMsg on the same stream at the same time, but it is not
122 // safe to call RecvMsg on the same stream in different goroutines.
123 RecvMsg(m interface{}) error
94} 124}
95 125
96// NewClientStream creates a new Stream for the client side. This is called 126// NewStream creates a new Stream for the client side. This is typically
97// by generated code. 127// called by generated code. ctx is used for the lifetime of the stream.
98func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { 128//
129// To ensure resources are not leaked due to the stream returned, one of the following
130// actions must be performed:
131//
132// 1. Call Close on the ClientConn.
133// 2. Cancel the context provided.
134// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
135// client-streaming RPC, for instance, might use the helper function
136// CloseAndRecv (note that CloseSend does not Recv, therefore is not
137// guaranteed to release all resources).
138// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
139//
140// If none of the above happen, a goroutine and a context will be leaked, and grpc
141// will not call the optionally-configured stats handler with a stats.End message.
142func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
143 // allow interceptor to see all applicable call options, which means those
144 // configured as defaults from dial option as well as per-call options
145 opts = combine(cc.dopts.callOptions, opts)
146
99 if cc.dopts.streamInt != nil { 147 if cc.dopts.streamInt != nil {
100 return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) 148 return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
101 } 149 }
102 return newClientStream(ctx, desc, cc, method, opts...) 150 return newClientStream(ctx, desc, cc, method, opts...)
103} 151}
104 152
153// NewClientStream is a wrapper for ClientConn.NewStream.
154func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
155 return cc.NewStream(ctx, desc, method, opts...)
156}
157
105func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { 158func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
106 var ( 159 if channelz.IsOn() {
107 t transport.ClientTransport 160 cc.incrCallsStarted()
108 s *transport.Stream 161 defer func() {
109 put func() 162 if err != nil {
110 cancel context.CancelFunc 163 cc.incrCallsFailed()
111 ) 164 }
112 c := defaultCallInfo 165 }()
166 }
167 c := defaultCallInfo()
168 // Provide an opportunity for the first RPC to see the first service config
169 // provided by the resolver.
170 if err := cc.waitForResolvedAddrs(ctx); err != nil {
171 return nil, err
172 }
113 mc := cc.GetMethodConfig(method) 173 mc := cc.GetMethodConfig(method)
114 if mc.WaitForReady != nil { 174 if mc.WaitForReady != nil {
115 c.failFast = !*mc.WaitForReady 175 c.failFast = !*mc.WaitForReady
116 } 176 }
117 177
118 if mc.Timeout != nil { 178 // Possible context leak:
179 // The cancel function for the child context we create will only be called
180 // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
181 // an error is generated by SendMsg.
182 // https://github.com/grpc/grpc-go/issues/1818.
183 var cancel context.CancelFunc
184 if mc.Timeout != nil && *mc.Timeout >= 0 {
119 ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) 185 ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
186 } else {
187 ctx, cancel = context.WithCancel(ctx)
120 } 188 }
189 defer func() {
190 if err != nil {
191 cancel()
192 }
193 }()
121 194
122 opts = append(cc.dopts.callOptions, opts...)
123 for _, o := range opts { 195 for _, o := range opts {
124 if err := o.before(&c); err != nil { 196 if err := o.before(c); err != nil {
125 return nil, toRPCErr(err) 197 return nil, toRPCErr(err)
126 } 198 }
127 } 199 }
128 c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) 200 c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
129 c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) 201 c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
202 if err := setCallInfoCodec(c); err != nil {
203 return nil, err
204 }
130 205
131 callHdr := &transport.CallHdr{ 206 callHdr := &transport.CallHdr{
132 Host: cc.authority, 207 Host: cc.authority,
133 Method: method, 208 Method: method,
134 // If it's not client streaming, we should already have the request to be sent, 209 ContentSubtype: c.contentSubtype,
135 // so we don't flush the header. 210 }
136 // If it's client streaming, the user may never send a request or send it any 211
137 // time soon, so we ask the transport to flush the header. 212 // Set our outgoing compression according to the UseCompressor CallOption, if
138 Flush: desc.ClientStreams, 213 // set. In that case, also find the compressor from the encoding package.
139 } 214 // Otherwise, use the compressor configured by the WithCompressor DialOption,
140 if cc.dopts.cp != nil { 215 // if set.
216 var cp Compressor
217 var comp encoding.Compressor
218 if ct := c.compressorType; ct != "" {
219 callHdr.SendCompress = ct
220 if ct != encoding.Identity {
221 comp = encoding.GetCompressor(ct)
222 if comp == nil {
223 return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
224 }
225 }
226 } else if cc.dopts.cp != nil {
141 callHdr.SendCompress = cc.dopts.cp.Type() 227 callHdr.SendCompress = cc.dopts.cp.Type()
228 cp = cc.dopts.cp
142 } 229 }
143 if c.creds != nil { 230 if c.creds != nil {
144 callHdr.Creds = c.creds 231 callHdr.Creds = c.creds
@@ -152,380 +239,1019 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
152 } 239 }
153 trInfo.tr.LazyLog(&trInfo.firstLine, false) 240 trInfo.tr.LazyLog(&trInfo.firstLine, false)
154 ctx = trace.NewContext(ctx, trInfo.tr) 241 ctx = trace.NewContext(ctx, trInfo.tr)
155 defer func() {
156 if err != nil {
157 // Need to call tr.finish() if error is returned.
158 // Because tr will not be returned to caller.
159 trInfo.tr.LazyPrintf("RPC: [%v]", err)
160 trInfo.tr.SetError()
161 trInfo.tr.Finish()
162 }
163 }()
164 } 242 }
165 ctx = newContextWithRPCInfo(ctx) 243 ctx = newContextWithRPCInfo(ctx, c.failFast)
166 sh := cc.dopts.copts.StatsHandler 244 sh := cc.dopts.copts.StatsHandler
245 var beginTime time.Time
167 if sh != nil { 246 if sh != nil {
168 ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) 247 ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
248 beginTime = time.Now()
169 begin := &stats.Begin{ 249 begin := &stats.Begin{
170 Client: true, 250 Client: true,
171 BeginTime: time.Now(), 251 BeginTime: beginTime,
172 FailFast: c.failFast, 252 FailFast: c.failFast,
173 } 253 }
174 sh.HandleRPC(ctx, begin) 254 sh.HandleRPC(ctx, begin)
175 defer func() {
176 if err != nil {
177 // Only handle end stats if err != nil.
178 end := &stats.End{
179 Client: true,
180 Error: err,
181 }
182 sh.HandleRPC(ctx, end)
183 }
184 }()
185 } 255 }
186 gopts := BalancerGetOptions{ 256
187 BlockingWait: !c.failFast, 257 cs := &clientStream{
258 callHdr: callHdr,
259 ctx: ctx,
260 methodConfig: &mc,
261 opts: opts,
262 callInfo: c,
263 cc: cc,
264 desc: desc,
265 codec: c.codec,
266 cp: cp,
267 comp: comp,
268 cancel: cancel,
269 beginTime: beginTime,
270 firstAttempt: true,
271 }
272 if !cc.dopts.disableRetry {
273 cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
188 } 274 }
189 for { 275 cs.binlog = binarylog.GetMethodLogger(method)
190 t, put, err = cc.getTransport(ctx, gopts)
191 if err != nil {
192 // TODO(zhaoq): Probably revisit the error handling.
193 if _, ok := status.FromError(err); ok {
194 return nil, err
195 }
196 if err == errConnClosing || err == errConnUnavailable {
197 if c.failFast {
198 return nil, Errorf(codes.Unavailable, "%v", err)
199 }
200 continue
201 }
202 // All the other errors are treated as Internal errors.
203 return nil, Errorf(codes.Internal, "%v", err)
204 }
205 276
206 s, err = t.NewStream(ctx, callHdr) 277 cs.callInfo.stream = cs
207 if err != nil { 278 // Only this initial attempt has stats/tracing.
208 if _, ok := err.(transport.ConnectionError); ok && put != nil { 279 // TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
209 // If error is connection error, transport was sending data on wire, 280 if err := cs.newAttemptLocked(sh, trInfo); err != nil {
210 // and we are not sure if anything has been sent on wire. 281 cs.finish(err)
211 // If error is not connection error, we are sure nothing has been sent. 282 return nil, err
212 updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) 283 }
213 } 284
214 if put != nil { 285 op := func(a *csAttempt) error { return a.newStream() }
215 put() 286 if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
216 put = nil 287 cs.finish(err)
217 } 288 return nil, err
218 if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { 289 }
219 continue 290
291 if cs.binlog != nil {
292 md, _ := metadata.FromOutgoingContext(ctx)
293 logEntry := &binarylog.ClientHeader{
294 OnClientSide: true,
295 Header: md,
296 MethodName: method,
297 Authority: cs.cc.authority,
298 }
299 if deadline, ok := ctx.Deadline(); ok {
300 logEntry.Timeout = deadline.Sub(time.Now())
301 if logEntry.Timeout < 0 {
302 logEntry.Timeout = 0
220 } 303 }
221 return nil, toRPCErr(err)
222 } 304 }
223 break 305 cs.binlog.Log(logEntry)
224 } 306 }
225 // Set callInfo.peer object from stream's context. 307
226 if peer, ok := peer.FromContext(s.Context()); ok { 308 if desc != unaryStreamDesc {
227 c.peer = peer 309 // Listen on cc and stream contexts to cleanup when the user closes the
310 // ClientConn or cancels the stream context. In all other cases, an error
311 // should already be injected into the recv buffer by the transport, which
312 // the client will eventually receive, and then we will cancel the stream's
313 // context in clientStream.finish.
314 go func() {
315 select {
316 case <-cc.ctx.Done():
317 cs.finish(ErrClientConnClosing)
318 case <-ctx.Done():
319 cs.finish(toRPCErr(ctx.Err()))
320 }
321 }()
228 } 322 }
229 cs := &clientStream{
230 opts: opts,
231 c: c,
232 desc: desc,
233 codec: cc.dopts.codec,
234 cp: cc.dopts.cp,
235 dc: cc.dopts.dc,
236 cancel: cancel,
237
238 put: put,
239 t: t,
240 s: s,
241 p: &parser{r: s},
242
243 tracing: EnableTracing,
244 trInfo: trInfo,
245
246 statsCtx: ctx,
247 statsHandler: cc.dopts.copts.StatsHandler,
248 }
249 if cc.dopts.cp != nil {
250 cs.cbuf = new(bytes.Buffer)
251 }
252 // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
253 // when there is no pending I/O operations on this stream.
254 go func() {
255 select {
256 case <-t.Error():
257 // Incur transport error, simply exit.
258 case <-cc.ctx.Done():
259 cs.finish(ErrClientConnClosing)
260 cs.closeTransportStream(ErrClientConnClosing)
261 case <-s.Done():
262 // TODO: The trace of the RPC is terminated here when there is no pending
263 // I/O, which is probably not the optimal solution.
264 cs.finish(s.Status().Err())
265 cs.closeTransportStream(nil)
266 case <-s.GoAway():
267 cs.finish(errConnDrain)
268 cs.closeTransportStream(errConnDrain)
269 case <-s.Context().Done():
270 err := s.Context().Err()
271 cs.finish(err)
272 cs.closeTransportStream(transport.ContextErr(err))
273 }
274 }()
275 return cs, nil 323 return cs, nil
276} 324}
277 325
326func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error {
327 cs.attempt = &csAttempt{
328 cs: cs,
329 dc: cs.cc.dopts.dc,
330 statsHandler: sh,
331 trInfo: trInfo,
332 }
333
334 if err := cs.ctx.Err(); err != nil {
335 return toRPCErr(err)
336 }
337 t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
338 if err != nil {
339 return err
340 }
341 cs.attempt.t = t
342 cs.attempt.done = done
343 return nil
344}
345
346func (a *csAttempt) newStream() error {
347 cs := a.cs
348 cs.callHdr.PreviousAttempts = cs.numRetries
349 s, err := a.t.NewStream(cs.ctx, cs.callHdr)
350 if err != nil {
351 return toRPCErr(err)
352 }
353 cs.attempt.s = s
354 cs.attempt.p = &parser{r: s}
355 return nil
356}
357
278// clientStream implements a client side Stream. 358// clientStream implements a client side Stream.
279type clientStream struct { 359type clientStream struct {
280 opts []CallOption 360 callHdr *transport.CallHdr
281 c callInfo 361 opts []CallOption
282 t transport.ClientTransport 362 callInfo *callInfo
283 s *transport.Stream 363 cc *ClientConn
284 p *parser 364 desc *StreamDesc
285 desc *StreamDesc 365
286 codec Codec 366 codec baseCodec
287 cp Compressor 367 cp Compressor
288 cbuf *bytes.Buffer 368 comp encoding.Compressor
289 dc Decompressor 369
290 cancel context.CancelFunc 370 cancel context.CancelFunc // cancels all attempts
371
372 sentLast bool // sent an end stream
373 beginTime time.Time
374
375 methodConfig *MethodConfig
376
377 ctx context.Context // the application's context, wrapped by stats/tracing
291 378
292 tracing bool // set to EnableTracing when the clientStream is created. 379 retryThrottler *retryThrottler // The throttler active when the RPC began.
293 380
294 mu sync.Mutex 381 binlog *binarylog.MethodLogger // Binary logger, can be nil.
295 put func() 382 // serverHeaderBinlogged is a boolean for whether server header has been
296 closed bool 383 // logged. Server header will be logged when the first time one of those
297 finished bool 384 // happens: stream.Header(), stream.Recv().
298 // trInfo.tr is set when the clientStream is created (if EnableTracing is true), 385 //
299 // and is set to nil when the clientStream's finish method is called. 386 // It's only read and used by Recv() and Header(), so it doesn't need to be
387 // synchronized.
388 serverHeaderBinlogged bool
389
390 mu sync.Mutex
391 firstAttempt bool // if true, transparent retry is valid
392 numRetries int // exclusive of transparent retry attempt(s)
393 numRetriesSincePushback int // retries since pushback; to reset backoff
394 finished bool // TODO: replace with atomic cmpxchg or sync.Once?
395 attempt *csAttempt // the active client stream attempt
396 // TODO(hedging): hedging will have multiple attempts simultaneously.
397 committed bool // active attempt committed for retry?
398 buffer []func(a *csAttempt) error // operations to replay on retry
399 bufferSize int // current size of buffer
400}
401
402// csAttempt implements a single transport stream attempt within a
403// clientStream.
404type csAttempt struct {
405 cs *clientStream
406 t transport.ClientTransport
407 s *transport.Stream
408 p *parser
409 done func(balancer.DoneInfo)
410
411 finished bool
412 dc Decompressor
413 decomp encoding.Compressor
414 decompSet bool
415
416 mu sync.Mutex // guards trInfo.tr
417 // trInfo.tr is set when created (if EnableTracing is true),
418 // and cleared when the finish method is called.
300 trInfo traceInfo 419 trInfo traceInfo
301 420
302 // statsCtx keeps the user context for stats handling.
303 // All stats collection should use the statsCtx (instead of the stream context)
304 // so that all the generated stats for a particular RPC can be associated in the processing phase.
305 statsCtx context.Context
306 statsHandler stats.Handler 421 statsHandler stats.Handler
307} 422}
308 423
424func (cs *clientStream) commitAttemptLocked() {
425 cs.committed = true
426 cs.buffer = nil
427}
428
429func (cs *clientStream) commitAttempt() {
430 cs.mu.Lock()
431 cs.commitAttemptLocked()
432 cs.mu.Unlock()
433}
434
435// shouldRetry returns nil if the RPC should be retried; otherwise it returns
436// the error that should be returned by the operation.
437func (cs *clientStream) shouldRetry(err error) error {
438 if cs.attempt.s == nil && !cs.callInfo.failFast {
439 // In the event of any error from NewStream (attempt.s == nil), we
440 // never attempted to write anything to the wire, so we can retry
441 // indefinitely for non-fail-fast RPCs.
442 return nil
443 }
444 if cs.finished || cs.committed {
445 // RPC is finished or committed; cannot retry.
446 return err
447 }
448 // Wait for the trailers.
449 if cs.attempt.s != nil {
450 <-cs.attempt.s.Done()
451 }
452 if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
453 // First attempt, wait-for-ready, stream unprocessed: transparently retry.
454 cs.firstAttempt = false
455 return nil
456 }
457 cs.firstAttempt = false
458 if cs.cc.dopts.disableRetry {
459 return err
460 }
461
462 pushback := 0
463 hasPushback := false
464 if cs.attempt.s != nil {
465 if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
466 return err
467 }
468
469 // TODO(retry): Move down if the spec changes to not check server pushback
470 // before considering this a failure for throttling.
471 sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
472 if len(sps) == 1 {
473 var e error
474 if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
475 grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
476 cs.retryThrottler.throttle() // This counts as a failure for throttling.
477 return err
478 }
479 hasPushback = true
480 } else if len(sps) > 1 {
481 grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
482 cs.retryThrottler.throttle() // This counts as a failure for throttling.
483 return err
484 }
485 }
486
487 var code codes.Code
488 if cs.attempt.s != nil {
489 code = cs.attempt.s.Status().Code()
490 } else {
491 code = status.Convert(err).Code()
492 }
493
494 rp := cs.methodConfig.retryPolicy
495 if rp == nil || !rp.retryableStatusCodes[code] {
496 return err
497 }
498
499 // Note: the ordering here is important; we count this as a failure
500 // only if the code matched a retryable code.
501 if cs.retryThrottler.throttle() {
502 return err
503 }
504 if cs.numRetries+1 >= rp.maxAttempts {
505 return err
506 }
507
508 var dur time.Duration
509 if hasPushback {
510 dur = time.Millisecond * time.Duration(pushback)
511 cs.numRetriesSincePushback = 0
512 } else {
513 fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
514 cur := float64(rp.initialBackoff) * fact
515 if max := float64(rp.maxBackoff); cur > max {
516 cur = max
517 }
518 dur = time.Duration(grpcrand.Int63n(int64(cur)))
519 cs.numRetriesSincePushback++
520 }
521
522 // TODO(dfawley): we could eagerly fail here if dur puts us past the
523 // deadline, but unsure if it is worth doing.
524 t := time.NewTimer(dur)
525 select {
526 case <-t.C:
527 cs.numRetries++
528 return nil
529 case <-cs.ctx.Done():
530 t.Stop()
531 return status.FromContextError(cs.ctx.Err()).Err()
532 }
533}
534
535// Returns nil if a retry was performed and succeeded; error otherwise.
536func (cs *clientStream) retryLocked(lastErr error) error {
537 for {
538 cs.attempt.finish(lastErr)
539 if err := cs.shouldRetry(lastErr); err != nil {
540 cs.commitAttemptLocked()
541 return err
542 }
543 if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil {
544 return err
545 }
546 if lastErr = cs.replayBufferLocked(); lastErr == nil {
547 return nil
548 }
549 }
550}
551
309func (cs *clientStream) Context() context.Context { 552func (cs *clientStream) Context() context.Context {
310 return cs.s.Context() 553 cs.commitAttempt()
554 // No need to lock before using attempt, since we know it is committed and
555 // cannot change.
556 return cs.attempt.s.Context()
557}
558
559func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
560 cs.mu.Lock()
561 for {
562 if cs.committed {
563 cs.mu.Unlock()
564 return op(cs.attempt)
565 }
566 a := cs.attempt
567 cs.mu.Unlock()
568 err := op(a)
569 cs.mu.Lock()
570 if a != cs.attempt {
571 // We started another attempt already.
572 continue
573 }
574 if err == io.EOF {
575 <-a.s.Done()
576 }
577 if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
578 onSuccess()
579 cs.mu.Unlock()
580 return err
581 }
582 if err := cs.retryLocked(err); err != nil {
583 cs.mu.Unlock()
584 return err
585 }
586 }
311} 587}
312 588
313func (cs *clientStream) Header() (metadata.MD, error) { 589func (cs *clientStream) Header() (metadata.MD, error) {
314 m, err := cs.s.Header() 590 var m metadata.MD
591 err := cs.withRetry(func(a *csAttempt) error {
592 var err error
593 m, err = a.s.Header()
594 return toRPCErr(err)
595 }, cs.commitAttemptLocked)
315 if err != nil { 596 if err != nil {
316 if _, ok := err.(transport.ConnectionError); !ok { 597 cs.finish(err)
317 cs.closeTransportStream(err) 598 return nil, err
599 }
600 if cs.binlog != nil && !cs.serverHeaderBinlogged {
601 // Only log if binary log is on and header has not been logged.
602 logEntry := &binarylog.ServerHeader{
603 OnClientSide: true,
604 Header: m,
605 PeerAddr: nil,
318 } 606 }
607 if peer, ok := peer.FromContext(cs.Context()); ok {
608 logEntry.PeerAddr = peer.Addr
609 }
610 cs.binlog.Log(logEntry)
611 cs.serverHeaderBinlogged = true
319 } 612 }
320 return m, err 613 return m, err
321} 614}
322 615
323func (cs *clientStream) Trailer() metadata.MD { 616func (cs *clientStream) Trailer() metadata.MD {
324 return cs.s.Trailer() 617 // On RPC failure, we never need to retry, because usage requires that
618 // RecvMsg() returned a non-nil error before calling this function is valid.
619 // We would have retried earlier if necessary.
620 //
621 // Commit the attempt anyway, just in case users are not following those
622 // directions -- it will prevent races and should not meaningfully impact
623 // performance.
624 cs.commitAttempt()
625 if cs.attempt.s == nil {
626 return nil
627 }
628 return cs.attempt.s.Trailer()
325} 629}
326 630
327func (cs *clientStream) SendMsg(m interface{}) (err error) { 631func (cs *clientStream) replayBufferLocked() error {
328 if cs.tracing { 632 a := cs.attempt
329 cs.mu.Lock() 633 for _, f := range cs.buffer {
330 if cs.trInfo.tr != nil { 634 if err := f(a); err != nil {
331 cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) 635 return err
332 } 636 }
333 cs.mu.Unlock()
334 } 637 }
335 // TODO Investigate how to signal the stats handling party. 638 return nil
336 // generate error stats if err != nil && err != io.EOF? 639}
640
641func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
642 // Note: we still will buffer if retry is disabled (for transparent retries).
643 if cs.committed {
644 return
645 }
646 cs.bufferSize += sz
647 if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
648 cs.commitAttemptLocked()
649 return
650 }
651 cs.buffer = append(cs.buffer, op)
652}
653
654func (cs *clientStream) SendMsg(m interface{}) (err error) {
337 defer func() { 655 defer func() {
338 if err != nil { 656 if err != nil && err != io.EOF {
657 // Call finish on the client stream for errors generated by this SendMsg
658 // call, as these indicate problems created by this client. (Transport
659 // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
660 // error will be returned from RecvMsg eventually in that case, or be
661 // retried.)
339 cs.finish(err) 662 cs.finish(err)
340 } 663 }
341 if err == nil {
342 return
343 }
344 if err == io.EOF {
345 // Specialize the process for server streaming. SendMesg is only called
346 // once when creating the stream object. io.EOF needs to be skipped when
347 // the rpc is early finished (before the stream object is created.).
348 // TODO: It is probably better to move this into the generated code.
349 if !cs.desc.ClientStreams && cs.desc.ServerStreams {
350 err = nil
351 }
352 return
353 }
354 if _, ok := err.(transport.ConnectionError); !ok {
355 cs.closeTransportStream(err)
356 }
357 err = toRPCErr(err)
358 }() 664 }()
359 var outPayload *stats.OutPayload 665 if cs.sentLast {
360 if cs.statsHandler != nil { 666 return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
361 outPayload = &stats.OutPayload{
362 Client: true,
363 }
364 } 667 }
365 out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload) 668 if !cs.desc.ClientStreams {
366 defer func() { 669 cs.sentLast = true
367 if cs.cbuf != nil { 670 }
368 cs.cbuf.Reset() 671 data, err := encode(cs.codec, m)
369 }
370 }()
371 if err != nil { 672 if err != nil {
372 return err 673 return err
373 } 674 }
374 if cs.c.maxSendMessageSize == nil { 675 compData, err := compress(data, cs.cp, cs.comp)
375 return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") 676 if err != nil {
677 return err
376 } 678 }
377 if len(out) > *cs.c.maxSendMessageSize { 679 hdr, payload := msgHeader(data, compData)
378 return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), *cs.c.maxSendMessageSize) 680 // TODO(dfawley): should we be checking len(data) instead?
681 if len(payload) > *cs.callInfo.maxSendMessageSize {
682 return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
379 } 683 }
380 err = cs.t.Write(cs.s, out, &transport.Options{Last: false}) 684 msgBytes := data // Store the pointer before setting to nil. For binary logging.
381 if err == nil && outPayload != nil { 685 op := func(a *csAttempt) error {
382 outPayload.SentTime = time.Now() 686 err := a.sendMsg(m, hdr, payload, data)
383 cs.statsHandler.HandleRPC(cs.statsCtx, outPayload) 687 // nil out the message and uncomp when replaying; they are only needed for
688 // stats which is disabled for subsequent attempts.
689 m, data = nil, nil
690 return err
384 } 691 }
385 return err 692 err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
693 if cs.binlog != nil && err == nil {
694 cs.binlog.Log(&binarylog.ClientMessage{
695 OnClientSide: true,
696 Message: msgBytes,
697 })
698 }
699 return
386} 700}
387 701
388func (cs *clientStream) RecvMsg(m interface{}) (err error) { 702func (cs *clientStream) RecvMsg(m interface{}) error {
389 var inPayload *stats.InPayload 703 if cs.binlog != nil && !cs.serverHeaderBinlogged {
390 if cs.statsHandler != nil { 704 // Call Header() to binary log header if it's not already logged.
391 inPayload = &stats.InPayload{ 705 cs.Header()
392 Client: true,
393 }
394 } 706 }
395 if cs.c.maxReceiveMessageSize == nil { 707 var recvInfo *payloadInfo
396 return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") 708 if cs.binlog != nil {
709 recvInfo = &payloadInfo{}
397 } 710 }
398 err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload) 711 err := cs.withRetry(func(a *csAttempt) error {
399 defer func() { 712 return a.recvMsg(m, recvInfo)
400 // err != nil indicates the termination of the stream. 713 }, cs.commitAttemptLocked)
401 if err != nil { 714 if cs.binlog != nil && err == nil {
402 cs.finish(err) 715 cs.binlog.Log(&binarylog.ServerMessage{
716 OnClientSide: true,
717 Message: recvInfo.uncompressedBytes,
718 })
719 }
720 if err != nil || !cs.desc.ServerStreams {
721 // err != nil or non-server-streaming indicates end of stream.
722 cs.finish(err)
723
724 if cs.binlog != nil {
725 // finish will not log Trailer. Log Trailer here.
726 logEntry := &binarylog.ServerTrailer{
727 OnClientSide: true,
728 Trailer: cs.Trailer(),
729 Err: err,
730 }
731 if logEntry.Err == io.EOF {
732 logEntry.Err = nil
733 }
734 if peer, ok := peer.FromContext(cs.Context()); ok {
735 logEntry.PeerAddr = peer.Addr
736 }
737 cs.binlog.Log(logEntry)
403 } 738 }
404 }() 739 }
740 return err
741}
742
743func (cs *clientStream) CloseSend() error {
744 if cs.sentLast {
745 // TODO: return an error and finish the stream instead, due to API misuse?
746 return nil
747 }
748 cs.sentLast = true
749 op := func(a *csAttempt) error {
750 a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
751 // Always return nil; io.EOF is the only error that might make sense
752 // instead, but there is no need to signal the client to call RecvMsg
753 // as the only use left for the stream after CloseSend is to call
754 // RecvMsg. This also matches historical behavior.
755 return nil
756 }
757 cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
758 if cs.binlog != nil {
759 cs.binlog.Log(&binarylog.ClientHalfClose{
760 OnClientSide: true,
761 })
762 }
763 // We never returned an error here for reasons.
764 return nil
765}
766
767func (cs *clientStream) finish(err error) {
768 if err == io.EOF {
769 // Ending a stream with EOF indicates a success.
770 err = nil
771 }
772 cs.mu.Lock()
773 if cs.finished {
774 cs.mu.Unlock()
775 return
776 }
777 cs.finished = true
778 cs.commitAttemptLocked()
779 cs.mu.Unlock()
780 // For binary logging. only log cancel in finish (could be caused by RPC ctx
781 // canceled or ClientConn closed). Trailer will be logged in RecvMsg.
782 //
783 // Only one of cancel or trailer needs to be logged. In the cases where
784 // users don't call RecvMsg, users must have already canceled the RPC.
785 if cs.binlog != nil && status.Code(err) == codes.Canceled {
786 cs.binlog.Log(&binarylog.Cancel{
787 OnClientSide: true,
788 })
789 }
405 if err == nil { 790 if err == nil {
406 if cs.tracing { 791 cs.retryThrottler.successfulRPC()
407 cs.mu.Lock() 792 }
408 if cs.trInfo.tr != nil { 793 if channelz.IsOn() {
409 cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) 794 if err != nil {
410 } 795 cs.cc.incrCallsFailed()
411 cs.mu.Unlock() 796 } else {
797 cs.cc.incrCallsSucceeded()
412 } 798 }
413 if inPayload != nil { 799 }
414 cs.statsHandler.HandleRPC(cs.statsCtx, inPayload) 800 if cs.attempt != nil {
801 cs.attempt.finish(err)
802 }
803 // after functions all rely upon having a stream.
804 if cs.attempt.s != nil {
805 for _, o := range cs.opts {
806 o.after(cs.callInfo)
415 } 807 }
416 if !cs.desc.ClientStreams || cs.desc.ServerStreams { 808 }
417 return 809 cs.cancel()
810}
811
812func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
813 cs := a.cs
814 if EnableTracing {
815 a.mu.Lock()
816 if a.trInfo.tr != nil {
817 a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
418 } 818 }
419 // Special handling for client streaming rpc. 819 a.mu.Unlock()
420 // This recv expects EOF or errors, so we don't collect inPayload. 820 }
421 if cs.c.maxReceiveMessageSize == nil { 821 if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
422 return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") 822 if !cs.desc.ClientStreams {
823 // For non-client-streaming RPCs, we return nil instead of EOF on error
824 // because the generated code requires it. finish is not called; RecvMsg()
825 // will call it with the stream's status independently.
826 return nil
423 } 827 }
424 err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil) 828 return io.EOF
425 cs.closeTransportStream(err) 829 }
426 if err == nil { 830 if a.statsHandler != nil {
427 return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) 831 a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
832 }
833 if channelz.IsOn() {
834 a.t.IncrMsgSent()
835 }
836 return nil
837}
838
839func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
840 cs := a.cs
841 if a.statsHandler != nil && payInfo == nil {
842 payInfo = &payloadInfo{}
843 }
844
845 if !a.decompSet {
846 // Block until we receive headers containing received message encoding.
847 if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
848 if a.dc == nil || a.dc.Type() != ct {
849 // No configured decompressor, or it does not match the incoming
850 // message encoding; attempt to find a registered compressor that does.
851 a.dc = nil
852 a.decomp = encoding.GetCompressor(ct)
853 }
854 } else {
855 // No compression is used; disable our decompressor.
856 a.dc = nil
428 } 857 }
858 // Only initialize this state once per stream.
859 a.decompSet = true
860 }
861 err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
862 if err != nil {
429 if err == io.EOF { 863 if err == io.EOF {
430 if se := cs.s.Status().Err(); se != nil { 864 if statusErr := a.s.Status().Err(); statusErr != nil {
431 return se 865 return statusErr
432 } 866 }
433 cs.finish(err) 867 return io.EOF // indicates successful end of stream.
434 return nil
435 } 868 }
436 return toRPCErr(err) 869 return toRPCErr(err)
437 } 870 }
438 if _, ok := err.(transport.ConnectionError); !ok { 871 if EnableTracing {
439 cs.closeTransportStream(err) 872 a.mu.Lock()
873 if a.trInfo.tr != nil {
874 a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
875 }
876 a.mu.Unlock()
877 }
878 if a.statsHandler != nil {
879 a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
880 Client: true,
881 RecvTime: time.Now(),
882 Payload: m,
883 // TODO truncate large payload.
884 Data: payInfo.uncompressedBytes,
885 Length: len(payInfo.uncompressedBytes),
886 })
887 }
888 if channelz.IsOn() {
889 a.t.IncrMsgRecv()
890 }
891 if cs.desc.ServerStreams {
892 // Subsequent messages should be received by subsequent RecvMsg calls.
893 return nil
894 }
895 // Special handling for non-server-stream rpcs.
896 // This recv expects EOF or errors, so we don't collect inPayload.
897 err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
898 if err == nil {
899 return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
440 } 900 }
441 if err == io.EOF { 901 if err == io.EOF {
442 if statusErr := cs.s.Status().Err(); statusErr != nil { 902 return a.s.Status().Err() // non-server streaming Recv returns nil on success
443 return statusErr
444 }
445 // Returns io.EOF to indicate the end of the stream.
446 return
447 } 903 }
448 return toRPCErr(err) 904 return toRPCErr(err)
449} 905}
450 906
451func (cs *clientStream) CloseSend() (err error) { 907func (a *csAttempt) finish(err error) {
452 err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) 908 a.mu.Lock()
909 if a.finished {
910 a.mu.Unlock()
911 return
912 }
913 a.finished = true
914 if err == io.EOF {
915 // Ending a stream with EOF indicates a success.
916 err = nil
917 }
918 if a.s != nil {
919 a.t.CloseStream(a.s, err)
920 }
921
922 if a.done != nil {
923 br := false
924 var tr metadata.MD
925 if a.s != nil {
926 br = a.s.BytesReceived()
927 tr = a.s.Trailer()
928 }
929 a.done(balancer.DoneInfo{
930 Err: err,
931 Trailer: tr,
932 BytesSent: a.s != nil,
933 BytesReceived: br,
934 })
935 }
936 if a.statsHandler != nil {
937 end := &stats.End{
938 Client: true,
939 BeginTime: a.cs.beginTime,
940 EndTime: time.Now(),
941 Error: err,
942 }
943 a.statsHandler.HandleRPC(a.cs.ctx, end)
944 }
945 if a.trInfo.tr != nil {
946 if err == nil {
947 a.trInfo.tr.LazyPrintf("RPC: [OK]")
948 } else {
949 a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
950 a.trInfo.tr.SetError()
951 }
952 a.trInfo.tr.Finish()
953 a.trInfo.tr = nil
954 }
955 a.mu.Unlock()
956}
957
958func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) {
959 ac.mu.Lock()
960 if ac.transport != t {
961 ac.mu.Unlock()
962 return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
963 }
964 // transition to CONNECTING state when an attempt starts
965 if ac.state != connectivity.Connecting {
966 ac.updateConnectivityState(connectivity.Connecting)
967 ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
968 }
969 ac.mu.Unlock()
970
971 if t == nil {
972 // TODO: return RPC error here?
973 return nil, errors.New("transport provided is nil")
974 }
975 // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
976 c := &callInfo{}
977
978 for _, o := range opts {
979 if err := o.before(c); err != nil {
980 return nil, toRPCErr(err)
981 }
982 }
983 c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
984 c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
985
986 // Possible context leak:
987 // The cancel function for the child context we create will only be called
988 // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
989 // an error is generated by SendMsg.
990 // https://github.com/grpc/grpc-go/issues/1818.
991 ctx, cancel := context.WithCancel(ctx)
453 defer func() { 992 defer func() {
454 if err != nil { 993 if err != nil {
455 cs.finish(err) 994 cancel()
456 } 995 }
457 }() 996 }()
458 if err == nil || err == io.EOF { 997
459 return nil 998 if err := setCallInfoCodec(c); err != nil {
999 return nil, err
460 } 1000 }
461 if _, ok := err.(transport.ConnectionError); !ok { 1001
462 cs.closeTransportStream(err) 1002 callHdr := &transport.CallHdr{
1003 Host: ac.cc.authority,
1004 Method: method,
1005 ContentSubtype: c.contentSubtype,
463 } 1006 }
464 err = toRPCErr(err) 1007
465 return 1008 // Set our outgoing compression according to the UseCompressor CallOption, if
1009 // set. In that case, also find the compressor from the encoding package.
1010 // Otherwise, use the compressor configured by the WithCompressor DialOption,
1011 // if set.
1012 var cp Compressor
1013 var comp encoding.Compressor
1014 if ct := c.compressorType; ct != "" {
1015 callHdr.SendCompress = ct
1016 if ct != encoding.Identity {
1017 comp = encoding.GetCompressor(ct)
1018 if comp == nil {
1019 return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
1020 }
1021 }
1022 } else if ac.cc.dopts.cp != nil {
1023 callHdr.SendCompress = ac.cc.dopts.cp.Type()
1024 cp = ac.cc.dopts.cp
1025 }
1026 if c.creds != nil {
1027 callHdr.Creds = c.creds
1028 }
1029
1030 as := &addrConnStream{
1031 callHdr: callHdr,
1032 ac: ac,
1033 ctx: ctx,
1034 cancel: cancel,
1035 opts: opts,
1036 callInfo: c,
1037 desc: desc,
1038 codec: c.codec,
1039 cp: cp,
1040 comp: comp,
1041 t: t,
1042 }
1043
1044 as.callInfo.stream = as
1045 s, err := as.t.NewStream(as.ctx, as.callHdr)
1046 if err != nil {
1047 err = toRPCErr(err)
1048 return nil, err
1049 }
1050 as.s = s
1051 as.p = &parser{r: s}
1052 ac.incrCallsStarted()
1053 if desc != unaryStreamDesc {
1054 // Listen on cc and stream contexts to cleanup when the user closes the
1055 // ClientConn or cancels the stream context. In all other cases, an error
1056 // should already be injected into the recv buffer by the transport, which
1057 // the client will eventually receive, and then we will cancel the stream's
1058 // context in clientStream.finish.
1059 go func() {
1060 select {
1061 case <-ac.ctx.Done():
1062 as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
1063 case <-ctx.Done():
1064 as.finish(toRPCErr(ctx.Err()))
1065 }
1066 }()
1067 }
1068 return as, nil
466} 1069}
467 1070
468func (cs *clientStream) closeTransportStream(err error) { 1071type addrConnStream struct {
469 cs.mu.Lock() 1072 s *transport.Stream
470 if cs.closed { 1073 ac *addrConn
471 cs.mu.Unlock() 1074 callHdr *transport.CallHdr
472 return 1075 cancel context.CancelFunc
1076 opts []CallOption
1077 callInfo *callInfo
1078 t transport.ClientTransport
1079 ctx context.Context
1080 sentLast bool
1081 desc *StreamDesc
1082 codec baseCodec
1083 cp Compressor
1084 comp encoding.Compressor
1085 decompSet bool
1086 dc Decompressor
1087 decomp encoding.Compressor
1088 p *parser
1089 done func(balancer.DoneInfo)
1090 mu sync.Mutex
1091 finished bool
1092}
1093
1094func (as *addrConnStream) Header() (metadata.MD, error) {
1095 m, err := as.s.Header()
1096 if err != nil {
1097 as.finish(toRPCErr(err))
473 } 1098 }
474 cs.closed = true 1099 return m, err
475 cs.mu.Unlock()
476 cs.t.CloseStream(cs.s, err)
477} 1100}
478 1101
479func (cs *clientStream) finish(err error) { 1102func (as *addrConnStream) Trailer() metadata.MD {
480 cs.mu.Lock() 1103 return as.s.Trailer()
481 defer cs.mu.Unlock() 1104}
482 if cs.finished { 1105
483 return 1106func (as *addrConnStream) CloseSend() error {
1107 if as.sentLast {
1108 // TODO: return an error and finish the stream instead, due to API misuse?
1109 return nil
484 } 1110 }
485 cs.finished = true 1111 as.sentLast = true
1112
1113 as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
1114 // Always return nil; io.EOF is the only error that might make sense
1115 // instead, but there is no need to signal the client to call RecvMsg
1116 // as the only use left for the stream after CloseSend is to call
1117 // RecvMsg. This also matches historical behavior.
1118 return nil
1119}
1120
1121func (as *addrConnStream) Context() context.Context {
1122 return as.s.Context()
1123}
1124
1125func (as *addrConnStream) SendMsg(m interface{}) (err error) {
486 defer func() { 1126 defer func() {
487 if cs.cancel != nil { 1127 if err != nil && err != io.EOF {
488 cs.cancel() 1128 // Call finish on the client stream for errors generated by this SendMsg
1129 // call, as these indicate problems created by this client. (Transport
1130 // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
1131 // error will be returned from RecvMsg eventually in that case, or be
1132 // retried.)
1133 as.finish(err)
489 } 1134 }
490 }() 1135 }()
491 for _, o := range cs.opts { 1136 if as.sentLast {
492 o.after(&cs.c) 1137 return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
493 } 1138 }
494 if cs.put != nil { 1139 if !as.desc.ClientStreams {
495 updateRPCInfoInContext(cs.s.Context(), rpcInfo{ 1140 as.sentLast = true
496 bytesSent: cs.s.BytesSent(),
497 bytesReceived: cs.s.BytesReceived(),
498 })
499 cs.put()
500 cs.put = nil
501 } 1141 }
502 if cs.statsHandler != nil { 1142 data, err := encode(as.codec, m)
503 end := &stats.End{ 1143 if err != nil {
504 Client: true, 1144 return err
505 EndTime: time.Now(), 1145 }
506 } 1146 compData, err := compress(data, as.cp, as.comp)
507 if err != io.EOF { 1147 if err != nil {
508 // end.Error is nil if the RPC finished successfully. 1148 return err
509 end.Error = toRPCErr(err) 1149 }
1150 hdr, payld := msgHeader(data, compData)
1151 // TODO(dfawley): should we be checking len(data) instead?
1152 if len(payld) > *as.callInfo.maxSendMessageSize {
1153 return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
1154 }
1155
1156 if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
1157 if !as.desc.ClientStreams {
1158 // For non-client-streaming RPCs, we return nil instead of EOF on error
1159 // because the generated code requires it. finish is not called; RecvMsg()
1160 // will call it with the stream's status independently.
1161 return nil
510 } 1162 }
511 cs.statsHandler.HandleRPC(cs.statsCtx, end) 1163 return io.EOF
512 } 1164 }
513 if !cs.tracing { 1165
514 return 1166 if channelz.IsOn() {
1167 as.t.IncrMsgSent()
515 } 1168 }
516 if cs.trInfo.tr != nil { 1169 return nil
517 if err == nil || err == io.EOF { 1170}
518 cs.trInfo.tr.LazyPrintf("RPC: [OK]") 1171
1172func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
1173 defer func() {
1174 if err != nil || !as.desc.ServerStreams {
1175 // err != nil or non-server-streaming indicates end of stream.
1176 as.finish(err)
1177 }
1178 }()
1179
1180 if !as.decompSet {
1181 // Block until we receive headers containing received message encoding.
1182 if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
1183 if as.dc == nil || as.dc.Type() != ct {
1184 // No configured decompressor, or it does not match the incoming
1185 // message encoding; attempt to find a registered compressor that does.
1186 as.dc = nil
1187 as.decomp = encoding.GetCompressor(ct)
1188 }
519 } else { 1189 } else {
520 cs.trInfo.tr.LazyPrintf("RPC: [%v]", err) 1190 // No compression is used; disable our decompressor.
521 cs.trInfo.tr.SetError() 1191 as.dc = nil
522 } 1192 }
523 cs.trInfo.tr.Finish() 1193 // Only initialize this state once per stream.
524 cs.trInfo.tr = nil 1194 as.decompSet = true
1195 }
1196 err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
1197 if err != nil {
1198 if err == io.EOF {
1199 if statusErr := as.s.Status().Err(); statusErr != nil {
1200 return statusErr
1201 }
1202 return io.EOF // indicates successful end of stream.
1203 }
1204 return toRPCErr(err)
1205 }
1206
1207 if channelz.IsOn() {
1208 as.t.IncrMsgRecv()
525 } 1209 }
1210 if as.desc.ServerStreams {
1211 // Subsequent messages should be received by subsequent RecvMsg calls.
1212 return nil
1213 }
1214
1215 // Special handling for non-server-stream rpcs.
1216 // This recv expects EOF or errors, so we don't collect inPayload.
1217 err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
1218 if err == nil {
1219 return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
1220 }
1221 if err == io.EOF {
1222 return as.s.Status().Err() // non-server streaming Recv returns nil on success
1223 }
1224 return toRPCErr(err)
526} 1225}
527 1226
528// ServerStream defines the interface a server stream has to satisfy. 1227func (as *addrConnStream) finish(err error) {
1228 as.mu.Lock()
1229 if as.finished {
1230 as.mu.Unlock()
1231 return
1232 }
1233 as.finished = true
1234 if err == io.EOF {
1235 // Ending a stream with EOF indicates a success.
1236 err = nil
1237 }
1238 if as.s != nil {
1239 as.t.CloseStream(as.s, err)
1240 }
1241
1242 if err != nil {
1243 as.ac.incrCallsFailed()
1244 } else {
1245 as.ac.incrCallsSucceeded()
1246 }
1247 as.cancel()
1248 as.mu.Unlock()
1249}
1250
1251// ServerStream defines the server-side behavior of a streaming RPC.
1252//
1253// All errors returned from ServerStream methods are compatible with the
1254// status package.
529type ServerStream interface { 1255type ServerStream interface {
530 // SetHeader sets the header metadata. It may be called multiple times. 1256 // SetHeader sets the header metadata. It may be called multiple times.
531 // When call multiple times, all the provided metadata will be merged. 1257 // When call multiple times, all the provided metadata will be merged.
@@ -541,29 +1267,67 @@ type ServerStream interface {
541 // SetTrailer sets the trailer metadata which will be sent with the RPC status. 1267 // SetTrailer sets the trailer metadata which will be sent with the RPC status.
542 // When called more than once, all the provided metadata will be merged. 1268 // When called more than once, all the provided metadata will be merged.
543 SetTrailer(metadata.MD) 1269 SetTrailer(metadata.MD)
544 Stream 1270 // Context returns the context for this stream.
1271 Context() context.Context
1272 // SendMsg sends a message. On error, SendMsg aborts the stream and the
1273 // error is returned directly.
1274 //
1275 // SendMsg blocks until:
1276 // - There is sufficient flow control to schedule m with the transport, or
1277 // - The stream is done, or
1278 // - The stream breaks.
1279 //
1280 // SendMsg does not wait until the message is received by the client. An
1281 // untimely stream closure may result in lost messages.
1282 //
1283 // It is safe to have a goroutine calling SendMsg and another goroutine
1284 // calling RecvMsg on the same stream at the same time, but it is not safe
1285 // to call SendMsg on the same stream in different goroutines.
1286 SendMsg(m interface{}) error
1287 // RecvMsg blocks until it receives a message into m or the stream is
1288 // done. It returns io.EOF when the client has performed a CloseSend. On
1289 // any non-EOF error, the stream is aborted and the error contains the
1290 // RPC status.
1291 //
1292 // It is safe to have a goroutine calling SendMsg and another goroutine
1293 // calling RecvMsg on the same stream at the same time, but it is not
1294 // safe to call RecvMsg on the same stream in different goroutines.
1295 RecvMsg(m interface{}) error
545} 1296}
546 1297
547// serverStream implements a server side Stream. 1298// serverStream implements a server side Stream.
548type serverStream struct { 1299type serverStream struct {
549 t transport.ServerTransport 1300 ctx context.Context
550 s *transport.Stream 1301 t transport.ServerTransport
551 p *parser 1302 s *transport.Stream
552 codec Codec 1303 p *parser
553 cp Compressor 1304 codec baseCodec
554 dc Decompressor 1305
555 cbuf *bytes.Buffer 1306 cp Compressor
1307 dc Decompressor
1308 comp encoding.Compressor
1309 decomp encoding.Compressor
1310
556 maxReceiveMessageSize int 1311 maxReceiveMessageSize int
557 maxSendMessageSize int 1312 maxSendMessageSize int
558 trInfo *traceInfo 1313 trInfo *traceInfo
559 1314
560 statsHandler stats.Handler 1315 statsHandler stats.Handler
561 1316
1317 binlog *binarylog.MethodLogger
1318 // serverHeaderBinlogged indicates whether server header has been logged. It
1319 // will happen when one of the following two happens: stream.SendHeader(),
1320 // stream.Send().
1321 //
1322 // It's only checked in send and sendHeader, doesn't need to be
1323 // synchronized.
1324 serverHeaderBinlogged bool
1325
562 mu sync.Mutex // protects trInfo.tr after the service handler runs. 1326 mu sync.Mutex // protects trInfo.tr after the service handler runs.
563} 1327}
564 1328
565func (ss *serverStream) Context() context.Context { 1329func (ss *serverStream) Context() context.Context {
566 return ss.s.Context() 1330 return ss.ctx
567} 1331}
568 1332
569func (ss *serverStream) SetHeader(md metadata.MD) error { 1333func (ss *serverStream) SetHeader(md metadata.MD) error {
@@ -574,7 +1338,15 @@ func (ss *serverStream) SetHeader(md metadata.MD) error {
574} 1338}
575 1339
576func (ss *serverStream) SendHeader(md metadata.MD) error { 1340func (ss *serverStream) SendHeader(md metadata.MD) error {
577 return ss.t.WriteHeader(ss.s, md) 1341 err := ss.t.WriteHeader(ss.s, md)
1342 if ss.binlog != nil && !ss.serverHeaderBinlogged {
1343 h, _ := ss.s.Header()
1344 ss.binlog.Log(&binarylog.ServerHeader{
1345 Header: h,
1346 })
1347 ss.serverHeaderBinlogged = true
1348 }
1349 return err
578} 1350}
579 1351
580func (ss *serverStream) SetTrailer(md metadata.MD) { 1352func (ss *serverStream) SetTrailer(md metadata.MD) {
@@ -582,7 +1354,6 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
582 return 1354 return
583 } 1355 }
584 ss.s.SetTrailer(md) 1356 ss.s.SetTrailer(md)
585 return
586} 1357}
587 1358
588func (ss *serverStream) SendMsg(m interface{}) (err error) { 1359func (ss *serverStream) SendMsg(m interface{}) (err error) {
@@ -599,29 +1370,50 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
599 } 1370 }
600 ss.mu.Unlock() 1371 ss.mu.Unlock()
601 } 1372 }
602 }() 1373 if err != nil && err != io.EOF {
603 var outPayload *stats.OutPayload 1374 st, _ := status.FromError(toRPCErr(err))
604 if ss.statsHandler != nil { 1375 ss.t.WriteStatus(ss.s, st)
605 outPayload = &stats.OutPayload{} 1376 // Non-user specified status was sent out. This should be an error
606 } 1377 // case (as a server side Cancel maybe).
607 out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload) 1378 //
608 defer func() { 1379 // This is not handled specifically now. User will return a final
609 if ss.cbuf != nil { 1380 // status from the service handler, we will log that error instead.
610 ss.cbuf.Reset() 1381 // This behavior is similar to an interceptor.
1382 }
1383 if channelz.IsOn() && err == nil {
1384 ss.t.IncrMsgSent()
611 } 1385 }
612 }() 1386 }()
1387 data, err := encode(ss.codec, m)
1388 if err != nil {
1389 return err
1390 }
1391 compData, err := compress(data, ss.cp, ss.comp)
613 if err != nil { 1392 if err != nil {
614 return err 1393 return err
615 } 1394 }
616 if len(out) > ss.maxSendMessageSize { 1395 hdr, payload := msgHeader(data, compData)
617 return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), ss.maxSendMessageSize) 1396 // TODO(dfawley): should we be checking len(data) instead?
1397 if len(payload) > ss.maxSendMessageSize {
1398 return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
618 } 1399 }
619 if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { 1400 if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
620 return toRPCErr(err) 1401 return toRPCErr(err)
621 } 1402 }
622 if outPayload != nil { 1403 if ss.binlog != nil {
623 outPayload.SentTime = time.Now() 1404 if !ss.serverHeaderBinlogged {
624 ss.statsHandler.HandleRPC(ss.s.Context(), outPayload) 1405 h, _ := ss.s.Header()
1406 ss.binlog.Log(&binarylog.ServerHeader{
1407 Header: h,
1408 })
1409 ss.serverHeaderBinlogged = true
1410 }
1411 ss.binlog.Log(&binarylog.ServerMessage{
1412 Message: data,
1413 })
1414 }
1415 if ss.statsHandler != nil {
1416 ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
625 } 1417 }
626 return nil 1418 return nil
627} 1419}
@@ -640,22 +1432,55 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
640 } 1432 }
641 ss.mu.Unlock() 1433 ss.mu.Unlock()
642 } 1434 }
1435 if err != nil && err != io.EOF {
1436 st, _ := status.FromError(toRPCErr(err))
1437 ss.t.WriteStatus(ss.s, st)
1438 // Non-user specified status was sent out. This should be an error
1439 // case (as a server side Cancel maybe).
1440 //
1441 // This is not handled specifically now. User will return a final
1442 // status from the service handler, we will log that error instead.
1443 // This behavior is similar to an interceptor.
1444 }
1445 if channelz.IsOn() && err == nil {
1446 ss.t.IncrMsgRecv()
1447 }
643 }() 1448 }()
644 var inPayload *stats.InPayload 1449 var payInfo *payloadInfo
645 if ss.statsHandler != nil { 1450 if ss.statsHandler != nil || ss.binlog != nil {
646 inPayload = &stats.InPayload{} 1451 payInfo = &payloadInfo{}
647 } 1452 }
648 if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { 1453 if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
649 if err == io.EOF { 1454 if err == io.EOF {
1455 if ss.binlog != nil {
1456 ss.binlog.Log(&binarylog.ClientHalfClose{})
1457 }
650 return err 1458 return err
651 } 1459 }
652 if err == io.ErrUnexpectedEOF { 1460 if err == io.ErrUnexpectedEOF {
653 err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) 1461 err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
654 } 1462 }
655 return toRPCErr(err) 1463 return toRPCErr(err)
656 } 1464 }
657 if inPayload != nil { 1465 if ss.statsHandler != nil {
658 ss.statsHandler.HandleRPC(ss.s.Context(), inPayload) 1466 ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
1467 RecvTime: time.Now(),
1468 Payload: m,
1469 // TODO truncate large payload.
1470 Data: payInfo.uncompressedBytes,
1471 Length: len(payInfo.uncompressedBytes),
1472 })
1473 }
1474 if ss.binlog != nil {
1475 ss.binlog.Log(&binarylog.ClientMessage{
1476 Message: payInfo.uncompressedBytes,
1477 })
659 } 1478 }
660 return nil 1479 return nil
661} 1480}
1481
1482// MethodFromServerStream returns the method string for the input stream.
1483// The returned string is in the format of "/service/method".
1484func MethodFromServerStream(stream ServerStream) (string, bool) {
1485 return Method(stream.Context())
1486}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index decb678..584360f 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -21,7 +21,7 @@
21package tap 21package tap
22 22
23import ( 23import (
24 "golang.org/x/net/context" 24 "context"
25) 25)
26 26
27// Info defines the relevant information needed by the handles. 27// Info defines the relevant information needed by the handles.
@@ -32,8 +32,20 @@ type Info struct {
32 // TODO: More to be added. 32 // TODO: More to be added.
33} 33}
34 34
35// ServerInHandle defines the function which runs when a new stream is created 35// ServerInHandle defines the function which runs before a new stream is created
36// on the server side. Note that it is executed in the per-connection I/O goroutine(s) instead 36// on the server side. If it returns a non-nil error, the stream will not be
37// of per-RPC goroutine. Therefore, users should NOT have any blocking/time-consuming 37// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
38// work in this handle. Otherwise all the RPCs would slow down. 38// The client will receive an RPC error "code = Unavailable, desc = stream
39// terminated by RST_STREAM with error code: REFUSED_STREAM".
40//
41// It's intended to be used in situations where you don't want to waste the
42// resources to accept the new stream (e.g. rate-limiting). And the content of
43// the error will be ignored and won't be sent back to the client. For other
44// general usages, please use interceptors.
45//
46// Note that it is executed in the per-connection I/O goroutine(s) instead of
47// per-RPC goroutine. Therefore, users should NOT have any
48// blocking/time-consuming work in this handle. Otherwise all the RPCs would
49// slow down. Also, for the same reason, this handle won't be called
50// concurrently by gRPC.
39type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) 51type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go
new file mode 100644
index 0000000..bdb5d81
--- /dev/null
+++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go
@@ -0,0 +1,244 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package bufconn provides a net.Conn implemented by a buffer and related
20// dialing and listening functionality.
21package bufconn
22
23import (
24 "fmt"
25 "io"
26 "net"
27 "sync"
28 "time"
29)
30
31// Listener implements a net.Listener that creates local, buffered net.Conns
32// via its Accept and Dial method.
33type Listener struct {
34 mu sync.Mutex
35 sz int
36 ch chan net.Conn
37 done chan struct{}
38}
39
40var errClosed = fmt.Errorf("Closed")
41
42// Listen returns a Listener that can only be contacted by its own Dialers and
43// creates buffered connections between the two.
44func Listen(sz int) *Listener {
45 return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})}
46}
47
48// Accept blocks until Dial is called, then returns a net.Conn for the server
49// half of the connection.
50func (l *Listener) Accept() (net.Conn, error) {
51 select {
52 case <-l.done:
53 return nil, errClosed
54 case c := <-l.ch:
55 return c, nil
56 }
57}
58
59// Close stops the listener.
60func (l *Listener) Close() error {
61 l.mu.Lock()
62 defer l.mu.Unlock()
63 select {
64 case <-l.done:
65 // Already closed.
66 break
67 default:
68 close(l.done)
69 }
70 return nil
71}
72
73// Addr reports the address of the listener.
74func (l *Listener) Addr() net.Addr { return addr{} }
75
76// Dial creates an in-memory full-duplex network connection, unblocks Accept by
77// providing it the server half of the connection, and returns the client half
78// of the connection.
79func (l *Listener) Dial() (net.Conn, error) {
80 p1, p2 := newPipe(l.sz), newPipe(l.sz)
81 select {
82 case <-l.done:
83 return nil, errClosed
84 case l.ch <- &conn{p1, p2}:
85 return &conn{p2, p1}, nil
86 }
87}
88
89type pipe struct {
90 mu sync.Mutex
91
92 // buf contains the data in the pipe. It is a ring buffer of fixed capacity,
93 // with r and w pointing to the offset to read and write, respsectively.
94 //
95 // Data is read between [r, w) and written to [w, r), wrapping around the end
96 // of the slice if necessary.
97 //
98 // The buffer is empty if r == len(buf), otherwise if r == w, it is full.
99 //
100 // w and r are always in the range [0, cap(buf)) and [0, len(buf)].
101 buf []byte
102 w, r int
103
104 wwait sync.Cond
105 rwait sync.Cond
106
107 closed bool
108 writeClosed bool
109}
110
111func newPipe(sz int) *pipe {
112 p := &pipe{buf: make([]byte, 0, sz)}
113 p.wwait.L = &p.mu
114 p.rwait.L = &p.mu
115 return p
116}
117
118func (p *pipe) empty() bool {
119 return p.r == len(p.buf)
120}
121
122func (p *pipe) full() bool {
123 return p.r < len(p.buf) && p.r == p.w
124}
125
126func (p *pipe) Read(b []byte) (n int, err error) {
127 p.mu.Lock()
128 defer p.mu.Unlock()
129 // Block until p has data.
130 for {
131 if p.closed {
132 return 0, io.ErrClosedPipe
133 }
134 if !p.empty() {
135 break
136 }
137 if p.writeClosed {
138 return 0, io.EOF
139 }
140 p.rwait.Wait()
141 }
142 wasFull := p.full()
143
144 n = copy(b, p.buf[p.r:len(p.buf)])
145 p.r += n
146 if p.r == cap(p.buf) {
147 p.r = 0
148 p.buf = p.buf[:p.w]
149 }
150
151 // Signal a blocked writer, if any
152 if wasFull {
153 p.wwait.Signal()
154 }
155
156 return n, nil
157}
158
159func (p *pipe) Write(b []byte) (n int, err error) {
160 p.mu.Lock()
161 defer p.mu.Unlock()
162 if p.closed {
163 return 0, io.ErrClosedPipe
164 }
165 for len(b) > 0 {
166 // Block until p is not full.
167 for {
168 if p.closed || p.writeClosed {
169 return 0, io.ErrClosedPipe
170 }
171 if !p.full() {
172 break
173 }
174 p.wwait.Wait()
175 }
176 wasEmpty := p.empty()
177
178 end := cap(p.buf)
179 if p.w < p.r {
180 end = p.r
181 }
182 x := copy(p.buf[p.w:end], b)
183 b = b[x:]
184 n += x
185 p.w += x
186 if p.w > len(p.buf) {
187 p.buf = p.buf[:p.w]
188 }
189 if p.w == cap(p.buf) {
190 p.w = 0
191 }
192
193 // Signal a blocked reader, if any.
194 if wasEmpty {
195 p.rwait.Signal()
196 }
197 }
198 return n, nil
199}
200
201func (p *pipe) Close() error {
202 p.mu.Lock()
203 defer p.mu.Unlock()
204 p.closed = true
205 // Signal all blocked readers and writers to return an error.
206 p.rwait.Broadcast()
207 p.wwait.Broadcast()
208 return nil
209}
210
211func (p *pipe) closeWrite() error {
212 p.mu.Lock()
213 defer p.mu.Unlock()
214 p.writeClosed = true
215 // Signal all blocked readers and writers to return an error.
216 p.rwait.Broadcast()
217 p.wwait.Broadcast()
218 return nil
219}
220
221type conn struct {
222 io.Reader
223 io.Writer
224}
225
226func (c *conn) Close() error {
227 err1 := c.Reader.(*pipe).Close()
228 err2 := c.Writer.(*pipe).closeWrite()
229 if err1 != nil {
230 return err1
231 }
232 return err2
233}
234
235func (*conn) LocalAddr() net.Addr { return addr{} }
236func (*conn) RemoteAddr() net.Addr { return addr{} }
237func (c *conn) SetDeadline(t time.Time) error { return fmt.Errorf("unsupported") }
238func (c *conn) SetReadDeadline(t time.Time) error { return fmt.Errorf("unsupported") }
239func (c *conn) SetWriteDeadline(t time.Time) error { return fmt.Errorf("unsupported") }
240
241type addr struct{}
242
243func (addr) Network() string { return "bufconn" }
244func (addr) String() string { return "bufconn" }
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
index b419c9e..c1c96de 100644
--- a/vendor/google.golang.org/grpc/trace.go
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -31,7 +31,7 @@ import (
31 31
32// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. 32// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
33// This should only be set before any RPCs are sent or received by this program. 33// This should only be set before any RPCs are sent or received by this program.
34var EnableTracing = true 34var EnableTracing bool
35 35
36// methodFamily returns the trace family for the given method. 36// methodFamily returns the trace family for the given method.
37// It turns "/pkg.Service/GetFoo" into "pkg.Service". 37// It turns "/pkg.Service/GetFoo" into "pkg.Service".
@@ -76,6 +76,15 @@ func (f *firstLine) String() string {
76 return line.String() 76 return line.String()
77} 77}
78 78
79const truncateSize = 100
80
81func truncate(x string, l int) string {
82 if l > len(x) {
83 return x
84 }
85 return x[:l]
86}
87
79// payload represents an RPC request or response payload. 88// payload represents an RPC request or response payload.
80type payload struct { 89type payload struct {
81 sent bool // whether this is an outgoing payload 90 sent bool // whether this is an outgoing payload
@@ -85,9 +94,9 @@ type payload struct {
85 94
86func (p payload) String() string { 95func (p payload) String() string {
87 if p.sent { 96 if p.sent {
88 return fmt.Sprintf("sent: %v", p.msg) 97 return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
89 } 98 }
90 return fmt.Sprintf("recv: %v", p.msg) 99 return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
91} 100}
92 101
93type fmtStringer struct { 102type fmtStringer struct {
diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go
deleted file mode 100644
index 7cffee1..0000000
--- a/vendor/google.golang.org/grpc/transport/go16.go
+++ /dev/null
@@ -1,45 +0,0 @@
1// +build go1.6,!go1.7
2
3/*
4 *
5 * Copyright 2016 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package transport
22
23import (
24 "net"
25
26 "google.golang.org/grpc/codes"
27
28 "golang.org/x/net/context"
29)
30
31// dialContext connects to the address on the named network.
32func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
33 return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
34}
35
36// ContextErr converts the error from context package into a StreamError.
37func ContextErr(err error) StreamError {
38 switch err {
39 case context.DeadlineExceeded:
40 return streamErrorf(codes.DeadlineExceeded, "%v", err)
41 case context.Canceled:
42 return streamErrorf(codes.Canceled, "%v", err)
43 }
44 return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
45}
diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go
deleted file mode 100644
index 2464e69..0000000
--- a/vendor/google.golang.org/grpc/transport/go17.go
+++ /dev/null
@@ -1,46 +0,0 @@
1// +build go1.7
2
3/*
4 *
5 * Copyright 2016 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package transport
22
23import (
24 "context"
25 "net"
26
27 "google.golang.org/grpc/codes"
28
29 netctx "golang.org/x/net/context"
30)
31
32// dialContext connects to the address on the named network.
33func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
34 return (&net.Dialer{}).DialContext(ctx, network, address)
35}
36
37// ContextErr converts the error from context package into a StreamError.
38func ContextErr(err error) StreamError {
39 switch err {
40 case context.DeadlineExceeded, netctx.DeadlineExceeded:
41 return streamErrorf(codes.DeadlineExceeded, "%v", err)
42 case context.Canceled, netctx.Canceled:
43 return streamErrorf(codes.Canceled, "%v", err)
44 }
45 return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
46}
diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go
deleted file mode 100644
index 516ea06..0000000
--- a/vendor/google.golang.org/grpc/transport/http2_client.go
+++ /dev/null
@@ -1,1369 +0,0 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "bytes"
23 "io"
24 "math"
25 "net"
26 "strings"
27 "sync"
28 "sync/atomic"
29 "time"
30
31 "golang.org/x/net/context"
32 "golang.org/x/net/http2"
33 "golang.org/x/net/http2/hpack"
34 "google.golang.org/grpc/codes"
35 "google.golang.org/grpc/credentials"
36 "google.golang.org/grpc/keepalive"
37 "google.golang.org/grpc/metadata"
38 "google.golang.org/grpc/peer"
39 "google.golang.org/grpc/stats"
40 "google.golang.org/grpc/status"
41)
42
43// http2Client implements the ClientTransport interface with HTTP2.
44type http2Client struct {
45 ctx context.Context
46 target string // server name/addr
47 userAgent string
48 md interface{}
49 conn net.Conn // underlying communication channel
50 remoteAddr net.Addr
51 localAddr net.Addr
52 authInfo credentials.AuthInfo // auth info about the connection
53 nextID uint32 // the next stream ID to be used
54
55 // writableChan synchronizes write access to the transport.
56 // A writer acquires the write lock by sending a value on writableChan
57 // and releases it by receiving from writableChan.
58 writableChan chan int
59 // shutdownChan is closed when Close is called.
60 // Blocking operations should select on shutdownChan to avoid
61 // blocking forever after Close.
62 // TODO(zhaoq): Maybe have a channel context?
63 shutdownChan chan struct{}
64 // errorChan is closed to notify the I/O error to the caller.
65 errorChan chan struct{}
66 // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
67 // that the server sent GoAway on this transport.
68 goAway chan struct{}
69 // awakenKeepalive is used to wake up keepalive when after it has gone dormant.
70 awakenKeepalive chan struct{}
71
72 framer *framer
73 hBuf *bytes.Buffer // the buffer for HPACK encoding
74 hEnc *hpack.Encoder // HPACK encoder
75
76 // controlBuf delivers all the control related tasks (e.g., window
77 // updates, reset streams, and various settings) to the controller.
78 controlBuf *controlBuffer
79 fc *inFlow
80 // sendQuotaPool provides flow control to outbound message.
81 sendQuotaPool *quotaPool
82 // streamsQuota limits the max number of concurrent streams.
83 streamsQuota *quotaPool
84
85 // The scheme used: https if TLS is on, http otherwise.
86 scheme string
87
88 isSecure bool
89
90 creds []credentials.PerRPCCredentials
91
92 // Boolean to keep track of reading activity on transport.
93 // 1 is true and 0 is false.
94 activity uint32 // Accessed atomically.
95 kp keepalive.ClientParameters
96
97 statsHandler stats.Handler
98
99 initialWindowSize int32
100
101 bdpEst *bdpEstimator
102 outQuotaVersion uint32
103
104 mu sync.Mutex // guard the following variables
105 state transportState // the state of underlying connection
106 activeStreams map[uint32]*Stream
107 // The max number of concurrent streams
108 maxStreams int
109 // the per-stream outbound flow control window size set by the peer.
110 streamSendQuota uint32
111 // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
112 prevGoAwayID uint32
113 // goAwayReason records the http2.ErrCode and debug data received with the
114 // GoAway frame.
115 goAwayReason GoAwayReason
116}
117
118func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
119 if fn != nil {
120 return fn(ctx, addr)
121 }
122 return dialContext(ctx, "tcp", addr)
123}
124
125func isTemporary(err error) bool {
126 switch err {
127 case io.EOF:
128 // Connection closures may be resolved upon retry, and are thus
129 // treated as temporary.
130 return true
131 case context.DeadlineExceeded:
132 // In Go 1.7, context.DeadlineExceeded implements Timeout(), and this
133 // special case is not needed. Until then, we need to keep this
134 // clause.
135 return true
136 }
137
138 switch err := err.(type) {
139 case interface {
140 Temporary() bool
141 }:
142 return err.Temporary()
143 case interface {
144 Timeout() bool
145 }:
146 // Timeouts may be resolved upon retry, and are thus treated as
147 // temporary.
148 return err.Timeout()
149 }
150 return false
151}
152
153// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
154// and starts to receive messages on it. Non-nil error returns if construction
155// fails.
156func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) {
157 scheme := "http"
158 conn, err := dial(ctx, opts.Dialer, addr.Addr)
159 if err != nil {
160 if opts.FailOnNonTempDialError {
161 return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
162 }
163 return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
164 }
165 // Any further errors will close the underlying connection
166 defer func(conn net.Conn) {
167 if err != nil {
168 conn.Close()
169 }
170 }(conn)
171 var (
172 isSecure bool
173 authInfo credentials.AuthInfo
174 )
175 if creds := opts.TransportCredentials; creds != nil {
176 scheme = "https"
177 conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn)
178 if err != nil {
179 // Credentials handshake errors are typically considered permanent
180 // to avoid retrying on e.g. bad certificates.
181 temp := isTemporary(err)
182 return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err)
183 }
184 isSecure = true
185 }
186 kp := opts.KeepaliveParams
187 // Validate keepalive parameters.
188 if kp.Time == 0 {
189 kp.Time = defaultClientKeepaliveTime
190 }
191 if kp.Timeout == 0 {
192 kp.Timeout = defaultClientKeepaliveTimeout
193 }
194 dynamicWindow := true
195 icwz := int32(initialWindowSize)
196 if opts.InitialConnWindowSize >= defaultWindowSize {
197 icwz = opts.InitialConnWindowSize
198 dynamicWindow = false
199 }
200 var buf bytes.Buffer
201 t := &http2Client{
202 ctx: ctx,
203 target: addr.Addr,
204 userAgent: opts.UserAgent,
205 md: addr.Metadata,
206 conn: conn,
207 remoteAddr: conn.RemoteAddr(),
208 localAddr: conn.LocalAddr(),
209 authInfo: authInfo,
210 // The client initiated stream id is odd starting from 1.
211 nextID: 1,
212 writableChan: make(chan int, 1),
213 shutdownChan: make(chan struct{}),
214 errorChan: make(chan struct{}),
215 goAway: make(chan struct{}),
216 awakenKeepalive: make(chan struct{}, 1),
217 framer: newFramer(conn),
218 hBuf: &buf,
219 hEnc: hpack.NewEncoder(&buf),
220 controlBuf: newControlBuffer(),
221 fc: &inFlow{limit: uint32(icwz)},
222 sendQuotaPool: newQuotaPool(defaultWindowSize),
223 scheme: scheme,
224 state: reachable,
225 activeStreams: make(map[uint32]*Stream),
226 isSecure: isSecure,
227 creds: opts.PerRPCCredentials,
228 maxStreams: defaultMaxStreamsClient,
229 streamsQuota: newQuotaPool(defaultMaxStreamsClient),
230 streamSendQuota: defaultWindowSize,
231 kp: kp,
232 statsHandler: opts.StatsHandler,
233 initialWindowSize: initialWindowSize,
234 }
235 if opts.InitialWindowSize >= defaultWindowSize {
236 t.initialWindowSize = opts.InitialWindowSize
237 dynamicWindow = false
238 }
239 if dynamicWindow {
240 t.bdpEst = &bdpEstimator{
241 bdp: initialWindowSize,
242 updateFlowControl: t.updateFlowControl,
243 }
244 }
245 // Make sure awakenKeepalive can't be written upon.
246 // keepalive routine will make it writable, if need be.
247 t.awakenKeepalive <- struct{}{}
248 if t.statsHandler != nil {
249 t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
250 RemoteAddr: t.remoteAddr,
251 LocalAddr: t.localAddr,
252 })
253 connBegin := &stats.ConnBegin{
254 Client: true,
255 }
256 t.statsHandler.HandleConn(t.ctx, connBegin)
257 }
258 // Start the reader goroutine for incoming message. Each transport has
259 // a dedicated goroutine which reads HTTP2 frame from network. Then it
260 // dispatches the frame to the corresponding stream entity.
261 go t.reader()
262 // Send connection preface to server.
263 n, err := t.conn.Write(clientPreface)
264 if err != nil {
265 t.Close()
266 return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
267 }
268 if n != len(clientPreface) {
269 t.Close()
270 return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
271 }
272 if t.initialWindowSize != defaultWindowSize {
273 err = t.framer.writeSettings(true, http2.Setting{
274 ID: http2.SettingInitialWindowSize,
275 Val: uint32(t.initialWindowSize),
276 })
277 } else {
278 err = t.framer.writeSettings(true)
279 }
280 if err != nil {
281 t.Close()
282 return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
283 }
284 // Adjust the connection flow control window if needed.
285 if delta := uint32(icwz - defaultWindowSize); delta > 0 {
286 if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
287 t.Close()
288 return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
289 }
290 }
291 go t.controller()
292 if t.kp.Time != infinity {
293 go t.keepalive()
294 }
295 t.writableChan <- 0
296 return t, nil
297}
298
299func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
300 // TODO(zhaoq): Handle uint32 overflow of Stream.id.
301 s := &Stream{
302 id: t.nextID,
303 done: make(chan struct{}),
304 goAway: make(chan struct{}),
305 method: callHdr.Method,
306 sendCompress: callHdr.SendCompress,
307 buf: newRecvBuffer(),
308 fc: &inFlow{limit: uint32(t.initialWindowSize)},
309 sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
310 headerChan: make(chan struct{}),
311 }
312 t.nextID += 2
313 s.requestRead = func(n int) {
314 t.adjustWindow(s, uint32(n))
315 }
316 // The client side stream context should have exactly the same life cycle with the user provided context.
317 // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
318 // So we use the original context here instead of creating a copy.
319 s.ctx = ctx
320 s.trReader = &transportReader{
321 reader: &recvBufferReader{
322 ctx: s.ctx,
323 goAway: s.goAway,
324 recv: s.buf,
325 },
326 windowHandler: func(n int) {
327 t.updateWindow(s, uint32(n))
328 },
329 }
330
331 return s
332}
333
334// NewStream creates a stream and registers it into the transport as "active"
335// streams.
336func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
337 pr := &peer.Peer{
338 Addr: t.remoteAddr,
339 }
340 // Attach Auth info if there is any.
341 if t.authInfo != nil {
342 pr.AuthInfo = t.authInfo
343 }
344 ctx = peer.NewContext(ctx, pr)
345 var (
346 authData = make(map[string]string)
347 audience string
348 )
349 // Create an audience string only if needed.
350 if len(t.creds) > 0 || callHdr.Creds != nil {
351 // Construct URI required to get auth request metadata.
352 var port string
353 if pos := strings.LastIndex(t.target, ":"); pos != -1 {
354 // Omit port if it is the default one.
355 if t.target[pos+1:] != "443" {
356 port = ":" + t.target[pos+1:]
357 }
358 }
359 pos := strings.LastIndex(callHdr.Method, "/")
360 if pos == -1 {
361 pos = len(callHdr.Method)
362 }
363 audience = "https://" + callHdr.Host + port + callHdr.Method[:pos]
364 }
365 for _, c := range t.creds {
366 data, err := c.GetRequestMetadata(ctx, audience)
367 if err != nil {
368 return nil, streamErrorf(codes.Internal, "transport: %v", err)
369 }
370 for k, v := range data {
371 // Capital header names are illegal in HTTP/2.
372 k = strings.ToLower(k)
373 authData[k] = v
374 }
375 }
376 callAuthData := make(map[string]string)
377 // Check if credentials.PerRPCCredentials were provided via call options.
378 // Note: if these credentials are provided both via dial options and call
379 // options, then both sets of credentials will be applied.
380 if callCreds := callHdr.Creds; callCreds != nil {
381 if !t.isSecure && callCreds.RequireTransportSecurity() {
382 return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure conneciton")
383 }
384 data, err := callCreds.GetRequestMetadata(ctx, audience)
385 if err != nil {
386 return nil, streamErrorf(codes.Internal, "transport: %v", err)
387 }
388 for k, v := range data {
389 // Capital header names are illegal in HTTP/2
390 k = strings.ToLower(k)
391 callAuthData[k] = v
392 }
393 }
394 t.mu.Lock()
395 if t.activeStreams == nil {
396 t.mu.Unlock()
397 return nil, ErrConnClosing
398 }
399 if t.state == draining {
400 t.mu.Unlock()
401 return nil, ErrStreamDrain
402 }
403 if t.state != reachable {
404 t.mu.Unlock()
405 return nil, ErrConnClosing
406 }
407 t.mu.Unlock()
408 sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire())
409 if err != nil {
410 return nil, err
411 }
412 // Returns the quota balance back.
413 if sq > 1 {
414 t.streamsQuota.add(sq - 1)
415 }
416 if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
417 // Return the quota back now because there is no stream returned to the caller.
418 if _, ok := err.(StreamError); ok {
419 t.streamsQuota.add(1)
420 }
421 return nil, err
422 }
423 t.mu.Lock()
424 if t.state == draining {
425 t.mu.Unlock()
426 t.streamsQuota.add(1)
427 // Need to make t writable again so that the rpc in flight can still proceed.
428 t.writableChan <- 0
429 return nil, ErrStreamDrain
430 }
431 if t.state != reachable {
432 t.mu.Unlock()
433 return nil, ErrConnClosing
434 }
435 s := t.newStream(ctx, callHdr)
436 t.activeStreams[s.id] = s
437 // If the number of active streams change from 0 to 1, then check if keepalive
438 // has gone dormant. If so, wake it up.
439 if len(t.activeStreams) == 1 {
440 select {
441 case t.awakenKeepalive <- struct{}{}:
442 t.framer.writePing(false, false, [8]byte{})
443 default:
444 }
445 }
446
447 t.mu.Unlock()
448
449 // HPACK encodes various headers. Note that once WriteField(...) is
450 // called, the corresponding headers/continuation frame has to be sent
451 // because hpack.Encoder is stateful.
452 t.hBuf.Reset()
453 t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
454 t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
455 t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
456 t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
457 t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
458 t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
459 t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
460
461 if callHdr.SendCompress != "" {
462 t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
463 }
464 if dl, ok := ctx.Deadline(); ok {
465 // Send out timeout regardless its value. The server can detect timeout context by itself.
466 timeout := dl.Sub(time.Now())
467 t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
468 }
469
470 for k, v := range authData {
471 t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
472 }
473 for k, v := range callAuthData {
474 t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
475 }
476 var (
477 endHeaders bool
478 )
479 if md, ok := metadata.FromOutgoingContext(ctx); ok {
480 for k, vv := range md {
481 // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
482 if isReservedHeader(k) {
483 continue
484 }
485 for _, v := range vv {
486 t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
487 }
488 }
489 }
490 if md, ok := t.md.(*metadata.MD); ok {
491 for k, vv := range *md {
492 if isReservedHeader(k) {
493 continue
494 }
495 for _, v := range vv {
496 t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
497 }
498 }
499 }
500 first := true
501 bufLen := t.hBuf.Len()
502 // Sends the headers in a single batch even when they span multiple frames.
503 for !endHeaders {
504 size := t.hBuf.Len()
505 if size > http2MaxFrameLen {
506 size = http2MaxFrameLen
507 } else {
508 endHeaders = true
509 }
510 var flush bool
511 if callHdr.Flush && endHeaders {
512 flush = true
513 }
514 if first {
515 // Sends a HeadersFrame to server to start a new stream.
516 p := http2.HeadersFrameParam{
517 StreamID: s.id,
518 BlockFragment: t.hBuf.Next(size),
519 EndStream: false,
520 EndHeaders: endHeaders,
521 }
522 // Do a force flush for the buffered frames iff it is the last headers frame
523 // and there is header metadata to be sent. Otherwise, there is flushing until
524 // the corresponding data frame is written.
525 err = t.framer.writeHeaders(flush, p)
526 first = false
527 } else {
528 // Sends Continuation frames for the leftover headers.
529 err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
530 }
531 if err != nil {
532 t.notifyError(err)
533 return nil, connectionErrorf(true, err, "transport: %v", err)
534 }
535 }
536 s.mu.Lock()
537 s.bytesSent = true
538 s.mu.Unlock()
539
540 if t.statsHandler != nil {
541 outHeader := &stats.OutHeader{
542 Client: true,
543 WireLength: bufLen,
544 FullMethod: callHdr.Method,
545 RemoteAddr: t.remoteAddr,
546 LocalAddr: t.localAddr,
547 Compression: callHdr.SendCompress,
548 }
549 t.statsHandler.HandleRPC(s.ctx, outHeader)
550 }
551 t.writableChan <- 0
552 return s, nil
553}
554
// CloseStream clears the footprint of a stream when the stream is not needed any more.
// This must not be executed in reader's goroutine.
func (t *http2Client) CloseStream(s *Stream, err error) {
	t.mu.Lock()
	if t.activeStreams == nil {
		// The transport has already been closed; nothing to clean up.
		t.mu.Unlock()
		return
	}
	if err != nil {
		// notify in-flight streams, before the deletion
		s.write(recvMsg{err: err})
	}
	delete(t.activeStreams, s.id)
	if t.state == draining && len(t.activeStreams) == 0 {
		// The transport is draining and s is the last live stream on t.
		t.mu.Unlock()
		t.Close()
		return
	}
	t.mu.Unlock()
	// rstStream is true in case the stream is being closed at the client-side
	// and the server needs to be intimated about it by sending a RST_STREAM
	// frame.
	// To make sure this frame is written to the wire before the headers of the
	// next stream waiting for streamsQuota, we add to streamsQuota pool only
	// after having acquired the writableChan to send RST_STREAM out (look at
	// the controller() routine).
	var rstStream bool
	var rstError http2.ErrCode
	defer func() {
		// In case, the client doesn't have to send RST_STREAM to server
		// we can safely add back to streamsQuota pool now.
		if !rstStream {
			t.streamsQuota.add(1)
			return
		}
		t.controlBuf.put(&resetStream{s.id, rstError})
	}()
	s.mu.Lock()
	rstStream = s.rstStream
	rstError = s.rstError
	if s.state == streamDone {
		s.mu.Unlock()
		return
	}
	if !s.headerDone {
		// Unblock any receiver still waiting for response headers.
		close(s.headerChan)
		s.headerDone = true
	}
	s.state = streamDone
	s.mu.Unlock()
	// A StreamError means the stream is being cancelled locally; ask the
	// deferred func above to send RST_STREAM(CANCEL) to the server.
	if _, ok := err.(StreamError); ok {
		rstStream = true
		rstError = http2.ErrCodeCancel
	}
}
611
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
func (t *http2Client) Close() (err error) {
	t.mu.Lock()
	if t.state == closing {
		// Close already ran; make repeated calls harmless.
		t.mu.Unlock()
		return
	}
	if t.state == reachable || t.state == draining {
		// errorChan must be closed exactly once; these are the only states
		// in which it is still open (notifyError closes it on unreachable).
		close(t.errorChan)
	}
	t.state = closing
	t.mu.Unlock()
	close(t.shutdownChan)
	err = t.conn.Close()
	// Detach the active-stream map so concurrent CloseStream calls observe a
	// closed transport (nil map) and bail out.
	t.mu.Lock()
	streams := t.activeStreams
	t.activeStreams = nil
	t.mu.Unlock()
	// Notify all active streams.
	for _, s := range streams {
		s.mu.Lock()
		if !s.headerDone {
			close(s.headerChan)
			s.headerDone = true
		}
		s.mu.Unlock()
		s.write(recvMsg{err: ErrConnClosing})
	}
	if t.statsHandler != nil {
		connEnd := &stats.ConnEnd{
			Client: true,
		}
		t.statsHandler.HandleConn(t.ctx, connEnd)
	}
	return
}
650
651func (t *http2Client) GracefulClose() error {
652 t.mu.Lock()
653 switch t.state {
654 case unreachable:
655 // The server may close the connection concurrently. t is not available for
656 // any streams. Close it now.
657 t.mu.Unlock()
658 t.Close()
659 return nil
660 case closing:
661 t.mu.Unlock()
662 return nil
663 }
664 if t.state == draining {
665 t.mu.Unlock()
666 return nil
667 }
668 t.state = draining
669 active := len(t.activeStreams)
670 t.mu.Unlock()
671 if active == 0 {
672 return t.Close()
673 }
674 return nil
675}
676
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later
// if it improves the performance.
func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
	r := bytes.NewBuffer(data)
	var (
		p   []byte // current frame payload; non-nil when a write must be retried
		oqv uint32 // snapshot of t.outQuotaVersion for this iteration
	)
	for {
		// Snapshot the quota version; if an InitialWindowSize SETTINGS frame
		// is applied mid-iteration we must forsake the write (checked below).
		oqv = atomic.LoadUint32(&t.outQuotaVersion)
		if r.Len() > 0 || p != nil {
			size := http2MaxFrameLen
			// Wait until the stream has some quota to send the data.
			sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire())
			if err != nil {
				return err
			}
			// Wait until the transport has some quota to send the data.
			tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire())
			if err != nil {
				return err
			}
			// The payload is min(http2MaxFrameLen, stream quota, transport quota).
			if sq < size {
				size = sq
			}
			if tq < size {
				size = tq
			}
			if p == nil {
				p = r.Next(size)
			}
			ps := len(p)
			if ps < sq {
				// Overbooked stream quota. Return it back.
				s.sendQuotaPool.add(sq - ps)
			}
			if ps < tq {
				// Overbooked transport quota. Return it back.
				t.sendQuotaPool.add(tq - ps)
			}
		}
		var (
			endStream  bool
			forceFlush bool
		)
		if opts.Last && r.Len() == 0 {
			endStream = true
		}
		// Indicate there is a writer who is about to write a data frame.
		t.framer.adjustNumWriters(1)
		// Got some quota. Try to acquire writing privilege on the transport.
		if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil {
			if _, ok := err.(StreamError); ok || err == io.EOF {
				// Return the connection quota back.
				t.sendQuotaPool.add(len(p))
			}
			if t.framer.adjustNumWriters(-1) == 0 {
				// This writer is the last one in this batch and has the
				// responsibility to flush the buffered frames. It queues
				// a flush request to controlBuf instead of flushing directly
				// in order to avoid the race with other writing or flushing.
				t.controlBuf.put(&flushIO{})
			}
			return err
		}
		// Re-check the stream context now that we hold the write privilege.
		select {
		case <-s.ctx.Done():
			t.sendQuotaPool.add(len(p))
			if t.framer.adjustNumWriters(-1) == 0 {
				t.controlBuf.put(&flushIO{})
			}
			t.writableChan <- 0
			return ContextErr(s.ctx.Err())
		default:
		}
		if oqv != atomic.LoadUint32(&t.outQuotaVersion) {
			// InitialWindowSize settings frame must have been received after we
			// acquired send quota but before we got the writable channel.
			// We must forsake this write.
			t.sendQuotaPool.add(len(p))
			s.sendQuotaPool.add(len(p))
			if t.framer.adjustNumWriters(-1) == 0 {
				t.controlBuf.put(&flushIO{})
			}
			t.writableChan <- 0
			continue
		}
		if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 {
			// Do a force flush iff this is last frame for the entire gRPC message
			// and the caller is the only writer at this moment.
			forceFlush = true
		}
		// If WriteData fails, all the pending streams will be handled
		// by http2Client.Close(). No explicit CloseStream() needs to be
		// invoked.
		if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil {
			t.notifyError(err)
			return connectionErrorf(true, err, "transport: %v", err)
		}
		p = nil
		if t.framer.adjustNumWriters(-1) == 0 {
			t.framer.flushWrite()
		}
		t.writableChan <- 0
		if r.Len() == 0 {
			break
		}
	}
	if !opts.Last {
		return nil
	}
	// The whole message has been written; mark the write side of the stream done.
	s.mu.Lock()
	if s.state != streamDone {
		s.state = streamWriteDone
	}
	s.mu.Unlock()
	return nil
}
797
798func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
799 t.mu.Lock()
800 defer t.mu.Unlock()
801 s, ok := t.activeStreams[f.Header().StreamID]
802 return s, ok
803}
804
// adjustWindow sends out extra window update over the initial window size
// of stream if the application is requesting data larger in size than
// the window.
func (t *http2Client) adjustWindow(s *Stream, n uint32) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.state == streamDone {
		return
	}
	if w := s.fc.maybeAdjust(n); w > 0 {
		// Piggyback connection's window update along.
		if cw := t.fc.resetPendingUpdate(); cw > 0 {
			t.controlBuf.put(&windowUpdate{0, cw, false})
		}
		t.controlBuf.put(&windowUpdate{s.id, w, true})
	}
}
822
823// updateWindow adjusts the inbound quota for the stream and the transport.
824// Window updates will deliver to the controller for sending when
825// the cumulative quota exceeds the corresponding threshold.
826func (t *http2Client) updateWindow(s *Stream, n uint32) {
827 s.mu.Lock()
828 defer s.mu.Unlock()
829 if s.state == streamDone {
830 return
831 }
832 if w := s.fc.onRead(n); w > 0 {
833 if cw := t.fc.resetPendingUpdate(); cw > 0 {
834 t.controlBuf.put(&windowUpdate{0, cw, false})
835 }
836 t.controlBuf.put(&windowUpdate{s.id, w, true})
837 }
838}
839
// updateFlowControl updates the incoming flow control windows
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Client) updateFlowControl(n uint32) {
	t.mu.Lock()
	for _, s := range t.activeStreams {
		s.fc.newLimit(n)
	}
	t.initialWindowSize = int32(n)
	t.mu.Unlock()
	// Advertise the enlarged connection window, then tell the server about
	// the new per-stream window via a SETTINGS frame.
	t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n), false})
	t.controlBuf.put(&settings{
		ack: false,
		ss: []http2.Setting{
			{
				ID:  http2.SettingInitialWindowSize,
				Val: uint32(n),
			},
		},
	})
}
861
// handleData dispatches an incoming DATA frame: it settles connection- and
// stream-level flow control and delivers a copy of the payload to the
// addressed stream.
func (t *http2Client) handleData(f *http2.DataFrame) {
	size := f.Header().Length
	var sendBDPPing bool
	if t.bdpEst != nil {
		sendBDPPing = t.bdpEst.add(uint32(size))
	}
	// Decouple connection's flow control from application's read.
	// An update on connection's flow control should not depend on
	// whether user application has read the data or not. Such a
	// restriction is already imposed on the stream's flow control,
	// and therefore the sender will be blocked anyways.
	// Decoupling the connection flow control will prevent other
	// active(fast) streams from starving in presence of slow or
	// inactive streams.
	//
	// Furthermore, if a bdpPing is being sent out we can piggyback
	// connection's window update for the bytes we just received.
	if sendBDPPing {
		t.controlBuf.put(&windowUpdate{0, uint32(size), false})
		t.controlBuf.put(bdpPing)
	} else {
		if err := t.fc.onData(uint32(size)); err != nil {
			// Connection-level flow-control violation: fatal for the transport.
			t.notifyError(connectionErrorf(true, err, "%v", err))
			return
		}
		if w := t.fc.onRead(uint32(size)); w > 0 {
			t.controlBuf.put(&windowUpdate{0, w, true})
		}
	}
	// Select the right stream to dispatch.
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	if size > 0 {
		s.mu.Lock()
		if s.state == streamDone {
			s.mu.Unlock()
			return
		}
		if err := s.fc.onData(uint32(size)); err != nil {
			// Stream-level flow-control violation: reset only this stream.
			s.rstStream = true
			s.rstError = http2.ErrCodeFlowControl
			s.finish(status.New(codes.Internal, err.Error()))
			s.mu.Unlock()
			s.write(recvMsg{err: io.EOF})
			return
		}
		if f.Header().Flags.Has(http2.FlagDataPadded) {
			// Padding is never handed to the application; return its credit
			// to the stream window right away.
			if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
				t.controlBuf.put(&windowUpdate{s.id, w, true})
			}
		}
		s.mu.Unlock()
		// TODO(bradfitz, zhaoq): A copy is required here because there is no
		// guarantee f.Data() is consumed before the arrival of next frame.
		// Can this copy be eliminated?
		if len(f.Data()) > 0 {
			data := make([]byte, len(f.Data()))
			copy(data, f.Data())
			s.write(recvMsg{data: data})
		}
	}
	// The server has closed the stream without sending trailers. Record that
	// the read direction is closed, and set the status appropriately.
	if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
		s.mu.Lock()
		if s.state == streamDone {
			s.mu.Unlock()
			return
		}
		s.finish(status.New(codes.Internal, "server closed the stream without sending trailers"))
		s.mu.Unlock()
		s.write(recvMsg{err: io.EOF})
	}
}
938
// handleRSTStream terminates the addressed stream with the gRPC status code
// mapped from the server's RST_STREAM error code.
func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	s.mu.Lock()
	if s.state == streamDone {
		s.mu.Unlock()
		return
	}
	if !s.headerDone {
		// Unblock any receiver still waiting for response headers.
		close(s.headerChan)
		s.headerDone = true
	}
	statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
	if !ok {
		warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
		statusCode = codes.Unknown
	}
	s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %d", f.ErrCode))
	s.mu.Unlock()
	s.write(recvMsg{err: io.EOF})
}
962
963func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
964 if f.IsAck() {
965 return
966 }
967 var ss []http2.Setting
968 f.ForeachSetting(func(s http2.Setting) error {
969 ss = append(ss, s)
970 return nil
971 })
972 // The settings will be applied once the ack is sent.
973 t.controlBuf.put(&settings{ack: true, ss: ss})
974}
975
976func (t *http2Client) handlePing(f *http2.PingFrame) {
977 if f.IsAck() {
978 // Maybe it's a BDP ping.
979 if t.bdpEst != nil {
980 t.bdpEst.calculate(f.Data)
981 }
982 return
983 }
984 pingAck := &ping{ack: true}
985 copy(pingAck.data[:], f.Data[:])
986 t.controlBuf.put(pingAck)
987}
988
989func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
990 t.mu.Lock()
991 if t.state != reachable && t.state != draining {
992 t.mu.Unlock()
993 return
994 }
995 if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
996 infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
997 }
998 id := f.LastStreamID
999 if id > 0 && id%2 != 1 {
1000 t.mu.Unlock()
1001 t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID))
1002 return
1003 }
1004 // A client can recieve multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387).
1005 // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay
1006 // with the ID of the last stream the server will process.
1007 // Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we
1008 // close all streams created after the second GoAwayId. This way streams that were in-flight while the GoAway from server
1009 // was being sent don't get killed.
1010 select {
1011 case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways).
1012 // If there are multiple GoAways the first one should always have an ID greater than the following ones.
1013 if id > t.prevGoAwayID {
1014 t.mu.Unlock()
1015 t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID))
1016 return
1017 }
1018 default:
1019 t.setGoAwayReason(f)
1020 close(t.goAway)
1021 t.state = draining
1022 }
1023 // All streams with IDs greater than the GoAwayId
1024 // and smaller than the previous GoAway ID should be killed.
1025 upperLimit := t.prevGoAwayID
1026 if upperLimit == 0 { // This is the first GoAway Frame.
1027 upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
1028 }
1029 for streamID, stream := range t.activeStreams {
1030 if streamID > id && streamID <= upperLimit {
1031 close(stream.goAway)
1032 }
1033 }
1034 t.prevGoAwayID = id
1035 active := len(t.activeStreams)
1036 t.mu.Unlock()
1037 if active == 0 {
1038 t.Close()
1039 }
1040}
1041
1042// setGoAwayReason sets the value of t.goAwayReason based
1043// on the GoAway frame received.
1044// It expects a lock on transport's mutext to be held by
1045// the caller.
1046func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
1047 t.goAwayReason = NoReason
1048 switch f.ErrCode {
1049 case http2.ErrCodeEnhanceYourCalm:
1050 if string(f.DebugData()) == "too_many_pings" {
1051 t.goAwayReason = TooManyPings
1052 }
1053 }
1054}
1055
1056func (t *http2Client) GetGoAwayReason() GoAwayReason {
1057 t.mu.Lock()
1058 defer t.mu.Unlock()
1059 return t.goAwayReason
1060}
1061
1062func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
1063 id := f.Header().StreamID
1064 incr := f.Increment
1065 if id == 0 {
1066 t.sendQuotaPool.add(int(incr))
1067 return
1068 }
1069 if s, ok := t.getStream(f); ok {
1070 s.sendQuotaPool.add(int(incr))
1071 }
1072}
1073
// operateHeaders takes action on the decoded headers. A headers frame without
// END_STREAM completes the header exchange; one with END_STREAM carries the
// trailers and finishes the stream.
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
	s, ok := t.getStream(frame)
	if !ok {
		return
	}
	s.mu.Lock()
	s.bytesReceived = true
	s.mu.Unlock()
	var state decodeState
	if err := state.decodeResponseHeader(frame); err != nil {
		s.mu.Lock()
		if !s.headerDone {
			close(s.headerChan)
			s.headerDone = true
		}
		s.mu.Unlock()
		s.write(recvMsg{err: err})
		// Something wrong. Stops reading even when there is remaining.
		return
	}

	endStream := frame.StreamEnded()
	var isHeader bool
	defer func() {
		// Report the frame to the stats handler as a header or a trailer,
		// depending on whether it completed the header exchange below.
		if t.statsHandler != nil {
			if isHeader {
				inHeader := &stats.InHeader{
					Client:     true,
					WireLength: int(frame.Header().Length),
				}
				t.statsHandler.HandleRPC(s.ctx, inHeader)
			} else {
				inTrailer := &stats.InTrailer{
					Client:     true,
					WireLength: int(frame.Header().Length),
				}
				t.statsHandler.HandleRPC(s.ctx, inTrailer)
			}
		}
	}()

	s.mu.Lock()
	if !endStream {
		s.recvCompress = state.encoding
	}
	if !s.headerDone {
		// First headers frame for this stream: record metadata and unblock
		// anyone waiting on headerChan.
		if !endStream && len(state.mdata) > 0 {
			s.header = state.mdata
		}
		close(s.headerChan)
		s.headerDone = true
		isHeader = true
	}
	if !endStream || s.state == streamDone {
		s.mu.Unlock()
		return
	}

	// END_STREAM was set: this frame carried the trailers; finish the stream.
	if len(state.mdata) > 0 {
		s.trailer = state.mdata
	}
	s.finish(state.status())
	s.mu.Unlock()
	s.write(recvMsg{err: io.EOF})
}
1140
// handleMalformedHTTP2 aborts a single stream after the framer reported a
// stream-level error: it unblocks any header waiter, then surfaces err to the
// stream's reader.
func handleMalformedHTTP2(s *Stream, err error) {
	s.mu.Lock()
	if !s.headerDone {
		close(s.headerChan)
		s.headerDone = true
	}
	s.mu.Unlock()
	s.write(recvMsg{err: err})
}
1150
1151// reader runs as a separate goroutine in charge of reading data from network
1152// connection.
1153//
1154// TODO(zhaoq): currently one reader per transport. Investigate whether this is
1155// optimal.
1156// TODO(zhaoq): Check the validity of the incoming frame sequence.
1157func (t *http2Client) reader() {
1158 // Check the validity of server preface.
1159 frame, err := t.framer.readFrame()
1160 if err != nil {
1161 t.notifyError(err)
1162 return
1163 }
1164 atomic.CompareAndSwapUint32(&t.activity, 0, 1)
1165 sf, ok := frame.(*http2.SettingsFrame)
1166 if !ok {
1167 t.notifyError(err)
1168 return
1169 }
1170 t.handleSettings(sf)
1171
1172 // loop to keep reading incoming messages on this transport.
1173 for {
1174 frame, err := t.framer.readFrame()
1175 atomic.CompareAndSwapUint32(&t.activity, 0, 1)
1176 if err != nil {
1177 // Abort an active stream if the http2.Framer returns a
1178 // http2.StreamError. This can happen only if the server's response
1179 // is malformed http2.
1180 if se, ok := err.(http2.StreamError); ok {
1181 t.mu.Lock()
1182 s := t.activeStreams[se.StreamID]
1183 t.mu.Unlock()
1184 if s != nil {
1185 // use error detail to provide better err message
1186 handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail()))
1187 }
1188 continue
1189 } else {
1190 // Transport error.
1191 t.notifyError(err)
1192 return
1193 }
1194 }
1195 switch frame := frame.(type) {
1196 case *http2.MetaHeadersFrame:
1197 t.operateHeaders(frame)
1198 case *http2.DataFrame:
1199 t.handleData(frame)
1200 case *http2.RSTStreamFrame:
1201 t.handleRSTStream(frame)
1202 case *http2.SettingsFrame:
1203 t.handleSettings(frame)
1204 case *http2.PingFrame:
1205 t.handlePing(frame)
1206 case *http2.GoAwayFrame:
1207 t.handleGoAway(frame)
1208 case *http2.WindowUpdateFrame:
1209 t.handleWindowUpdate(frame)
1210 default:
1211 errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
1212 }
1213 }
1214}
1215
// applySettings applies the server's acknowledged SETTINGS to the client's
// sending state. It is invoked by the controller after the ack is written.
func (t *http2Client) applySettings(ss []http2.Setting) {
	for _, s := range ss {
		switch s.ID {
		case http2.SettingMaxConcurrentStreams:
			// TODO(zhaoq): This is a hack to avoid significant refactoring of the
			// code to deal with the unrealistic int32 overflow. Probably will try
			// to find a better way to handle this later.
			if s.Val > math.MaxInt32 {
				s.Val = math.MaxInt32
			}
			t.mu.Lock()
			ms := t.maxStreams
			t.maxStreams = int(s.Val)
			t.mu.Unlock()
			// Grow or shrink the concurrency quota by the delta.
			t.streamsQuota.add(int(s.Val) - ms)
		case http2.SettingInitialWindowSize:
			t.mu.Lock()
			for _, stream := range t.activeStreams {
				// Adjust the sending quota for each stream.
				stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota))
			}
			t.streamSendQuota = s.Val
			t.mu.Unlock()
			// Invalidate in-flight Writes so they re-acquire quota (see Write).
			atomic.AddUint32(&t.outQuotaVersion, 1)
		}
	}
}
1243
// controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the server.
func (t *http2Client) controller() {
	for {
		select {
		case i := <-t.controlBuf.get():
			t.controlBuf.load()
			select {
			case <-t.writableChan:
				// Holding the write privilege; dispatch on the item type.
				switch i := i.(type) {
				case *windowUpdate:
					t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment)
				case *settings:
					if i.ack {
						t.framer.writeSettingsAck(true)
						t.applySettings(i.ss)
					} else {
						t.framer.writeSettings(true, i.ss...)
					}
				case *resetStream:
					// If the server needs to be intimated about stream closing,
					// then we need to make sure the RST_STREAM frame is written to
					// the wire before the headers of the next stream waiting on
					// streamQuota. We ensure this by adding to the streamsQuota pool
					// only after having acquired the writableChan to send RST_STREAM.
					t.streamsQuota.add(1)
					t.framer.writeRSTStream(true, i.streamID, i.code)
				case *flushIO:
					t.framer.flushWrite()
				case *ping:
					if !i.ack {
						// Outgoing BDP probe: record the send time for RTT math.
						t.bdpEst.timesnap(i.data)
					}
					t.framer.writePing(true, i.ack, i.data)
				default:
					errorf("transport: http2Client.controller got unexpected item type %v\n", i)
				}
				t.writableChan <- 0
				continue
			case <-t.shutdownChan:
				return
			}
		case <-t.shutdownChan:
			return
		}
	}
}
1291
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
	p := &ping{data: [8]byte{}}
	timer := time.NewTimer(t.kp.Time)
	for {
		select {
		case <-timer.C:
			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
				// There was traffic since the last check; no ping needed yet.
				timer.Reset(t.kp.Time)
				continue
			}
			// Check if keepalive should go dormant.
			t.mu.Lock()
			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
				// Make awakenKeepalive writable.
				<-t.awakenKeepalive
				t.mu.Unlock()
				select {
				case <-t.awakenKeepalive:
					// If the control gets here a ping has been sent
					// need to reset the timer with keepalive.Timeout.
				case <-t.shutdownChan:
					return
				}
			} else {
				t.mu.Unlock()
				// Send ping.
				t.controlBuf.put(p)
			}

			// By the time control gets here a ping has been sent one way or the other.
			timer.Reset(t.kp.Timeout)
			select {
			case <-timer.C:
				if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
					// The ping was answered (activity seen); go back to idle timing.
					timer.Reset(t.kp.Time)
					continue
				}
				// No activity within the keepalive timeout: treat the
				// connection as dead.
				t.Close()
				return
			case <-t.shutdownChan:
				// Drain the timer if it already fired so Stop's contract holds.
				if !timer.Stop() {
					<-timer.C
				}
				return
			}
		case <-t.shutdownChan:
			if !timer.Stop() {
				<-timer.C
			}
			return
		}
	}
}
1346
// Error returns a channel that is closed when the transport hits a fatal
// error (see notifyError and Close).
func (t *http2Client) Error() <-chan struct{} {
	return t.errorChan
}
1350
// GoAway returns a channel that is closed when the server sends a GOAWAY and
// the transport enters the draining state (see handleGoAway).
func (t *http2Client) GoAway() <-chan struct{} {
	return t.goAway
}
1354
// notifyError records a fatal transport error: it moves a reachable transport
// to unreachable and signals waiters via errorChan; a draining transport is
// shut down outright.
func (t *http2Client) notifyError(err error) {
	t.mu.Lock()
	// make sure t.errorChan is closed only once.
	if t.state == draining {
		// Close handles errorChan for a draining transport.
		t.mu.Unlock()
		t.Close()
		return
	}
	if t.state == reachable {
		t.state = unreachable
		close(t.errorChan)
		infof("transport: http2Client.notifyError got notified that the client transport was broken %v.", err)
	}
	t.mu.Unlock()
}
diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/version.go
index b5a0f84..45eace5 100644
--- a/vendor/google.golang.org/grpc/naming/go18.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -1,8 +1,6 @@
1// +build go1.8
2
3/* 1/*
4 * 2 *
5 * Copyright 2017 gRPC authors. 3 * Copyright 2018 gRPC authors.
6 * 4 *
7 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License. 6 * you may not use this file except in compliance with the License.
@@ -18,11 +16,7 @@
18 * 16 *
19 */ 17 */
20 18
21package naming 19package grpc
22
23import "net"
24 20
25var ( 21// Version is the current grpc version.
26 lookupHost = net.DefaultResolver.LookupHost 22const Version = "1.18.0"
27 lookupSRV = net.DefaultResolver.LookupSRV
28)
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
new file mode 100644
index 0000000..94a5064
--- /dev/null
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -0,0 +1,141 @@
#!/bin/bash
# vet.sh runs the repository's lint and static-analysis gate: copyright-header
# checks, gofmt/goimports/golint/go vet, staticcheck, misspell, and freshness
# checks for generated protos and go.mod. Run with -install to install tools.

if [[ `uname -a` = *"Darwin"* ]]; then
  echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047"
  exit 1
fi

set -ex  # Exit on error; debugging enabled.
set -o pipefail  # Fail a pipe if any sub-command fails.

# die prints its arguments to stderr and aborts the script.
die() {
  echo "$@" >&2
  exit 1
}

# Check to make sure it's safe to modify the user's git repo.
if git status --porcelain | read; then
  die "Uncommitted or untracked files found; commit changes first"
fi

if [[ -d "${GOPATH}/src" ]]; then
  die "\${GOPATH}/src (${GOPATH}/src) exists; this script will delete it."
fi

# Undo any edits made by this script.
cleanup() {
  rm -rf "${GOPATH}/src"
  git reset --hard HEAD
}
trap cleanup EXIT

# fail_on_output fails (via pipefail + the negated read) when the piped-in
# command produced any output at all.
fail_on_output() {
  tee /dev/stderr | (! read)
}

PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"

if [[ "$1" = "-install" ]]; then
  # Check for module support
  if go help mod >& /dev/null; then
    go install \
      golang.org/x/lint/golint \
      golang.org/x/tools/cmd/goimports \
      honnef.co/go/tools/cmd/staticcheck \
      github.com/client9/misspell/cmd/misspell \
      github.com/golang/protobuf/protoc-gen-go
  else
    # Ye olde `go get` incantation.
    # Note: this gets the latest version of all tools (vs. the pinned versions
    # with Go modules).
    go get -u \
      golang.org/x/lint/golint \
      golang.org/x/tools/cmd/goimports \
      honnef.co/go/tools/cmd/staticcheck \
      github.com/client9/misspell/cmd/misspell \
      github.com/golang/protobuf/protoc-gen-go
  fi
  if [[ -z "${VET_SKIP_PROTO}" ]]; then
    if [[ "${TRAVIS}" = "true" ]]; then
      PROTOBUF_VERSION=3.3.0
      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
      pushd /home/travis
      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
      unzip ${PROTOC_FILENAME}
      bin/protoc --version
      popd
    elif ! which protoc > /dev/null; then
      die "Please install protoc into your path"
    fi
  fi
  exit 0
elif [[ "$#" -ne 0 ]]; then
  die "Unknown argument(s): $*"
fi

# - Ensure all source files contain a copyright message.
git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | fail_on_output

# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
(! grep 'func Test[^(]' *_test.go)
(! grep 'func Test[^(]' test/*.go)

# - Do not import math/rand for real library code. Use internal/grpcrand for
#   thread safety.
git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand')

# - Ensure all ptypes proto packages are renamed when importing.
git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/")

# - Check imports that are illegal in appengine (until Go 1.11).
# TODO: Remove when we drop Go 1.10 support
go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go

# - gofmt, goimports, golint (with exceptions for generated code), go vet.
gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | fail_on_output
golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:")
go tool vet -all .

# - Check that generated proto files are up to date.
if [[ -z "${VET_SKIP_PROTO}" ]]; then
  PATH="/home/travis/bin:${PATH}" make proto && \
    git status --porcelain 2>&1 | fail_on_output || \
    (git status; git --no-pager diff; exit 1)
fi

# - Check that our module is tidy.
if go help mod >& /dev/null; then
  go mod tidy && \
    git status --porcelain 2>&1 | fail_on_output || \
    (git status; git --no-pager diff; exit 1)
fi

# - Collection of static analysis checks
### HACK HACK HACK: Remove once staticcheck works with modules.
# Make a symlink in ${GOPATH}/src to its ${GOPATH}/pkg/mod equivalent for every package we use.
for x in $(find "${GOPATH}/pkg/mod" -name '*@*' | grep -v \/mod\/cache\/); do
  pkg="$(echo ${x#"${GOPATH}/pkg/mod/"} | cut -f1 -d@)";
  # If multiple versions exist, just use the existing one.
  if [[ -L "${GOPATH}/src/${pkg}" ]]; then continue; fi
  mkdir -p "$(dirname "${GOPATH}/src/${pkg}")";
  ln -s $x "${GOPATH}/src/${pkg}";
done
### END HACK HACK HACK

# TODO(menghanl): fix errors in transport_test.
staticcheck -go 1.9 -ignore '
balancer.go:SA1019
balancer_test.go:SA1019
clientconn_test.go:SA1019
balancer/roundrobin/roundrobin_test.go:SA1019
benchmark/benchmain/main.go:SA1019
internal/transport/handler_server.go:SA1019
internal/transport/handler_server_test.go:SA1019
internal/transport/transport_test.go:SA2002
stats/stats_test.go:SA1019
test/channelz_test.go:SA1019
test/end2end_test.go:SA1019
test/healthcheck_test.go:SA1019
' ./...
misspell -error .