path: root/vendor/google.golang.org/grpc/internal
author    Nathan Dench <ndenc2@gmail.com>  2019-05-24 15:16:44 +1000
committer Nathan Dench <ndenc2@gmail.com>  2019-05-24 15:16:44 +1000
commit    107c1cdb09c575aa2f61d97f48d8587eb6bada4c (patch)
tree      ca7d008643efc555c388baeaf1d986e0b6b3e28c /vendor/google.golang.org/grpc/internal
parent    844b5a68d8af4791755b8f0ad293cc99f5959183 (diff)
Upgrade to 0.12
Diffstat (limited to 'vendor/google.golang.org/grpc/internal')
-rw-r--r--  vendor/google.golang.org/grpc/internal/backoff/backoff.go  78
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/binarylog.go  167
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go  42
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/env_config.go  210
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/method_logger.go  426
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh  33
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/sink.go  162
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/util.go  41
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/funcs.go  699
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types.go  702
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types_linux.go  53
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go  44
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/util_linux.go  39
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go  26
-rw-r--r--  vendor/google.golang.org/grpc/internal/envconfig/envconfig.go  70
-rw-r--r--  vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go  56
-rw-r--r--  vendor/google.golang.org/grpc/internal/grpcsync/event.go  61
-rw-r--r--  vendor/google.golang.org/grpc/internal/internal.go  44
-rw-r--r--  vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go  114
-rw-r--r--  vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go  63
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go  141
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/controlbuf.go  852
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/defaults.go  49
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/flowcontrol.go  218
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/handler_server.go  449
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_client.go  1380
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_server.go  1180
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http_util.go  623
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/log.go  44
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/transport.go  758
30 files changed, 8810 insertions, 14 deletions
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 0000000..1bd0cce
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,78 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package backoff implements the backoff strategy for gRPC.
20//
21// This is kept in internal until the gRPC project decides whether or not to
22// allow alternative backoff strategies.
23package backoff
24
25import (
26 "time"
27
28 "google.golang.org/grpc/internal/grpcrand"
29)
30
31// Strategy defines the methodology for backing off after a grpc connection
32// failure.
33//
34type Strategy interface {
35 // Backoff returns the amount of time to wait before the next retry given
36 // the number of consecutive failures.
37 Backoff(retries int) time.Duration
38}
39
40const (
41 // baseDelay is the amount of time to wait before retrying after the first
42 // failure.
43 baseDelay = 1.0 * time.Second
44 // factor is applied to the backoff after each retry.
45 factor = 1.6
46 // jitter provides a range to randomize backoff delays.
47 jitter = 0.2
48)
49
50// Exponential implements the exponential backoff algorithm as defined in
51// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
52type Exponential struct {
53 // MaxDelay is the upper bound of backoff delay.
54 MaxDelay time.Duration
55}
56
57// Backoff returns the amount of time to wait before the next retry given the
58// number of retries.
59func (bc Exponential) Backoff(retries int) time.Duration {
60 if retries == 0 {
61 return baseDelay
62 }
63 backoff, max := float64(baseDelay), float64(bc.MaxDelay)
64 for backoff < max && retries > 0 {
65 backoff *= factor
66 retries--
67 }
68 if backoff > max {
69 backoff = max
70 }
71 // Randomize backoff delays so that if a cluster of requests start at
72 // the same time, they won't operate in lockstep.
73 backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
74 if backoff < 0 {
75 return 0
76 }
77 return time.Duration(backoff)
78}
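A minimal usage sketch for the strategy above (not part of the vendored file; it assumes compilation somewhere inside the grpc module, since internal/ packages cannot be imported from outside it, and connect() is a hypothetical placeholder):

package example

import (
	"time"

	"google.golang.org/grpc/internal/backoff"
)

// connect is a hypothetical stand-in for an operation that may fail.
func connect() error { return nil }

// dialWithRetry retries connect() using the Exponential strategy: delays of
// roughly 1s, 1.6s, 2.56s, 4.1s, ... (each randomized by +/-20% jitter),
// capped at MaxDelay.
func dialWithRetry() {
	var strategy backoff.Strategy = backoff.Exponential{MaxDelay: 120 * time.Second}
	for retries := 0; ; retries++ {
		if err := connect(); err == nil {
			return
		}
		time.Sleep(strategy.Backoff(retries))
	}
}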
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 0000000..fee6aec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,167 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package binarylog implements binary logging as defined in
20// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
21package binarylog
22
23import (
24 "fmt"
25 "os"
26
27 "google.golang.org/grpc/grpclog"
28)
29
30// Logger is the global binary logger. It can be used to get a binary logger for
31// each method.
32type Logger interface {
33 getMethodLogger(methodName string) *MethodLogger
34}
35
36// binLogger is the global binary logger for the binary. One of these should be
37// built at init time from the configuration (environment variable or flags).
38//
39// It is used to get a methodLogger for each individual method.
40var binLogger Logger
41
42// SetLogger sets the binary logger.
43//
44// Only call this at init time.
45func SetLogger(l Logger) {
46 binLogger = l
47}
48
49// GetMethodLogger returns the methodLogger for the given methodName.
50//
51// methodName should be in the format of "/service/method".
52//
53// Each methodLogger returned by this method is a new instance. This is to
54// generate sequence id within the call.
55func GetMethodLogger(methodName string) *MethodLogger {
56 if binLogger == nil {
57 return nil
58 }
59 return binLogger.getMethodLogger(methodName)
60}
61
62func init() {
63 const envStr = "GRPC_BINARY_LOG_FILTER"
64 configStr := os.Getenv(envStr)
65 binLogger = NewLoggerFromConfigString(configStr)
66}
67
68type methodLoggerConfig struct {
69 // Max length of header and message.
70 hdr, msg uint64
71}
72
73type logger struct {
74 all *methodLoggerConfig
75 services map[string]*methodLoggerConfig
76 methods map[string]*methodLoggerConfig
77
78 blacklist map[string]struct{}
79}
80
81// newEmptyLogger creates an empty logger. The map fields need to be filled in
82// using the set* functions.
83func newEmptyLogger() *logger {
84 return &logger{}
85}
86
87// Set method logger for "*".
88func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
89 if l.all != nil {
90 return fmt.Errorf("conflicting global rules found")
91 }
92 l.all = ml
93 return nil
94}
95
96// Set method logger for "service/*".
97//
98// New methodLogger with same service overrides the old one.
99func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
100 if _, ok := l.services[service]; ok {
101 return fmt.Errorf("conflicting rules for service %v found", service)
102 }
103 if l.services == nil {
104 l.services = make(map[string]*methodLoggerConfig)
105 }
106 l.services[service] = ml
107 return nil
108}
109
110// Set method logger for "service/method".
111//
112// New methodLogger with same method overrides the old one.
113func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
114 if _, ok := l.blacklist[method]; ok {
115 return fmt.Errorf("conflicting rules for method %v found", method)
116 }
117 if _, ok := l.methods[method]; ok {
118 return fmt.Errorf("conflicting rules for method %v found", method)
119 }
120 if l.methods == nil {
121 l.methods = make(map[string]*methodLoggerConfig)
122 }
123 l.methods[method] = ml
124 return nil
125}
126
127// Set blacklist method for "-service/method".
128func (l *logger) setBlacklist(method string) error {
129 if _, ok := l.blacklist[method]; ok {
130 return fmt.Errorf("conflicting rules for method %v found", method)
131 }
132 if _, ok := l.methods[method]; ok {
133 return fmt.Errorf("conflicting rules for method %v found", method)
134 }
135 if l.blacklist == nil {
136 l.blacklist = make(map[string]struct{})
137 }
138 l.blacklist[method] = struct{}{}
139 return nil
140}
141
142// getMethodLogger returns the methodLogger for the given methodName.
143//
144// methodName should be in the format of "/service/method".
145//
146// Each methodLogger returned by this method is a new instance. This is to
147// generate sequence id within the call.
148func (l *logger) getMethodLogger(methodName string) *MethodLogger {
149 s, m, err := parseMethodName(methodName)
150 if err != nil {
151 grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
152 return nil
153 }
154 if ml, ok := l.methods[s+"/"+m]; ok {
155 return newMethodLogger(ml.hdr, ml.msg)
156 }
157 if _, ok := l.blacklist[s+"/"+m]; ok {
158 return nil
159 }
160 if ml, ok := l.services[s]; ok {
161 return newMethodLogger(ml.hdr, ml.msg)
162 }
163 if l.all == nil {
164 return nil
165 }
166 return newMethodLogger(l.all.hdr, l.all.msg)
167}
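A minimal sketch of how the package above is wired up and queried (not part of the vendored file; it assumes compilation inside the grpc module, and the service/method names are made up). It illustrates the lookup precedence implemented by getMethodLogger: exact method, then blacklist, then service, then the "*" default:

package example

import "google.golang.org/grpc/internal/binarylog"

func init() {
	// Equivalent to starting the process with
	// GRPC_BINARY_LOG_FILTER="Foo/*,-Foo/Baz,*{h:64}".
	binarylog.SetLogger(binarylog.NewLoggerFromConfigString("Foo/*,-Foo/Baz,*{h:64}"))
}

func demo() {
	_ = binarylog.GetMethodLogger("/Foo/Bar") // service rule: full headers and messages
	_ = binarylog.GetMethodLogger("/Foo/Baz") // blacklisted: returns nil, nothing is logged
	_ = binarylog.GetMethodLogger("/Other/M") // default rule: headers truncated to 64 bytes
}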
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 0000000..1ee00a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// This file contains variables/functions that are exported for testing
20// only.
21//
22// Ideally these would live in a *_test.go file within the binarylog package,
23// but that doesn't work with staticcheck under Go modules. The error was:
24// "MdToMetadataProto not declared by package binarylog". This could be caused
25// by the way staticcheck looks for files for a certain package, which doesn't
26// support *_test.go files.
27//
28// Move those to binary_test.go when staticcheck is fixed.
29
30package binarylog
31
32var (
33 // AllLogger is a logger that logs all headers/messages for all RPCs. It's
34 // for testing only.
35 AllLogger = NewLoggerFromConfigString("*")
36 // MdToMetadataProto converts metadata to a binary logging proto message.
37 // It's for testing only.
38 MdToMetadataProto = mdToMetadataProto
39 // AddrToProto converts an address to a binary logging proto message. It's
40 // for testing only.
41 AddrToProto = addrToProto
42)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 0000000..eb188ea
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,210 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package binarylog
20
21import (
22 "errors"
23 "fmt"
24 "regexp"
25 "strconv"
26 "strings"
27
28 "google.golang.org/grpc/grpclog"
29)
30
31// NewLoggerFromConfigString reads the string and builds a logger. It can be used
32// to build a new logger and assign it to binarylog.Logger.
33//
34// Example filter config strings:
35// - "" Nothing will be logged
36// - "*" All headers and messages will be fully logged.
37// - "*{h}" Only headers will be logged.
38// - "*{m:256}" Only the first 256 bytes of each message will be logged.
39// - "Foo/*" Logs every method in service Foo
40// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
41// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
42// /Foo/Bar, logs all headers and messages in every other method in service
43// Foo.
44//
45// If two configs exist for the same method or service, the one specified
46// later overrides the previous config.
47func NewLoggerFromConfigString(s string) Logger {
48 if s == "" {
49 return nil
50 }
51 l := newEmptyLogger()
52 methods := strings.Split(s, ",")
53 for _, method := range methods {
54 if err := l.fillMethodLoggerWithConfigString(method); err != nil {
55 grpclog.Warningf("failed to parse binary log config: %v", err)
56 return nil
57 }
58 }
59 return l
60}
61
62// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
63// it to the right map in the logger.
64func (l *logger) fillMethodLoggerWithConfigString(config string) error {
65 // "" is invalid.
66 if config == "" {
67 return errors.New("empty string is not a valid method binary logging config")
68 }
69
70 // "-service/method", blacklist, no * or {} allowed.
71 if config[0] == '-' {
72 s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
73 if err != nil {
74 return fmt.Errorf("invalid config: %q, %v", config, err)
75 }
76 if m == "*" {
77 return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
78 }
79 if suffix != "" {
80 return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
81 }
82 if err := l.setBlacklist(s + "/" + m); err != nil {
83 return fmt.Errorf("invalid config: %v", err)
84 }
85 return nil
86 }
87
88 // "*{h:256;m:256}"
89 if config[0] == '*' {
90 hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
91 if err != nil {
92 return fmt.Errorf("invalid config: %q, %v", config, err)
93 }
94 if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
95 return fmt.Errorf("invalid config: %v", err)
96 }
97 return nil
98 }
99
100 s, m, suffix, err := parseMethodConfigAndSuffix(config)
101 if err != nil {
102 return fmt.Errorf("invalid config: %q, %v", config, err)
103 }
104 hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
105 if err != nil {
106 return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
107 }
108 if m == "*" {
109 if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
110 return fmt.Errorf("invalid config: %v", err)
111 }
112 } else {
113 if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
114 return fmt.Errorf("invalid config: %v", err)
115 }
116 }
117 return nil
118}
119
120const (
121 // TODO: this const is only used by env_config now. But could be useful for
122 // other config. Move to binarylog.go if necessary.
123 maxUInt = ^uint64(0)
124
125 // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
126 // expected output.
127 longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
128
129 // For suffix from above, "{h:123;m:123}". See test for expected output.
130 optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
131 headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$`
132 messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$`
133 headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
134)
135
136var (
137 longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr)
138 headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr)
139 messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr)
140 headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
141)
142
143// Turn "service/method{h;m}" into "service", "method", "{h;m}".
144func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
145 // Regexp result:
146 //
147 // in: "p.s/m{h:123,m:123}",
148 // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
149 match := longMethodConfigRegexp.FindStringSubmatch(c)
150 if match == nil {
151 return "", "", "", fmt.Errorf("%q contains invalid substring", c)
152 }
153 service = match[1]
154 method = match[2]
155 suffix = match[3]
156 return
157}
158
159// Turn "{h:123;m:345}" into 123, 345.
160//
161// Return maxUInt if length is unspecified.
162func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
163 if c == "" {
164 return maxUInt, maxUInt, nil
165 }
166 // Header config only.
167 if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
168 if s := match[1]; s != "" {
169 hdrLenStr, err = strconv.ParseUint(s, 10, 64)
170 if err != nil {
171 return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
172 }
173 return hdrLenStr, 0, nil
174 }
175 return maxUInt, 0, nil
176 }
177
178 // Message config only.
179 if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
180 if s := match[1]; s != "" {
181 msgLenStr, err = strconv.ParseUint(s, 10, 64)
182 if err != nil {
183 return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
184 }
185 return 0, msgLenStr, nil
186 }
187 return 0, maxUInt, nil
188 }
189
190 // Header and message config both.
191 if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
192 // Both hdr and msg are specified, but one or two of them might be empty.
193 hdrLenStr = maxUInt
194 msgLenStr = maxUInt
195 if s := match[1]; s != "" {
196 hdrLenStr, err = strconv.ParseUint(s, 10, 64)
197 if err != nil {
198 return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
199 }
200 }
201 if s := match[2]; s != "" {
202 msgLenStr, err = strconv.ParseUint(s, 10, 64)
203 if err != nil {
204 return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
205 }
206 }
207 return hdrLenStr, msgLenStr, nil
208 }
209 return 0, 0, fmt.Errorf("%q contains invalid substring", c)
210}
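The splitting performed by longMethodConfigRegexp can be seen with a small self-contained sketch (not part of the vendored file) that applies the same pattern to a few config items; the suffix it extracts is what parseHeaderMessageLengthConfig later turns into {h;m} limits:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as longMethodConfigRegexpStr above.
	re := regexp.MustCompile(`^([\w./]+)/((?:\w+)|[*])(.+)?$`)
	for _, c := range []string{"p.s/m{h:123;m:456}", "Foo/*", "Foo/Bar{m:256}"} {
		m := re.FindStringSubmatch(c)
		fmt.Printf("%-22s service=%q method=%q suffix=%q\n", c, m[1], m[2], m[3])
	}
}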
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
new file mode 100644
index 0000000..b06cdd4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -0,0 +1,426 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package binarylog
20
21import (
22 "net"
23 "strings"
24 "sync/atomic"
25 "time"
26
27 "github.com/golang/protobuf/proto"
28 "github.com/golang/protobuf/ptypes"
29 pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
30 "google.golang.org/grpc/grpclog"
31 "google.golang.org/grpc/metadata"
32 "google.golang.org/grpc/status"
33)
34
35type callIDGenerator struct {
36 id uint64
37}
38
39func (g *callIDGenerator) next() uint64 {
40 id := atomic.AddUint64(&g.id, 1)
41 return id
42}
43
44// reset is for testing only, and doesn't need to be thread safe.
45func (g *callIDGenerator) reset() {
46 g.id = 0
47}
48
49var idGen callIDGenerator
50
51// MethodLogger is the sub-logger for each method.
52type MethodLogger struct {
53 headerMaxLen, messageMaxLen uint64
54
55 callID uint64
56 idWithinCallGen *callIDGenerator
57
58 sink Sink // TODO(blog): make this pluggable.
59}
60
61func newMethodLogger(h, m uint64) *MethodLogger {
62 return &MethodLogger{
63 headerMaxLen: h,
64 messageMaxLen: m,
65
66 callID: idGen.next(),
67 idWithinCallGen: &callIDGenerator{},
68
69 sink: defaultSink, // TODO(blog): make it pluggable.
70 }
71}
72
73// Log creates a proto binary log entry, and logs it to the sink.
74func (ml *MethodLogger) Log(c LogEntryConfig) {
75 m := c.toProto()
76 timestamp, _ := ptypes.TimestampProto(time.Now())
77 m.Timestamp = timestamp
78 m.CallId = ml.callID
79 m.SequenceIdWithinCall = ml.idWithinCallGen.next()
80
81 switch pay := m.Payload.(type) {
82 case *pb.GrpcLogEntry_ClientHeader:
83 m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
84 case *pb.GrpcLogEntry_ServerHeader:
85 m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
86 case *pb.GrpcLogEntry_Message:
87 m.PayloadTruncated = ml.truncateMessage(pay.Message)
88 }
89
90 ml.sink.Write(m)
91}
92
93func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
94 if ml.headerMaxLen == maxUInt {
95 return false
96 }
97 var (
98 bytesLimit = ml.headerMaxLen
99 index int
100 )
101 // At the end of the loop, index will be the first entry where the total
102 // size is greater than the limit:
103 //
104 // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
105 for ; index < len(mdPb.Entry); index++ {
106 entry := mdPb.Entry[index]
107 if entry.Key == "grpc-trace-bin" {
108 // "grpc-trace-bin" is a special key. It's kept in the log entry,
109 // but not counted towards the size limit.
110 continue
111 }
112 currentEntryLen := uint64(len(entry.Value))
113 if currentEntryLen > bytesLimit {
114 break
115 }
116 bytesLimit -= currentEntryLen
117 }
118 truncated = index < len(mdPb.Entry)
119 mdPb.Entry = mdPb.Entry[:index]
120 return truncated
121}
122
123func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
124 if ml.messageMaxLen == maxUInt {
125 return false
126 }
127 if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
128 return false
129 }
130 msgPb.Data = msgPb.Data[:ml.messageMaxLen]
131 return true
132}
133
134// LogEntryConfig represents the configuration for a binary log entry.
135type LogEntryConfig interface {
136 toProto() *pb.GrpcLogEntry
137}
138
139// ClientHeader configs the binary log entry to be a ClientHeader entry.
140type ClientHeader struct {
141 OnClientSide bool
142 Header metadata.MD
143 MethodName string
144 Authority string
145 Timeout time.Duration
146 // PeerAddr is required only when it's on server side.
147 PeerAddr net.Addr
148}
149
150func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
151 // This function doesn't need to set all the fields (e.g. seq ID). The Log
152 // function will set the fields when necessary.
153 clientHeader := &pb.ClientHeader{
154 Metadata: mdToMetadataProto(c.Header),
155 MethodName: c.MethodName,
156 Authority: c.Authority,
157 }
158 if c.Timeout > 0 {
159 clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
160 }
161 ret := &pb.GrpcLogEntry{
162 Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
163 Payload: &pb.GrpcLogEntry_ClientHeader{
164 ClientHeader: clientHeader,
165 },
166 }
167 if c.OnClientSide {
168 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
169 } else {
170 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
171 }
172 if c.PeerAddr != nil {
173 ret.Peer = addrToProto(c.PeerAddr)
174 }
175 return ret
176}
177
178// ServerHeader configs the binary log entry to be a ServerHeader entry.
179type ServerHeader struct {
180 OnClientSide bool
181 Header metadata.MD
182 // PeerAddr is required only when it's on client side.
183 PeerAddr net.Addr
184}
185
186func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
187 ret := &pb.GrpcLogEntry{
188 Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
189 Payload: &pb.GrpcLogEntry_ServerHeader{
190 ServerHeader: &pb.ServerHeader{
191 Metadata: mdToMetadataProto(c.Header),
192 },
193 },
194 }
195 if c.OnClientSide {
196 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
197 } else {
198 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
199 }
200 if c.PeerAddr != nil {
201 ret.Peer = addrToProto(c.PeerAddr)
202 }
203 return ret
204}
205
206// ClientMessage configs the binary log entry to be a ClientMessage entry.
207type ClientMessage struct {
208 OnClientSide bool
209 // Message can be a proto.Message or []byte. Other message formats are not
210 // supported.
211 Message interface{}
212}
213
214func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
215 var (
216 data []byte
217 err error
218 )
219 if m, ok := c.Message.(proto.Message); ok {
220 data, err = proto.Marshal(m)
221 if err != nil {
222 grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
223 }
224 } else if b, ok := c.Message.([]byte); ok {
225 data = b
226 } else {
227 grpclog.Infof("binarylogging: message to log is neither proto.Message nor []byte")
228 }
229 ret := &pb.GrpcLogEntry{
230 Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
231 Payload: &pb.GrpcLogEntry_Message{
232 Message: &pb.Message{
233 Length: uint32(len(data)),
234 Data: data,
235 },
236 },
237 }
238 if c.OnClientSide {
239 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
240 } else {
241 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
242 }
243 return ret
244}
245
246// ServerMessage configs the binary log entry to be a ServerMessage entry.
247type ServerMessage struct {
248 OnClientSide bool
249 // Message can be a proto.Message or []byte. Other message formats are not
250 // supported.
251 Message interface{}
252}
253
254func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
255 var (
256 data []byte
257 err error
258 )
259 if m, ok := c.Message.(proto.Message); ok {
260 data, err = proto.Marshal(m)
261 if err != nil {
262 grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
263 }
264 } else if b, ok := c.Message.([]byte); ok {
265 data = b
266 } else {
267 grpclog.Infof("binarylogging: message to log is neither proto.Message nor []byte")
268 }
269 ret := &pb.GrpcLogEntry{
270 Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
271 Payload: &pb.GrpcLogEntry_Message{
272 Message: &pb.Message{
273 Length: uint32(len(data)),
274 Data: data,
275 },
276 },
277 }
278 if c.OnClientSide {
279 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
280 } else {
281 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
282 }
283 return ret
284}
285
286// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
287type ClientHalfClose struct {
288 OnClientSide bool
289}
290
291func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
292 ret := &pb.GrpcLogEntry{
293 Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
294 Payload: nil, // No payload here.
295 }
296 if c.OnClientSide {
297 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
298 } else {
299 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
300 }
301 return ret
302}
303
304// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
305type ServerTrailer struct {
306 OnClientSide bool
307 Trailer metadata.MD
308 // Err is the status error.
309 Err error
310 // PeerAddr is required only when it's on client side and the RPC is trailer
311 // only.
312 PeerAddr net.Addr
313}
314
315func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
316 st, ok := status.FromError(c.Err)
317 if !ok {
318 grpclog.Info("binarylogging: error in trailer is not a status error")
319 }
320 var (
321 detailsBytes []byte
322 err error
323 )
324 stProto := st.Proto()
325 if stProto != nil && len(stProto.Details) != 0 {
326 detailsBytes, err = proto.Marshal(stProto)
327 if err != nil {
328 grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
329 }
330 }
331 ret := &pb.GrpcLogEntry{
332 Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
333 Payload: &pb.GrpcLogEntry_Trailer{
334 Trailer: &pb.Trailer{
335 Metadata: mdToMetadataProto(c.Trailer),
336 StatusCode: uint32(st.Code()),
337 StatusMessage: st.Message(),
338 StatusDetails: detailsBytes,
339 },
340 },
341 }
342 if c.OnClientSide {
343 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
344 } else {
345 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
346 }
347 if c.PeerAddr != nil {
348 ret.Peer = addrToProto(c.PeerAddr)
349 }
350 return ret
351}
352
353// Cancel configs the binary log entry to be a Cancel entry.
354type Cancel struct {
355 OnClientSide bool
356}
357
358func (c *Cancel) toProto() *pb.GrpcLogEntry {
359 ret := &pb.GrpcLogEntry{
360 Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
361 Payload: nil,
362 }
363 if c.OnClientSide {
364 ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
365 } else {
366 ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
367 }
368 return ret
369}
370
371// metadataKeyOmit returns whether the metadata entry with this key should be
372// omitted.
373func metadataKeyOmit(key string) bool {
374 switch key {
375 case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
376 return true
377 case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
378 return false
379 }
380 if strings.HasPrefix(key, "grpc-") {
381 return true
382 }
383 return false
384}
385
386func mdToMetadataProto(md metadata.MD) *pb.Metadata {
387 ret := &pb.Metadata{}
388 for k, vv := range md {
389 if metadataKeyOmit(k) {
390 continue
391 }
392 for _, v := range vv {
393 ret.Entry = append(ret.Entry,
394 &pb.MetadataEntry{
395 Key: k,
396 Value: []byte(v),
397 },
398 )
399 }
400 }
401 return ret
402}
403
404func addrToProto(addr net.Addr) *pb.Address {
405 ret := &pb.Address{}
406 switch a := addr.(type) {
407 case *net.TCPAddr:
408 if a.IP.To4() != nil {
409 ret.Type = pb.Address_TYPE_IPV4
410 } else if a.IP.To16() != nil {
411 ret.Type = pb.Address_TYPE_IPV6
412 } else {
413 ret.Type = pb.Address_TYPE_UNKNOWN
414 // Do not set address and port fields.
415 break
416 }
417 ret.Address = a.IP.String()
418 ret.IpPort = uint32(a.Port)
419 case *net.UnixAddr:
420 ret.Type = pb.Address_TYPE_UNIX
421 ret.Address = a.String()
422 default:
423 ret.Type = pb.Address_TYPE_UNKNOWN
424 }
425 return ret
426}
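A minimal sketch of the intended call pattern (not part of the vendored file; it assumes compilation inside the grpc module, and the method, authority, and metadata values are made up): obtain one MethodLogger per RPC, then Log one LogEntryConfig per binary-log event:

package example

import (
	"time"

	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/metadata"
)

func logClientHeader() {
	ml := binarylog.GetMethodLogger("/Foo/Bar")
	if ml == nil {
		return // binary logging is disabled for this method
	}
	// Each Log call stamps the entry with the call id, a per-call sequence id,
	// and a timestamp, truncates it to the configured limits, and writes it to
	// the sink.
	ml.Log(&binarylog.ClientHeader{
		OnClientSide: true,
		Header:       metadata.Pairs("x-demo-key", "demo-value"),
		MethodName:   "/Foo/Bar",
		Authority:    "example.com",
		Timeout:      time.Second,
	})
}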
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
new file mode 100644
index 0000000..113d40c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
@@ -0,0 +1,33 @@
1#!/bin/bash
2# Copyright 2018 gRPC authors.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
16set -eux -o pipefail
17
18TMP=$(mktemp -d)
19
20function finish {
21 rm -rf "$TMP"
22}
23trap finish EXIT
24
25pushd "$TMP"
26mkdir -p grpc/binarylog/grpc_binarylog_v1
27curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
28
29protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
30popd
31rm -f ./grpc_binarylog_v1/*.pb.go
32cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
33
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 0000000..20d044f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,162 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package binarylog
20
21import (
22 "bufio"
23 "encoding/binary"
24 "fmt"
25 "io"
26 "io/ioutil"
27 "sync"
28 "time"
29
30 "github.com/golang/protobuf/proto"
31 pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
32 "google.golang.org/grpc/grpclog"
33)
34
35var (
36 defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
37)
38
39// SetDefaultSink sets the sink to which binary logs will be written.
40//
41// Not thread safe. Only set during initialization.
42func SetDefaultSink(s Sink) {
43 if defaultSink != nil {
44 defaultSink.Close()
45 }
46 defaultSink = s
47}
48
49// Sink writes log entries into the binary log sink.
50type Sink interface {
51 // Write will be called to write the log entry into the sink.
52 //
53 // It should be thread-safe so it can be called in parallel.
54 Write(*pb.GrpcLogEntry) error
55 // Close will be called when the Sink is replaced by a new Sink.
56 Close() error
57}
58
59type noopSink struct{}
60
61func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
62func (ns *noopSink) Close() error { return nil }
63
64// newWriterSink creates a binary log sink with the given writer.
65//
66// Write() marshals the proto message and writes it to the given writer. Each
67// message is prefixed with a 4-byte big-endian unsigned integer as its length.
68//
69// No buffering is done, and Close() doesn't try to close the writer.
70func newWriterSink(w io.Writer) *writerSink {
71 return &writerSink{out: w}
72}
73
74type writerSink struct {
75 out io.Writer
76}
77
78func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
79 b, err := proto.Marshal(e)
80 if err != nil {
81 grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
82 }
83 hdr := make([]byte, 4)
84 binary.BigEndian.PutUint32(hdr, uint32(len(b)))
85 if _, err := ws.out.Write(hdr); err != nil {
86 return err
87 }
88 if _, err := ws.out.Write(b); err != nil {
89 return err
90 }
91 return nil
92}
93
94func (ws *writerSink) Close() error { return nil }
95
96type bufWriteCloserSink struct {
97 mu sync.Mutex
98 closer io.Closer
99 out *writerSink // out is built on buf.
100 buf *bufio.Writer // buf is kept for flush.
101
102 writeStartOnce sync.Once
103 writeTicker *time.Ticker
104}
105
106func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
107 // Start the write loop when Write is called.
108 fs.writeStartOnce.Do(fs.startFlushGoroutine)
109 fs.mu.Lock()
110 if err := fs.out.Write(e); err != nil {
111 fs.mu.Unlock()
112 return err
113 }
114 fs.mu.Unlock()
115 return nil
116}
117
118const (
119 bufFlushDuration = 60 * time.Second
120)
121
122func (fs *bufWriteCloserSink) startFlushGoroutine() {
123 fs.writeTicker = time.NewTicker(bufFlushDuration)
124 go func() {
125 for range fs.writeTicker.C {
126 fs.mu.Lock()
127 fs.buf.Flush()
128 fs.mu.Unlock()
129 }
130 }()
131}
132
133func (fs *bufWriteCloserSink) Close() error {
134 if fs.writeTicker != nil {
135 fs.writeTicker.Stop()
136 }
137 fs.mu.Lock()
138 fs.buf.Flush()
139 fs.closer.Close()
140 fs.out.Close()
141 fs.mu.Unlock()
142 return nil
143}
144
145func newBufWriteCloserSink(o io.WriteCloser) Sink {
146 bufW := bufio.NewWriter(o)
147 return &bufWriteCloserSink{
148 closer: o,
149 out: newWriterSink(bufW),
150 buf: bufW,
151 }
152}
153
154// NewTempFileSink creates a temp file and returns a Sink that writes to this
155// file.
156func NewTempFileSink() (Sink, error) {
157 tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
158 if err != nil {
159 return nil, fmt.Errorf("failed to create temp file: %v", err)
160 }
161 return newBufWriteCloserSink(tempFile), nil
162}
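Entries written by writerSink use a simple framing: a 4-byte big-endian length prefix followed by the marshaled GrpcLogEntry. A self-contained sketch of a reader for that format (not part of the vendored file; the file path is only illustrative):

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	f, err := os.Open("/tmp/grpcgo_binarylog_example.txt") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	hdr := make([]byte, 4)
	for {
		// Read the 4-byte big-endian length prefix, then the entry itself.
		if _, err := io.ReadFull(f, hdr); err != nil {
			break // io.EOF once all entries are consumed
		}
		buf := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(f, buf); err != nil {
			panic(err)
		}
		entry := &pb.GrpcLogEntry{}
		if err := proto.Unmarshal(buf, entry); err != nil {
			panic(err)
		}
		fmt.Println(entry.GetType(), entry.GetCallId(), entry.GetSequenceIdWithinCall())
	}
}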
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
new file mode 100644
index 0000000..15dc780
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/util.go
@@ -0,0 +1,41 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package binarylog
20
21import (
22 "errors"
23 "strings"
24)
25
26// parseMethodName splits service and method from the input. It expects the format
27// "/service/method".
28//
29// TODO: move to internal/grpcutil.
30func parseMethodName(methodName string) (service, method string, _ error) {
31 if !strings.HasPrefix(methodName, "/") {
32 return "", "", errors.New("invalid method name: should start with /")
33 }
34 methodName = methodName[1:]
35
36 pos := strings.LastIndex(methodName, "/")
37 if pos < 0 {
38 return "", "", errors.New("invalid method name: suffix /method is missing")
39 }
40 return methodName[:pos], methodName[pos+1:], nil
41}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 0000000..041520d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,699 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package channelz defines APIs for enabling channelz service, entry
20// registration/deletion, and accessing channelz data. It also defines channelz
21// metric struct formats.
22//
23// All APIs in this package are experimental.
24package channelz
25
26import (
27 "sort"
28 "sync"
29 "sync/atomic"
30 "time"
31
32 "google.golang.org/grpc/grpclog"
33)
34
35const (
36 defaultMaxTraceEntry int32 = 30
37)
38
39var (
40 db dbWrapper
41 idGen idGenerator
42 // EntryPerPage defines the number of channelz entries to be shown on a web page.
43 EntryPerPage = int64(50)
44 curState int32
45 maxTraceEntry = defaultMaxTraceEntry
46)
47
48// TurnOn turns on channelz data collection.
49func TurnOn() {
50 if !IsOn() {
51 NewChannelzStorage()
52 atomic.StoreInt32(&curState, 1)
53 }
54}
55
56// IsOn returns whether channelz data collection is on.
57func IsOn() bool {
58 return atomic.CompareAndSwapInt32(&curState, 1, 1)
59}
60
61// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
62// Setting it to 0 will disable channel tracing.
63func SetMaxTraceEntry(i int32) {
64 atomic.StoreInt32(&maxTraceEntry, i)
65}
66
67// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
68func ResetMaxTraceEntryToDefault() {
69 atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
70}
71
72func getMaxTraceEntry() int {
73 i := atomic.LoadInt32(&maxTraceEntry)
74 return int(i)
75}
76
77// dbWrapper wraps a reference to the internal channelz data storage, and
78// provides synchronized functions to set and get the reference.
79type dbWrapper struct {
80 mu sync.RWMutex
81 DB *channelMap
82}
83
84func (d *dbWrapper) set(db *channelMap) {
85 d.mu.Lock()
86 d.DB = db
87 d.mu.Unlock()
88}
89
90func (d *dbWrapper) get() *channelMap {
91 d.mu.RLock()
92 defer d.mu.RUnlock()
93 return d.DB
94}
95
96// NewChannelzStorage initializes channelz data storage and id generator.
97//
98// Note: This function is exported for testing purposes only. Users should not call
99// it in most cases.
100func NewChannelzStorage() {
101 db.set(&channelMap{
102 topLevelChannels: make(map[int64]struct{}),
103 channels: make(map[int64]*channel),
104 listenSockets: make(map[int64]*listenSocket),
105 normalSockets: make(map[int64]*normalSocket),
106 servers: make(map[int64]*server),
107 subChannels: make(map[int64]*subChannel),
108 })
109 idGen.reset()
110}
111
112// GetTopChannels returns a slice of top-level channels' ChannelMetrics, along
113// with a boolean indicating whether there are more top-level channels to be queried for.
114//
115// The arg id specifies that only top-level channels with an id at or above it will be
116// included in the result. The returned slice has a length of at most maxResults, or
117// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
118func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
119 return db.get().GetTopChannels(id, maxResults)
120}
121
122// GetServers returns a slice of servers' ServerMetrics, along with a
123// boolean indicating whether there are more servers to be queried for.
124//
125// The arg id specifies that only servers with an id at or above it will be included
126// in the result. The returned slice has a length of at most maxResults, or
127// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
128func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
129 return db.get().GetServers(id, maxResults)
130}
131
132// GetServerSockets returns a slice of the server's (identified by id) normal sockets'
133// SocketMetrics, along with a boolean indicating whether there are more sockets to
134// be queried for.
135//
136// The arg startID specifies that only sockets with an id at or above it will be
137// included in the result. The returned slice has a length of at most maxResults
138// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
139func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
140 return db.get().GetServerSockets(id, startID, maxResults)
141}
142
143// GetChannel returns the ChannelMetric for the channel (identified by id).
144func GetChannel(id int64) *ChannelMetric {
145 return db.get().GetChannel(id)
146}
147
148// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
149func GetSubChannel(id int64) *SubChannelMetric {
150 return db.get().GetSubChannel(id)
151}
152
153// GetSocket returns the SocketInternalMetric for the socket (identified by id).
154func GetSocket(id int64) *SocketMetric {
155 return db.get().GetSocket(id)
156}
157
158// GetServer returns the ServerMetric for the server (identified by id).
159func GetServer(id int64) *ServerMetric {
160 return db.get().GetServer(id)
161}
162
163// RegisterChannel registers the given channel c in the channelz database with ref
164// as its reference name, and adds it to the child list of its parent (identified
165// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
166// assigned to this channel.
167func RegisterChannel(c Channel, pid int64, ref string) int64 {
168 id := idGen.genID()
169 cn := &channel{
170 refName: ref,
171 c: c,
172 subChans: make(map[int64]string),
173 nestedChans: make(map[int64]string),
174 id: id,
175 pid: pid,
176 trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
177 }
178 if pid == 0 {
179 db.get().addChannel(id, cn, true, pid, ref)
180 } else {
181 db.get().addChannel(id, cn, false, pid, ref)
182 }
183 return id
184}
185
186// RegisterSubChannel registers the given channel c in the channelz database with ref
187// as its reference name, and adds it to the child list of its parent (identified
188// by pid). It returns the unique channelz tracking id assigned to this subchannel.
189func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
190 if pid == 0 {
191 grpclog.Error("a SubChannel's parent id cannot be 0")
192 return 0
193 }
194 id := idGen.genID()
195 sc := &subChannel{
196 refName: ref,
197 c: c,
198 sockets: make(map[int64]string),
199 id: id,
200 pid: pid,
201 trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
202 }
203 db.get().addSubChannel(id, sc, pid, ref)
204 return id
205}
206
207// RegisterServer registers the given server s in channelz database. It returns
208// the unique channelz tracking id assigned to this server.
209func RegisterServer(s Server, ref string) int64 {
210 id := idGen.genID()
211 svr := &server{
212 refName: ref,
213 s: s,
214 sockets: make(map[int64]string),
215 listenSockets: make(map[int64]string),
216 id: id,
217 }
218 db.get().addServer(id, svr)
219 return id
220}
221
222// RegisterListenSocket registers the given listen socket s in the channelz database
223// with ref as its reference name, and adds it to the child list of its parent
224// (identified by pid). It returns the unique channelz tracking id assigned to
225// this listen socket.
226func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
227 if pid == 0 {
228 grpclog.Error("a ListenSocket's parent id cannot be 0")
229 return 0
230 }
231 id := idGen.genID()
232 ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
233 db.get().addListenSocket(id, ls, pid, ref)
234 return id
235}
236
237// RegisterNormalSocket registers the given normal socket s in the channelz database
238// with ref as its reference name, and adds it to the child list of its parent
239// (identified by pid). It returns the unique channelz tracking id assigned to
240// this normal socket.
241func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
242 if pid == 0 {
243 grpclog.Error("a NormalSocket's parent id cannot be 0")
244 return 0
245 }
246 id := idGen.genID()
247 ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
248 db.get().addNormalSocket(id, ns, pid, ref)
249 return id
250}
251
252// RemoveEntry removes the entry with the given unique channelz tracking id from the
253// channelz database.
254func RemoveEntry(id int64) {
255 db.get().removeEntry(id)
256}
257
258// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the
259// event to be added to the channel trace.
260// The Parent field is optional. It is used for events that will also be recorded in
261// the entity's parent trace.
262type TraceEventDesc struct {
263 Desc string
264 Severity Severity
265 Parent *TraceEventDesc
266}
267
268// AddTraceEvent adds a trace event for the entity with the specified id, using the provided TraceEventDesc.
269func AddTraceEvent(id int64, desc *TraceEventDesc) {
270 if getMaxTraceEntry() == 0 {
271 return
272 }
273 db.get().traceEvent(id, desc)
274}
275
276// channelMap is the storage data structure for channelz.
277// Methods of channelMap can be divided into two categories with respect to locking.
278// 1. Methods that acquire the global lock.
279// 2. Methods that can only be called when the global lock is held.
280// A method of the second type must always be called inside a method of the first type.
281type channelMap struct {
282 mu sync.RWMutex
283 topLevelChannels map[int64]struct{}
284 servers map[int64]*server
285 channels map[int64]*channel
286 subChannels map[int64]*subChannel
287 listenSockets map[int64]*listenSocket
288 normalSockets map[int64]*normalSocket
289}
290
291func (c *channelMap) addServer(id int64, s *server) {
292 c.mu.Lock()
293 s.cm = c
294 c.servers[id] = s
295 c.mu.Unlock()
296}
297
298func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
299 c.mu.Lock()
300 cn.cm = c
301 cn.trace.cm = c
302 c.channels[id] = cn
303 if isTopChannel {
304 c.topLevelChannels[id] = struct{}{}
305 } else {
306 c.findEntry(pid).addChild(id, cn)
307 }
308 c.mu.Unlock()
309}
310
311func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
312 c.mu.Lock()
313 sc.cm = c
314 sc.trace.cm = c
315 c.subChannels[id] = sc
316 c.findEntry(pid).addChild(id, sc)
317 c.mu.Unlock()
318}
319
320func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
321 c.mu.Lock()
322 ls.cm = c
323 c.listenSockets[id] = ls
324 c.findEntry(pid).addChild(id, ls)
325 c.mu.Unlock()
326}
327
328func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
329 c.mu.Lock()
330 ns.cm = c
331 c.normalSockets[id] = ns
332 c.findEntry(pid).addChild(id, ns)
333 c.mu.Unlock()
334}
335
336// removeEntry triggers the removal of an entry, which may not immediately delete the entry
337// if it has to wait for the deletion of its children and until no other entity's channel
338// trace references it. It may lead to a chain of entry deletions. For example, deleting the
339// last socket of a gracefully shutting down server will also lead to the server being deleted.
340func (c *channelMap) removeEntry(id int64) {
341 c.mu.Lock()
342 c.findEntry(id).triggerDelete()
343 c.mu.Unlock()
344}
345
346// c.mu must be held by the caller
347func (c *channelMap) decrTraceRefCount(id int64) {
348 e := c.findEntry(id)
349 if v, ok := e.(tracedChannel); ok {
350 v.decrTraceRefCount()
351 e.deleteSelfIfReady()
352 }
353}
354
355// c.mu must be held by the caller.
356func (c *channelMap) findEntry(id int64) entry {
357 var v entry
358 var ok bool
359 if v, ok = c.channels[id]; ok {
360 return v
361 }
362 if v, ok = c.subChannels[id]; ok {
363 return v
364 }
365 if v, ok = c.servers[id]; ok {
366 return v
367 }
368 if v, ok = c.listenSockets[id]; ok {
369 return v
370 }
371 if v, ok = c.normalSockets[id]; ok {
372 return v
373 }
374 return &dummyEntry{idNotFound: id}
375}
376
377// c.mu must be held by the caller
378// deleteEntry simply deletes an entry from the channelMap. Before calling this
379// method, the caller must check that the entry is ready to be deleted, i.e. removeEntry()
380// has been called on it, and no children still exist.
381// Conditionals are ordered by the expected frequency of deletion of each entity
382// type, in order to optimize performance.
383func (c *channelMap) deleteEntry(id int64) {
384 var ok bool
385 if _, ok = c.normalSockets[id]; ok {
386 delete(c.normalSockets, id)
387 return
388 }
389 if _, ok = c.subChannels[id]; ok {
390 delete(c.subChannels, id)
391 return
392 }
393 if _, ok = c.channels[id]; ok {
394 delete(c.channels, id)
395 delete(c.topLevelChannels, id)
396 return
397 }
398 if _, ok = c.listenSockets[id]; ok {
399 delete(c.listenSockets, id)
400 return
401 }
402 if _, ok = c.servers[id]; ok {
403 delete(c.servers, id)
404 return
405 }
406}
407
408func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
409 c.mu.Lock()
410 child := c.findEntry(id)
411 childTC, ok := child.(tracedChannel)
412 if !ok {
413 c.mu.Unlock()
414 return
415 }
416 childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
417 if desc.Parent != nil {
418 parent := c.findEntry(child.getParentID())
419 var chanType RefChannelType
420 switch child.(type) {
421 case *channel:
422 chanType = RefChannel
423 case *subChannel:
424 chanType = RefSubChannel
425 }
426 if parentTC, ok := parent.(tracedChannel); ok {
427 parentTC.getChannelTrace().append(&TraceEvent{
428 Desc: desc.Parent.Desc,
429 Severity: desc.Parent.Severity,
430 Timestamp: time.Now(),
431 RefID: id,
432 RefName: childTC.getRefName(),
433 RefType: chanType,
434 })
435 childTC.incrTraceRefCount()
436 }
437 }
438 c.mu.Unlock()
439}
440
441type int64Slice []int64
442
443func (s int64Slice) Len() int { return len(s) }
444func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
445func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
446
447func copyMap(m map[int64]string) map[int64]string {
448 n := make(map[int64]string)
449 for k, v := range m {
450 n[k] = v
451 }
452 return n
453}
454
455func min(a, b int64) int64 {
456 if a < b {
457 return a
458 }
459 return b
460}
461
462func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
463 if maxResults <= 0 {
464 maxResults = EntryPerPage
465 }
466 c.mu.RLock()
467 l := int64(len(c.topLevelChannels))
468 ids := make([]int64, 0, l)
469 cns := make([]*channel, 0, min(l, maxResults))
470
471 for k := range c.topLevelChannels {
472 ids = append(ids, k)
473 }
474 sort.Sort(int64Slice(ids))
475 idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
476 count := int64(0)
477 var end bool
478 var t []*ChannelMetric
479 for i, v := range ids[idx:] {
480 if count == maxResults {
481 break
482 }
483 if cn, ok := c.channels[v]; ok {
484 cns = append(cns, cn)
485 t = append(t, &ChannelMetric{
486 NestedChans: copyMap(cn.nestedChans),
487 SubChans: copyMap(cn.subChans),
488 })
489 count++
490 }
491 if i == len(ids[idx:])-1 {
492 end = true
493 break
494 }
495 }
496 c.mu.RUnlock()
497 if count == 0 {
498 end = true
499 }
500
501 for i, cn := range cns {
502 t[i].ChannelData = cn.c.ChannelzMetric()
503 t[i].ID = cn.id
504 t[i].RefName = cn.refName
505 t[i].Trace = cn.trace.dumpData()
506 }
507 return t, end
508}
509
510func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
511 if maxResults <= 0 {
512 maxResults = EntryPerPage
513 }
514 c.mu.RLock()
515 l := int64(len(c.servers))
516 ids := make([]int64, 0, l)
517 ss := make([]*server, 0, min(l, maxResults))
518 for k := range c.servers {
519 ids = append(ids, k)
520 }
521 sort.Sort(int64Slice(ids))
522 idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
523 count := int64(0)
524 var end bool
525 var s []*ServerMetric
526 for i, v := range ids[idx:] {
527 if count == maxResults {
528 break
529 }
530 if svr, ok := c.servers[v]; ok {
531 ss = append(ss, svr)
532 s = append(s, &ServerMetric{
533 ListenSockets: copyMap(svr.listenSockets),
534 })
535 count++
536 }
537 if i == len(ids[idx:])-1 {
538 end = true
539 break
540 }
541 }
542 c.mu.RUnlock()
543 if count == 0 {
544 end = true
545 }
546
547 for i, svr := range ss {
548 s[i].ServerData = svr.s.ChannelzMetric()
549 s[i].ID = svr.id
550 s[i].RefName = svr.refName
551 }
552 return s, end
553}
554
555func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
556 if maxResults <= 0 {
557 maxResults = EntryPerPage
558 }
559 var svr *server
560 var ok bool
561 c.mu.RLock()
562 if svr, ok = c.servers[id]; !ok {
563 // server with id doesn't exist.
564 c.mu.RUnlock()
565 return nil, true
566 }
567 svrskts := svr.sockets
568 l := int64(len(svrskts))
569 ids := make([]int64, 0, l)
570 sks := make([]*normalSocket, 0, min(l, maxResults))
571 for k := range svrskts {
572 ids = append(ids, k)
573 }
574 sort.Sort(int64Slice(ids))
575 idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
576 count := int64(0)
577 var end bool
578 for i, v := range ids[idx:] {
579 if count == maxResults {
580 break
581 }
582 if ns, ok := c.normalSockets[v]; ok {
583 sks = append(sks, ns)
584 count++
585 }
586 if i == len(ids[idx:])-1 {
587 end = true
588 break
589 }
590 }
591 c.mu.RUnlock()
592 if count == 0 {
593 end = true
594 }
595 var s []*SocketMetric
596 for _, ns := range sks {
597 sm := &SocketMetric{}
598 sm.SocketData = ns.s.ChannelzMetric()
599 sm.ID = ns.id
600 sm.RefName = ns.refName
601 s = append(s, sm)
602 }
603 return s, end
604}
605
606func (c *channelMap) GetChannel(id int64) *ChannelMetric {
607 cm := &ChannelMetric{}
608 var cn *channel
609 var ok bool
610 c.mu.RLock()
611 if cn, ok = c.channels[id]; !ok {
612 // channel with id doesn't exist.
613 c.mu.RUnlock()
614 return nil
615 }
616 cm.NestedChans = copyMap(cn.nestedChans)
617 cm.SubChans = copyMap(cn.subChans)
618 // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
619 // holding the lock to prevent potential data race.
620 chanCopy := cn.c
621 c.mu.RUnlock()
622 cm.ChannelData = chanCopy.ChannelzMetric()
623 cm.ID = cn.id
624 cm.RefName = cn.refName
625 cm.Trace = cn.trace.dumpData()
626 return cm
627}
628
629func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
630 cm := &SubChannelMetric{}
631 var sc *subChannel
632 var ok bool
633 c.mu.RLock()
634 if sc, ok = c.subChannels[id]; !ok {
635 // subchannel with id doesn't exist.
636 c.mu.RUnlock()
637 return nil
638 }
639 cm.Sockets = copyMap(sc.sockets)
640 // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
641 // holding the lock to prevent potential data race.
642 chanCopy := sc.c
643 c.mu.RUnlock()
644 cm.ChannelData = chanCopy.ChannelzMetric()
645 cm.ID = sc.id
646 cm.RefName = sc.refName
647 cm.Trace = sc.trace.dumpData()
648 return cm
649}
650
651func (c *channelMap) GetSocket(id int64) *SocketMetric {
652 sm := &SocketMetric{}
653 c.mu.RLock()
654 if ls, ok := c.listenSockets[id]; ok {
655 c.mu.RUnlock()
656 sm.SocketData = ls.s.ChannelzMetric()
657 sm.ID = ls.id
658 sm.RefName = ls.refName
659 return sm
660 }
661 if ns, ok := c.normalSockets[id]; ok {
662 c.mu.RUnlock()
663 sm.SocketData = ns.s.ChannelzMetric()
664 sm.ID = ns.id
665 sm.RefName = ns.refName
666 return sm
667 }
668 c.mu.RUnlock()
669 return nil
670}
671
672func (c *channelMap) GetServer(id int64) *ServerMetric {
673 sm := &ServerMetric{}
674 var svr *server
675 var ok bool
676 c.mu.RLock()
677 if svr, ok = c.servers[id]; !ok {
678 c.mu.RUnlock()
679 return nil
680 }
681 sm.ListenSockets = copyMap(svr.listenSockets)
682 c.mu.RUnlock()
683 sm.ID = svr.id
684 sm.RefName = svr.refName
685 sm.ServerData = svr.s.ChannelzMetric()
686 return sm
687}
688
689type idGenerator struct {
690 id int64
691}
692
693func (i *idGenerator) reset() {
694 atomic.StoreInt64(&i.id, 0)
695}
696
697func (i *idGenerator) genID() int64 {
698 return atomic.AddInt64(&i.id, 1)
699}
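For illustration only (not part of the vendored file): a minimal, self-contained sketch of the idGenerator pattern above, showing why atomic.AddInt64 is enough to hand out unique ids across goroutines. The names gen and newID are made up for this sketch.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// gen mirrors idGenerator: a single int64 bumped atomically, so concurrent
// callers never observe the same id and ids are strictly increasing.
type gen struct{ id int64 }

func (g *gen) newID() int64 { return atomic.AddInt64(&g.id, 1) }

func main() {
	g := &gen{}
	var wg sync.WaitGroup
	seen := make([]int64, 8)
	for i := range seen {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			seen[i] = g.newID()
		}(i)
	}
	wg.Wait()
	fmt.Println(seen) // some permutation of 1..8, each value exactly once
}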
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
new file mode 100644
index 0000000..17c2274
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -0,0 +1,702 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package channelz
20
21import (
22 "net"
23 "sync"
24 "sync/atomic"
25 "time"
26
27 "google.golang.org/grpc/connectivity"
28 "google.golang.org/grpc/credentials"
29 "google.golang.org/grpc/grpclog"
30)
31
32// entry represents a node in the channelz database.
33type entry interface {
34 // addChild adds a child e, whose channelz id is id, to the child list
35 addChild(id int64, e entry)
36 // deleteChild deletes the child whose channelz id is id from the child list
37 deleteChild(id int64)
38 // triggerDelete tries to delete self from channelz database. However, if child
39 // list is not empty, then deletion from the database is on hold until the last
40 // child is deleted from database.
41 triggerDelete()
42 // deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the child
43 // list is now empty. If both conditions are met, it deletes the entry from the database.
44 deleteSelfIfReady()
45 // getParentID returns parent ID of the entry. 0 value parent ID means no parent.
46 getParentID() int64
47}
48
49// dummyEntry is a fake entry to handle entry not found case.
50type dummyEntry struct {
51 idNotFound int64
52}
53
54func (d *dummyEntry) addChild(id int64, e entry) {
55 // Note: It is possible for a normal program to reach here under a race condition.
56 // For example, there could be a race between ClientConn.Close() info being propagated
57 // to addrConn and http2Client. ClientConn.Close() cancels the context, causing
58 // http2Client to error out. The error is then caught by the transport monitor
59 // before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
60 // the addrConn will create a new transport. When registering the new transport in
61 // channelz, its parent addrConn could have already been torn down and deleted
62 // from channelz tracking, and thus reach the code here.
63 grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
64}
65
66func (d *dummyEntry) deleteChild(id int64) {
67 // It is possible for a normal program to reach here under a race condition.
68 // Refer to the example described in addChild().
69 grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
70}
71
72func (d *dummyEntry) triggerDelete() {
73 grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
74}
75
76func (*dummyEntry) deleteSelfIfReady() {
77 // code should not reach here. deleteSelfIfReady is always called on an existing entry.
78}
79
80func (*dummyEntry) getParentID() int64 {
81 return 0
82}
83
84// ChannelMetric defines the info channelz provides for a specific Channel, which
85// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
86// child list, etc.
87type ChannelMetric struct {
88 // ID is the channelz id of this channel.
89 ID int64
90 // RefName is the human readable reference string of this channel.
91 RefName string
92 // ChannelData contains channel internal metric reported by the channel through
93 // ChannelzMetric().
94 ChannelData *ChannelInternalMetric
95 // NestedChans tracks the nested channel type children of this channel in the format of
96 // a map from nested channel channelz id to corresponding reference string.
97 NestedChans map[int64]string
98 // SubChans tracks the subchannel type children of this channel in the format of a
99 // map from subchannel channelz id to corresponding reference string.
100 SubChans map[int64]string
101 // Sockets tracks the socket type children of this channel in the format of a map
102 // from socket channelz id to corresponding reference string.
103 // Note: the current grpc implementation doesn't allow a channel to have sockets directly;
104 // therefore, this field is unused.
105 Sockets map[int64]string
106 // Trace contains the most recent traced events.
107 Trace *ChannelTrace
108}
109
110// SubChannelMetric defines the info channelz provides for a specific SubChannel,
111// which includes ChannelInternalMetric and channelz-specific data, such as
112// channelz id, child list, etc.
113type SubChannelMetric struct {
114 // ID is the channelz id of this subchannel.
115 ID int64
116 // RefName is the human readable reference string of this subchannel.
117 RefName string
118 // ChannelData contains subchannel internal metric reported by the subchannel
119 // through ChannelzMetric().
120 ChannelData *ChannelInternalMetric
121 // NestedChans tracks the nested channel type children of this subchannel in the format of
122 // a map from nested channel channelz id to corresponding reference string.
123 // Note: the current grpc implementation doesn't allow a subchannel to have nested channels
124 // as children; therefore, this field is unused.
125 NestedChans map[int64]string
126 // SubChans tracks the subchannel type children of this subchannel in the format of a
127 // map from subchannel channelz id to corresponding reference string.
128 // Note: the current grpc implementation doesn't allow a subchannel to have subchannels
129 // as children; therefore, this field is unused.
130 SubChans map[int64]string
131 // Sockets tracks the socket type children of this subchannel in the format of a map
132 // from socket channelz id to corresponding reference string.
133 Sockets map[int64]string
134 // Trace contains the most recent traced events.
135 Trace *ChannelTrace
136}
137
138// ChannelInternalMetric defines the struct that the implementor of Channel interface
139// should return from ChannelzMetric().
140type ChannelInternalMetric struct {
141 // current connectivity state of the channel.
142 State connectivity.State
143 // The target this channel originally tried to connect to. May be absent
144 Target string
145 // The number of calls started on the channel.
146 CallsStarted int64
147 // The number of calls that have completed with an OK status.
148 CallsSucceeded int64
149 // The number of calls that have completed with a non-OK status.
150 CallsFailed int64
151 // The last time a call was started on the channel.
152 LastCallStartedTimestamp time.Time
153}
154
155// ChannelTrace stores traced events on a channel/subchannel and related info.
156type ChannelTrace struct {
157 // EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
158 EventNum int64
159 // CreationTime is the creation time of the trace.
160 CreationTime time.Time
161 // Events stores the most recent trace events (up to $maxTraceEntry; newer events overwrite the
162 // oldest ones)
163 Events []*TraceEvent
164}
165
166 // TraceEvent represents a single trace event.
167type TraceEvent struct {
168 // Desc is a simple description of the trace event.
169 Desc string
170 // Severity states the severity of this trace event.
171 Severity Severity
172 // Timestamp is the event time.
173 Timestamp time.Time
174 // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
175 // involved in this event.
176 // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
177 RefID int64
178 // RefName is the reference name for the entity that gets referenced in the event.
179 RefName string
180 // RefType indicates the referenced entity type, i.e. Channel or SubChannel.
181 RefType RefChannelType
182}
183
184// Channel is the interface that should be satisfied in order to be tracked by
185// channelz as Channel or SubChannel.
186type Channel interface {
187 ChannelzMetric() *ChannelInternalMetric
188}
189
190type dummyChannel struct{}
191
192func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
193 return &ChannelInternalMetric{}
194}
195
196type channel struct {
197 refName string
198 c Channel
199 closeCalled bool
200 nestedChans map[int64]string
201 subChans map[int64]string
202 id int64
203 pid int64
204 cm *channelMap
205 trace *channelTrace
206 // traceRefCount is the number of trace events that reference this channel.
207 // Non-zero traceRefCount means the trace of this channel cannot be deleted.
208 traceRefCount int32
209}
210
211func (c *channel) addChild(id int64, e entry) {
212 switch v := e.(type) {
213 case *subChannel:
214 c.subChans[id] = v.refName
215 case *channel:
216 c.nestedChans[id] = v.refName
217 default:
218 grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
219 }
220}
221
222func (c *channel) deleteChild(id int64) {
223 delete(c.subChans, id)
224 delete(c.nestedChans, id)
225 c.deleteSelfIfReady()
226}
227
228func (c *channel) triggerDelete() {
229 c.closeCalled = true
230 c.deleteSelfIfReady()
231}
232
233func (c *channel) getParentID() int64 {
234 return c.pid
235}
236
237// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
238// deleting the channel reference from its parent's child list.
239//
240 // In order for a channel to be deleted from the tree, it must meet the criteria that removal of the
241// corresponding grpc object has been invoked, and the channel does not have any children left.
242//
243// The returned boolean value indicates whether the channel has been successfully deleted from tree.
244func (c *channel) deleteSelfFromTree() (deleted bool) {
245 if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
246 return false
247 }
248 // not top channel
249 if c.pid != 0 {
250 c.cm.findEntry(c.pid).deleteChild(c.id)
251 }
252 return true
253}
254
255// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
256// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
257// channel, and its memory will be garbage collected.
258//
259// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
260 // specified in the channel tracing gRFC that as long as some other trace has a reference to an entity,
261// the trace of the referenced entity must not be deleted. In order to release the resource allocated
262// by grpc, the reference to the grpc object is reset to a dummy object.
263//
264// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
265//
266// It returns a bool to indicate whether the channel can be safely deleted from map.
267func (c *channel) deleteSelfFromMap() (delete bool) {
268 if c.getTraceRefCount() != 0 {
269 c.c = &dummyChannel{}
270 return false
271 }
272 return true
273}
274
275// deleteSelfIfReady tries to delete the channel itself from the channelz database.
276// The delete process includes two steps:
277// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
278// parent's child list.
279// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
280// will return entry not found error.
281func (c *channel) deleteSelfIfReady() {
282 if !c.deleteSelfFromTree() {
283 return
284 }
285 if !c.deleteSelfFromMap() {
286 return
287 }
288 c.cm.deleteEntry(c.id)
289 c.trace.clear()
290}
291
292func (c *channel) getChannelTrace() *channelTrace {
293 return c.trace
294}
295
296func (c *channel) incrTraceRefCount() {
297 atomic.AddInt32(&c.traceRefCount, 1)
298}
299
300func (c *channel) decrTraceRefCount() {
301 atomic.AddInt32(&c.traceRefCount, -1)
302}
303
304func (c *channel) getTraceRefCount() int {
305 i := atomic.LoadInt32(&c.traceRefCount)
306 return int(i)
307}
308
309func (c *channel) getRefName() string {
310 return c.refName
311}
312
313type subChannel struct {
314 refName string
315 c Channel
316 closeCalled bool
317 sockets map[int64]string
318 id int64
319 pid int64
320 cm *channelMap
321 trace *channelTrace
322 traceRefCount int32
323}
324
325func (sc *subChannel) addChild(id int64, e entry) {
326 if v, ok := e.(*normalSocket); ok {
327 sc.sockets[id] = v.refName
328 } else {
329 grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
330 }
331}
332
333func (sc *subChannel) deleteChild(id int64) {
334 delete(sc.sockets, id)
335 sc.deleteSelfIfReady()
336}
337
338func (sc *subChannel) triggerDelete() {
339 sc.closeCalled = true
340 sc.deleteSelfIfReady()
341}
342
343func (sc *subChannel) getParentID() int64 {
344 return sc.pid
345}
346
347// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
348// means deleting the subchannel reference from its parent's child list.
349//
350 // In order for a subchannel to be deleted from the tree, it must meet the criteria that removal of
351// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
352//
353// The returned boolean value indicates whether the channel has been successfully deleted from tree.
354func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
355 if !sc.closeCalled || len(sc.sockets) != 0 {
356 return false
357 }
358 sc.cm.findEntry(sc.pid).deleteChild(sc.id)
359 return true
360}
361
362// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
363// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
364// the subchannel, and its memory will be garbage collected.
365//
366// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
367 // specified in the channel tracing gRFC that as long as some other trace has a reference to an entity,
368// the trace of the referenced entity must not be deleted. In order to release the resource allocated
369// by grpc, the reference to the grpc object is reset to a dummy object.
370//
371// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
372//
373// It returns a bool to indicate whether the channel can be safely deleted from map.
374func (sc *subChannel) deleteSelfFromMap() (delete bool) {
375 if sc.getTraceRefCount() != 0 {
376 // free the grpc struct (i.e. addrConn)
377 sc.c = &dummyChannel{}
378 return false
379 }
380 return true
381}
382
383// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
384// The delete process includes two steps:
385// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
386// its parent's child list.
387// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
388// by id will return entry not found error.
389func (sc *subChannel) deleteSelfIfReady() {
390 if !sc.deleteSelfFromTree() {
391 return
392 }
393 if !sc.deleteSelfFromMap() {
394 return
395 }
396 sc.cm.deleteEntry(sc.id)
397 sc.trace.clear()
398}
399
400func (sc *subChannel) getChannelTrace() *channelTrace {
401 return sc.trace
402}
403
404func (sc *subChannel) incrTraceRefCount() {
405 atomic.AddInt32(&sc.traceRefCount, 1)
406}
407
408func (sc *subChannel) decrTraceRefCount() {
409 atomic.AddInt32(&sc.traceRefCount, -1)
410}
411
412func (sc *subChannel) getTraceRefCount() int {
413 i := atomic.LoadInt32(&sc.traceRefCount)
414 return int(i)
415}
416
417func (sc *subChannel) getRefName() string {
418 return sc.refName
419}
420
421// SocketMetric defines the info channelz provides for a specific Socket, which
422// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
423type SocketMetric struct {
424 // ID is the channelz id of this socket.
425 ID int64
426 // RefName is the human readable reference string of this socket.
427 RefName string
428 // SocketData contains socket internal metric reported by the socket through
429 // ChannelzMetric().
430 SocketData *SocketInternalMetric
431}
432
433// SocketInternalMetric defines the struct that the implementor of Socket interface
434// should return from ChannelzMetric().
435type SocketInternalMetric struct {
436 // The number of streams that have been started.
437 StreamsStarted int64
438 // The number of streams that have ended successfully:
439 // On client side, receiving frame with eos bit set.
440 // On server side, sending frame with eos bit set.
441 StreamsSucceeded int64
442 // The number of streams that have ended unsuccessfully:
443 // On client side, termination without receiving frame with eos bit set.
444 // On server side, termination without sending frame with eos bit set.
445 StreamsFailed int64
446 // The number of messages successfully sent on this socket.
447 MessagesSent int64
448 MessagesReceived int64
449 // The number of keep alives sent. This is typically implemented with HTTP/2
450 // ping messages.
451 KeepAlivesSent int64
452 // The last time a stream was created by this endpoint. Usually unset for
453 // servers.
454 LastLocalStreamCreatedTimestamp time.Time
455 // The last time a stream was created by the remote endpoint. Usually unset
456 // for clients.
457 LastRemoteStreamCreatedTimestamp time.Time
458 // The last time a message was sent by this endpoint.
459 LastMessageSentTimestamp time.Time
460 // The last time a message was received by this endpoint.
461 LastMessageReceivedTimestamp time.Time
462 // The amount of window, granted to the local endpoint by the remote endpoint.
463 // This may be slightly out of date due to network latency. This does NOT
464 // include stream level or TCP level flow control info.
465 LocalFlowControlWindow int64
466 // The amount of window, granted to the remote endpoint by the local endpoint.
467 // This may be slightly out of date due to network latency. This does NOT
468 // include stream level or TCP level flow control info.
469 RemoteFlowControlWindow int64
470 // The locally bound address.
471 LocalAddr net.Addr
472 // The remote bound address. May be absent.
473 RemoteAddr net.Addr
474 // Optional, represents the name of the remote endpoint, if different than
475 // the original target name.
476 RemoteName string
477 SocketOptions *SocketOptionData
478 Security credentials.ChannelzSecurityValue
479}
480
481// Socket is the interface that should be satisfied in order to be tracked by
482// channelz as Socket.
483type Socket interface {
484 ChannelzMetric() *SocketInternalMetric
485}
486
487type listenSocket struct {
488 refName string
489 s Socket
490 id int64
491 pid int64
492 cm *channelMap
493}
494
495func (ls *listenSocket) addChild(id int64, e entry) {
496 grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
497}
498
499func (ls *listenSocket) deleteChild(id int64) {
500 grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
501}
502
503func (ls *listenSocket) triggerDelete() {
504 ls.cm.deleteEntry(ls.id)
505 ls.cm.findEntry(ls.pid).deleteChild(ls.id)
506}
507
508func (ls *listenSocket) deleteSelfIfReady() {
509 grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
510}
511
512func (ls *listenSocket) getParentID() int64 {
513 return ls.pid
514}
515
516type normalSocket struct {
517 refName string
518 s Socket
519 id int64
520 pid int64
521 cm *channelMap
522}
523
524func (ns *normalSocket) addChild(id int64, e entry) {
525 grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
526}
527
528func (ns *normalSocket) deleteChild(id int64) {
529 grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
530}
531
532func (ns *normalSocket) triggerDelete() {
533 ns.cm.deleteEntry(ns.id)
534 ns.cm.findEntry(ns.pid).deleteChild(ns.id)
535}
536
537func (ns *normalSocket) deleteSelfIfReady() {
538 grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
539}
540
541func (ns *normalSocket) getParentID() int64 {
542 return ns.pid
543}
544
545// ServerMetric defines the info channelz provides for a specific Server, which
546// includes ServerInternalMetric and channelz-specific data, such as channelz id,
547// child list, etc.
548type ServerMetric struct {
549 // ID is the channelz id of this server.
550 ID int64
551 // RefName is the human readable reference string of this server.
552 RefName string
553 // ServerData contains server internal metric reported by the server through
554 // ChannelzMetric().
555 ServerData *ServerInternalMetric
556 // ListenSockets tracks the listener socket type children of this server in the
557 // format of a map from socket channelz id to corresponding reference string.
558 ListenSockets map[int64]string
559}
560
561// ServerInternalMetric defines the struct that the implementor of Server interface
562// should return from ChannelzMetric().
563type ServerInternalMetric struct {
564 // The number of incoming calls started on the server.
565 CallsStarted int64
566 // The number of incoming calls that have completed with an OK status.
567 CallsSucceeded int64
568 // The number of incoming calls that have completed with a non-OK status.
569 CallsFailed int64
570 // The last time a call was started on the server.
571 LastCallStartedTimestamp time.Time
572}
573
574// Server is the interface to be satisfied in order to be tracked by channelz as
575// Server.
576type Server interface {
577 ChannelzMetric() *ServerInternalMetric
578}
579
580type server struct {
581 refName string
582 s Server
583 closeCalled bool
584 sockets map[int64]string
585 listenSockets map[int64]string
586 id int64
587 cm *channelMap
588}
589
590func (s *server) addChild(id int64, e entry) {
591 switch v := e.(type) {
592 case *normalSocket:
593 s.sockets[id] = v.refName
594 case *listenSocket:
595 s.listenSockets[id] = v.refName
596 default:
597 grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
598 }
599}
600
601func (s *server) deleteChild(id int64) {
602 delete(s.sockets, id)
603 delete(s.listenSockets, id)
604 s.deleteSelfIfReady()
605}
606
607func (s *server) triggerDelete() {
608 s.closeCalled = true
609 s.deleteSelfIfReady()
610}
611
612func (s *server) deleteSelfIfReady() {
613 if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
614 return
615 }
616 s.cm.deleteEntry(s.id)
617}
618
619func (s *server) getParentID() int64 {
620 return 0
621}
622
623type tracedChannel interface {
624 getChannelTrace() *channelTrace
625 incrTraceRefCount()
626 decrTraceRefCount()
627 getRefName() string
628}
629
630type channelTrace struct {
631 cm *channelMap
632 createdTime time.Time
633 eventCount int64
634 mu sync.Mutex
635 events []*TraceEvent
636}
637
638func (c *channelTrace) append(e *TraceEvent) {
639 c.mu.Lock()
640 if len(c.events) == getMaxTraceEntry() {
641 del := c.events[0]
642 c.events = c.events[1:]
643 if del.RefID != 0 {
644 // start recursive cleanup in a goroutine to not block the call originated from grpc.
645 go func() {
646 // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
647 c.cm.mu.Lock()
648 c.cm.decrTraceRefCount(del.RefID)
649 c.cm.mu.Unlock()
650 }()
651 }
652 }
653 e.Timestamp = time.Now()
654 c.events = append(c.events, e)
655 c.eventCount++
656 c.mu.Unlock()
657}
658
659func (c *channelTrace) clear() {
660 c.mu.Lock()
661 for _, e := range c.events {
662 if e.RefID != 0 {
663 // caller should have already held the c.cm.mu lock.
664 c.cm.decrTraceRefCount(e.RefID)
665 }
666 }
667 c.mu.Unlock()
668}
669
670// Severity is the severity level of a trace event.
671// The canonical enumeration of all valid values is here:
672// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
673type Severity int
674
675const (
676 // CtUNKNOWN indicates unknown severity of a trace event.
677 CtUNKNOWN Severity = iota
678 // CtINFO indicates info level severity of a trace event.
679 CtINFO
680 // CtWarning indicates warning level severity of a trace event.
681 CtWarning
682 // CtError indicates error level severity of a trace event.
683 CtError
684)
685
686// RefChannelType is the type of the entity being referenced in a trace event.
687type RefChannelType int
688
689const (
690 // RefChannel indicates the referenced entity is a Channel.
691 RefChannel RefChannelType = iota
692 // RefSubChannel indicates the referenced entity is a SubChannel.
693 RefSubChannel
694)
695
696func (c *channelTrace) dumpData() *ChannelTrace {
697 c.mu.Lock()
698 ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
699 ct.Events = c.events[:len(c.events)]
700 c.mu.Unlock()
701 return ct
702}
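As a quick, hedged illustration of the Channel interface defined above (written as if it were a test helper inside package channelz, since the package is internal to the grpc module): fakeChannel and its fields are hypothetical and only show the shape of ChannelzMetric().

package channelz

import (
	"sync/atomic"
	"time"

	"google.golang.org/grpc/connectivity"
)

// fakeChannel is a hypothetical Channel implementation that snapshots a few
// atomically maintained counters into a ChannelInternalMetric.
type fakeChannel struct {
	target         string
	callsStarted   int64
	callsSucceeded int64
	callsFailed    int64
	lastCallNanos  int64
}

func (f *fakeChannel) ChannelzMetric() *ChannelInternalMetric {
	return &ChannelInternalMetric{
		State:                    connectivity.Ready,
		Target:                   f.target,
		CallsStarted:             atomic.LoadInt64(&f.callsStarted),
		CallsSucceeded:           atomic.LoadInt64(&f.callsSucceeded),
		CallsFailed:              atomic.LoadInt64(&f.callsFailed),
		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&f.lastCallNanos)),
	}
}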
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
new file mode 100644
index 0000000..692dd61
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
@@ -0,0 +1,53 @@
1// +build !appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package channelz
22
23import (
24 "syscall"
25
26 "golang.org/x/sys/unix"
27)
28
29// SocketOptionData defines the struct to hold socket option data, and related
30// getter function to obtain info from fd.
31type SocketOptionData struct {
32 Linger *unix.Linger
33 RecvTimeout *unix.Timeval
34 SendTimeout *unix.Timeval
35 TCPInfo *unix.TCPInfo
36}
37
38// Getsockopt defines the function to get socket options requested by channelz.
39// It is to be passed to syscall.RawConn.Control().
40func (s *SocketOptionData) Getsockopt(fd uintptr) {
41 if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
42 s.Linger = v
43 }
44 if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
45 s.RecvTimeout = v
46 }
47 if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
48 s.SendTimeout = v
49 }
50 if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
51 s.TCPInfo = v
52 }
53}
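The Getsockopt method above is meant to be handed to syscall.RawConn.Control. Below is a standalone, Linux-only sketch of that same pattern using golang.org/x/sys/unix directly; the dial target is illustrative and only the TCP_INFO lookup is shown.

// +build linux,!appengine

package main

import (
	"fmt"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80") // illustrative target
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	rawConn, err := conn.(*net.TCPConn).SyscallConn()
	if err != nil {
		panic(err)
	}
	var info *unix.TCPInfo
	// Control hands us the raw fd; this mirrors what SocketOptionData.Getsockopt
	// does for its TCPInfo field.
	ctlErr := rawConn.Control(func(fd uintptr) {
		info, err = unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO)
	})
	if ctlErr != nil || err != nil {
		panic(fmt.Sprint(ctlErr, " ", err))
	}
	fmt.Printf("smoothed RTT: %d us\n", info.Rtt) // tcpi_rtt is reported in microseconds
}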
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
new file mode 100644
index 0000000..79edbef
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
@@ -0,0 +1,44 @@
1// +build !linux appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package channelz
22
23import (
24 "sync"
25
26 "google.golang.org/grpc/grpclog"
27)
28
29var once sync.Once
30
31// SocketOptionData defines the struct to hold socket option data, and related
32// getter function to obtain info from fd.
33 // Non-linux OSes and appengine don't support socket options.
34type SocketOptionData struct {
35}
36
37// Getsockopt defines the function to get socket options requested by channelz.
38// It is to be passed to syscall.RawConn.Control().
39 // Non-linux OSes and appengine don't support socket options.
40func (s *SocketOptionData) Getsockopt(fd uintptr) {
41 once.Do(func() {
42 grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
43 })
44}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
new file mode 100644
index 0000000..fdf409d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -0,0 +1,39 @@
1// +build linux,!appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package channelz
22
23import (
24 "syscall"
25)
26
27// GetSocketOption gets the socket option info of the conn.
28func GetSocketOption(socket interface{}) *SocketOptionData {
29 c, ok := socket.(syscall.Conn)
30 if !ok {
31 return nil
32 }
33 data := &SocketOptionData{}
34 if rawConn, err := c.SyscallConn(); err == nil {
35 rawConn.Control(data.Getsockopt)
36 return data
37 }
38 return nil
39}
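GetSocketOption appears to be what the HTTP/2 transports use when filling in SocketInternalMetric.SocketOptions. A hedged sketch of that call pattern, written as if it lived inside the grpc module (external code cannot import internal/channelz); the helper name is made up.

package transport // sketch: a grpc-internal caller

import (
	"net"

	"google.golang.org/grpc/internal/channelz"
)

// socketOptions shows the intended usage: pass the net.Conn straight through.
// GetSocketOption type-asserts to syscall.Conn and runs
// SocketOptionData.Getsockopt via RawConn.Control; non-syscall conns yield nil.
func socketOptions(conn net.Conn) *channelz.SocketOptionData {
	return channelz.GetSocketOption(conn)
}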
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
new file mode 100644
index 0000000..8864a08
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -0,0 +1,26 @@
1// +build !linux appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package channelz
22
23// GetSocketOption gets the socket option info of the conn.
24func GetSocketOption(c interface{}) *SocketOptionData {
25 return nil
26}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000..d2193b3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -0,0 +1,70 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package envconfig contains grpc settings configured by environment variables.
20package envconfig
21
22import (
23 "os"
24 "strings"
25)
26
27const (
28 prefix = "GRPC_GO_"
29 retryStr = prefix + "RETRY"
30 requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
31)
32
33// RequireHandshakeSetting describes the settings for handshaking.
34type RequireHandshakeSetting int
35
36const (
37 // RequireHandshakeHybrid (default, deprecated) indicates to not wait for
38 // handshake before considering a connection ready, but wait before
39 // considering it successful.
40 RequireHandshakeHybrid RequireHandshakeSetting = iota
41 // RequireHandshakeOn (default after the 1.17 release) indicates to wait
42 // for handshake before considering a connection ready/successful.
43 RequireHandshakeOn
44 // RequireHandshakeOff indicates to not wait for handshake before
45 // considering a connection ready/successful.
46 RequireHandshakeOff
47)
48
49var (
50 // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
51 Retry = strings.EqualFold(os.Getenv(retryStr), "on")
52 // RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
53 // environment variable.
54 //
55 // Will be removed after the 1.18 release.
56 RequireHandshake RequireHandshakeSetting
57)
58
59func init() {
60 switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
61 // "on", an empty value, and any unrecognized value all map to RequireHandshakeOn.
62 default:
63 RequireHandshake = RequireHandshakeOn
64 case "off":
65 RequireHandshake = RequireHandshakeOff
66 case "hybrid":
67 // Will be removed after the 1.17 release.
68 RequireHandshake = RequireHandshakeHybrid
69 }
70}
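To make the mapping of GRPC_GO_REQUIRE_HANDSHAKE values explicit, here is a small illustrative mirror of the init() logic as a pure function. parseRequireHandshake is not part of the envconfig package, and the real package reads the variable only once, at init time.

package main

import (
	"fmt"
	"strings"
)

// parseRequireHandshake mirrors the switch in init() above, for illustration only.
func parseRequireHandshake(v string) string {
	switch strings.ToLower(v) {
	case "off":
		return "RequireHandshakeOff"
	case "hybrid":
		return "RequireHandshakeHybrid"
	default: // "on", empty, or anything unrecognized
		return "RequireHandshakeOn"
	}
}

func main() {
	for _, v := range []string{"", "on", "off", "hybrid", "bogus"} {
		fmt.Printf("GRPC_GO_REQUIRE_HANDSHAKE=%q -> %s\n", v, parseRequireHandshake(v))
	}
}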
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
new file mode 100644
index 0000000..200b115
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -0,0 +1,56 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package grpcrand implements math/rand functions in a concurrent-safe way
20// with a global random source, independent of math/rand's global source.
21package grpcrand
22
23import (
24 "math/rand"
25 "sync"
26 "time"
27)
28
29var (
30 r = rand.New(rand.NewSource(time.Now().UnixNano()))
31 mu sync.Mutex
32)
33
34// Int63n implements rand.Int63n on the grpcrand global source.
35func Int63n(n int64) int64 {
36 mu.Lock()
37 res := r.Int63n(n)
38 mu.Unlock()
39 return res
40}
41
42// Intn implements rand.Intn on the grpcrand global source.
43func Intn(n int) int {
44 mu.Lock()
45 res := r.Intn(n)
46 mu.Unlock()
47 return res
48}
49
50// Float64 implements rand.Float64 on the grpcrand global source.
51func Float64() float64 {
52 mu.Lock()
53 res := r.Float64()
54 mu.Unlock()
55 return res
56}
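grpcrand exists so callers can draw random numbers concurrently without sharing an unsynchronized *rand.Rand. A hedged sketch of a typical call site, adding jitter to a backoff delay (written as if inside the grpc module; the function and jitter factor are illustrative, not the vendored backoff implementation):

package backoff // sketch: grpc's own packages are the intended callers

import (
	"time"

	"google.golang.org/grpc/internal/grpcrand"
)

// withJitter scales d by a random factor in [1-fraction, 1+fraction].
// grpcrand.Float64 is safe for concurrent use because the package guards its
// shared source with a mutex.
func withJitter(d time.Duration, fraction float64) time.Duration {
	f := 1 + fraction*(2*grpcrand.Float64()-1)
	return time.Duration(f * float64(d))
}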
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
new file mode 100644
index 0000000..fbe697c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -0,0 +1,61 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package grpcsync implements additional synchronization primitives built upon
20// the sync package.
21package grpcsync
22
23import (
24 "sync"
25 "sync/atomic"
26)
27
28// Event represents a one-time event that may occur in the future.
29type Event struct {
30 fired int32
31 c chan struct{}
32 o sync.Once
33}
34
35// Fire causes e to complete. It is safe to call multiple times, and
36// concurrently. It returns true iff this call to Fire caused the signaling
37// channel returned by Done to close.
38func (e *Event) Fire() bool {
39 ret := false
40 e.o.Do(func() {
41 atomic.StoreInt32(&e.fired, 1)
42 close(e.c)
43 ret = true
44 })
45 return ret
46}
47
48// Done returns a channel that will be closed when Fire is called.
49func (e *Event) Done() <-chan struct{} {
50 return e.c
51}
52
53// HasFired returns true if Fire has been called.
54func (e *Event) HasFired() bool {
55 return atomic.LoadInt32(&e.fired) == 1
56}
57
58// NewEvent returns a new, ready-to-use Event.
59func NewEvent() *Event {
60 return &Event{c: make(chan struct{})}
61}
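A short usage sketch of Event, written as if it were an example in package grpcsync itself (the timing and print statements are illustrative): goroutines can block on Done, Fire releases them exactly once, and HasFired supports cheap polling.

package grpcsync

import (
	"fmt"
	"time"
)

func ExampleEvent() {
	e := NewEvent()

	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to do some work
		if e.Fire() {
			fmt.Println("this call closed the Done channel")
		}
		e.Fire() // safe: later calls are no-ops and return false
	}()

	<-e.Done() // unblocks once Fire has been called
	fmt.Println("fired:", e.HasFired())
}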
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 0708383..eaa54d4 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -15,20 +15,36 @@
15 * 15 *
16 */ 16 */
17 17
18// Package internal contains gRPC-internal code for testing, to avoid polluting 18// Package internal contains gRPC-internal code, to avoid polluting
19// the godoc of the top-level grpc package. 19// the godoc of the top-level grpc package. It must not import any grpc
20// symbols to avoid circular dependencies.
20package internal 21package internal
21 22
22// TestingCloseConns closes all existing transports but keeps 23import "context"
23// grpcServer.lis accepting new connections.
24//
25// The provided grpcServer must be of type *grpc.Server. It is untyped
26// for circular dependency reasons.
27var TestingCloseConns func(grpcServer interface{})
28 24
29// TestingUseHandlerImpl enables the http.Handler-based server implementation. 25var (
30// It must be called before Serve and requires TLS credentials. 26 // WithContextDialer is exported by dialoptions.go
31// 27 WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
32// The provided grpcServer must be of type *grpc.Server. It is untyped 28 // WithResolverBuilder is exported by dialoptions.go
33// for circular dependency reasons. 29 WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
34var TestingUseHandlerImpl func(grpcServer interface{}) 30 // WithHealthCheckFunc is not exported by dialoptions.go
31 WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
32 // HealthCheckFunc is used to provide client-side LB channel health checking
33 HealthCheckFunc HealthChecker
34 // BalancerUnregister is exported by package balancer to unregister a balancer.
35 BalancerUnregister func(name string)
36)
37
38// HealthChecker defines the signature of the client-side LB channel health checking function.
39type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
40
41const (
42 // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
43 CredsBundleModeFallback = "fallback"
44 // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
45 // mode.
46 CredsBundleModeBalancer = "balancer"
47 // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
48 // that supports backend returned by grpclb balancer.
49 CredsBundleModeBackendFromBalancer = "backend-from-balancer"
50)
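The new HealthChecker type above is just a function signature. A hedged, runnable sketch of a function matching that shape follows; the no-op body is purely illustrative, and the real client-side health checking is provided elsewhere in grpc.

package main

import (
	"context"
	"fmt"
)

// noopHealthCheck matches the HealthChecker shape:
// func(ctx, newStream, reportHealth, serviceName) error.
// It immediately reports the service as healthy and returns when ctx ends.
func noopHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error {
	reportHealth(true)
	<-ctx.Done()
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // end immediately so the sketch terminates
	err := noopHealthCheck(ctx, nil, func(ok bool) { fmt.Println("healthy:", ok) }, "demo.Service")
	fmt.Println(err) // context canceled
}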
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 0000000..43281a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,114 @@
1// +build !appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21// Package syscall provides functionalities that grpc uses to get low-level operating system
22// stats/info.
23package syscall
24
25import (
26 "fmt"
27 "net"
28 "syscall"
29 "time"
30
31 "golang.org/x/sys/unix"
32 "google.golang.org/grpc/grpclog"
33)
34
35 // GetCPUTime returns how much CPU time has passed since the start of this process.
36func GetCPUTime() int64 {
37 var ts unix.Timespec
38 if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
39 grpclog.Fatal(err)
40 }
41 return ts.Nano()
42}
43
44// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
45type Rusage syscall.Rusage
46
47// GetRusage returns the resource usage of current process.
48func GetRusage() (rusage *Rusage) {
49 rusage = new(Rusage)
50 syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
51 return
52}
53
54// CPUTimeDiff returns the differences of user CPU time and system CPU time used
55// between two Rusage structs.
56func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
57 f := (*syscall.Rusage)(first)
58 l := (*syscall.Rusage)(latest)
59 var (
60 utimeDiffs = l.Utime.Sec - f.Utime.Sec
61 utimeDiffus = l.Utime.Usec - f.Utime.Usec
62 stimeDiffs = l.Stime.Sec - f.Stime.Sec
63 stimeDiffus = l.Stime.Usec - f.Stime.Usec
64 )
65
66 uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
67 sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
68
69 return uTimeElapsed, sTimeElapsed
70}
71
72// SetTCPUserTimeout sets the TCP user timeout on a connection's socket
73func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
74 tcpconn, ok := conn.(*net.TCPConn)
75 if !ok {
76 // not a TCP connection. exit early
77 return nil
78 }
79 rawConn, err := tcpconn.SyscallConn()
80 if err != nil {
81 return fmt.Errorf("error getting raw connection: %v", err)
82 }
83 err = rawConn.Control(func(fd uintptr) {
84 err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
85 })
86 if err != nil {
87 return fmt.Errorf("error setting option on socket: %v", err)
88 }
89
90 return nil
91}
92
93// GetTCPUserTimeout gets the TCP user timeout on a connection's socket
94func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
95 tcpconn, ok := conn.(*net.TCPConn)
96 if !ok {
97 err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
98 return
99 }
100 rawConn, err := tcpconn.SyscallConn()
101 if err != nil {
102 err = fmt.Errorf("error getting raw connection: %v", err)
103 return
104 }
105 err = rawConn.Control(func(fd uintptr) {
106 opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
107 })
108 if err != nil {
109 err = fmt.Errorf("error getting option on socket: %v", err)
110 return
111 }
112
113 return
114}
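A hedged sketch of how SetTCPUserTimeout and GetTCPUserTimeout are meant to be called, written as if inside the grpc module (which is what may import internal/syscall); the helper name and 20s value below are arbitrary examples.

package transport // sketch: a grpc-internal caller

import (
	"net"
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/syscall"
)

// tuneConn applies a best-effort, Linux-only TCP_USER_TIMEOUT so that a dead
// peer is detected within roughly the given timeout instead of the kernel default.
func tuneConn(conn net.Conn, timeout time.Duration) {
	if err := syscall.SetTCPUserTimeout(conn, timeout); err != nil {
		grpclog.Warningf("failed to set TCP_USER_TIMEOUT: %v", err)
		return
	}
	if v, err := syscall.GetTCPUserTimeout(conn); err == nil && v >= 0 {
		grpclog.Infof("TCP_USER_TIMEOUT is now %d ms", v)
	}
}

// A caller would do something like: tuneConn(conn, 20*time.Second).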
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 0000000..61678fe
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,63 @@
1// +build !linux appengine
2
3/*
4 *
5 * Copyright 2018 gRPC authors.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 */
20
21package syscall
22
23import (
24 "net"
25 "time"
26
27 "google.golang.org/grpc/grpclog"
28)
29
30func init() {
31 grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
32}
33
34 // GetCPUTime returns how much CPU time has passed since the start of this process.
35 // It always returns 0 in non-linux or appengine environments.
36func GetCPUTime() int64 {
37 return 0
38}
39
40// Rusage is an empty struct under non-linux or appengine environment.
41type Rusage struct{}
42
43// GetRusage is a no-op function under non-linux or appengine environment.
44func GetRusage() (rusage *Rusage) {
45 return nil
46}
47
48// CPUTimeDiff returns the differences of user CPU time and system CPU time used
49 // between two Rusage structs. It is a no-op function for non-linux or appengine environments.
50func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
51 return 0, 0
52}
53
54// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
55func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
56 return nil
57}
58
59 // GetTCPUserTimeout is a no-op function under non-linux or appengine environments.
60 // A negative return value indicates that the operation is not supported.
61func GetTCPUserTimeout(conn net.Conn) (int, error) {
62 return -1, nil
63}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 0000000..070680e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "sync"
23 "time"
24)
25
26const (
27 // bdpLimit is the maximum value the flow control windows will be increased
28 // to. TCP typically limits this to 4MB, but some systems go up to 16MB.
29 // Since this is only a limit, it is safe to make it optimistic.
30 bdpLimit = (1 << 20) * 16
31 // alpha is a constant factor used to keep a moving average
32 // of RTTs.
33 alpha = 0.9
34 // If the current bdp sample is greater than or equal to
35 // our beta * our estimated bdp and the current bandwidth
36 // sample is the maximum bandwidth observed so far, we
37 // increase our bdp estimate by a factor of gamma.
38 beta = 0.66
39 // To keep our bdp estimate smaller than or equal to twice the real BDP,
40 // we should multiply the current sample by 4/3; however, to round things out,
41 // we use 2 as the multiplication factor.
42 gamma = 2
43)
44
45// Adding arbitrary data to ping so that its ack can be identified.
46// Easter-egg: what does the ping message say?
47var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
48
49type bdpEstimator struct {
50 // sentAt is the time when the ping was sent.
51 sentAt time.Time
52
53 mu sync.Mutex
54 // bdp is the current bdp estimate.
55 bdp uint32
56 // sample is the number of bytes received in one measurement cycle.
57 sample uint32
58 // bwMax is the maximum bandwidth noted so far (bytes/sec).
59 bwMax float64
60 // bool to keep track of the beginning of a new measurement cycle.
61 isSent bool
62 // Callback to update the window sizes.
63 updateFlowControl func(n uint32)
64 // sampleCount is the number of samples taken so far.
65 sampleCount uint64
66 // round trip time (seconds)
67 rtt float64
68}
69
70// timesnap registers the time bdp ping was sent out so that
71// network rtt can be calculated when its ack is received.
72// It is called (by controller) when the bdpPing is
73// being written on the wire.
74func (b *bdpEstimator) timesnap(d [8]byte) {
75 if bdpPing.data != d {
76 return
77 }
78 b.sentAt = time.Now()
79}
80
81// add adds bytes to the current sample for calculating bdp.
82// It returns true only if a ping must be sent. This can be used
83 // by the caller (handleData) to make a decision about batching
84// a window update with it.
85func (b *bdpEstimator) add(n uint32) bool {
86 b.mu.Lock()
87 defer b.mu.Unlock()
88 if b.bdp == bdpLimit {
89 return false
90 }
91 if !b.isSent {
92 b.isSent = true
93 b.sample = n
94 b.sentAt = time.Time{}
95 b.sampleCount++
96 return true
97 }
98 b.sample += n
99 return false
100}
101
102// calculate is called when an ack for a bdp ping is received.
103// Here we calculate the current bdp and bandwidth sample and
104// decide if the flow control windows should go up.
105func (b *bdpEstimator) calculate(d [8]byte) {
106 // Check if the ping acked for was the bdp ping.
107 if bdpPing.data != d {
108 return
109 }
110 b.mu.Lock()
111 rttSample := time.Since(b.sentAt).Seconds()
112 if b.sampleCount < 10 {
113 // Bootstrap rtt with an average of first 10 rtt samples.
114 b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
115 } else {
116 // Give more weight to the recent past.
117 b.rtt += (rttSample - b.rtt) * float64(alpha)
118 }
119 b.isSent = false
120 // The number of bytes accumulated so far in the sample is smaller
121 // than or equal to 1.5 times the real BDP on a saturated connection.
122 bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
123 if bwCurrent > b.bwMax {
124 b.bwMax = bwCurrent
125 }
126 // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is
127 // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we
128 // should update our perception of the network BDP.
129 if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
130 sampleFloat := float64(b.sample)
131 b.bdp = uint32(gamma * sampleFloat)
132 if b.bdp > bdpLimit {
133 b.bdp = bdpLimit
134 }
135 bdp := b.bdp
136 b.mu.Unlock()
137 b.updateFlowControl(bdp)
138 return
139 }
140 b.mu.Unlock()
141}
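To make the update rule in calculate() concrete, here is a tiny standalone mirror with made-up numbers; the constants copy the ones above, while the specific sample, rtt, and starting estimate are illustrative.

package main

import "fmt"

const (
	bdpLimit = (1 << 20) * 16
	beta     = 0.66
	gamma    = 2
)

func main() {
	bdp := uint32(65536)    // current estimate: 64 KiB
	sample := uint32(60000) // bytes received between the bdp ping and its ack
	rtt := 0.040            // smoothed RTT: 40 ms
	bwMax := 0.0            // no bandwidth sample recorded yet

	bwCurrent := float64(sample) / (rtt * 1.5) // 1e6 bytes/sec
	if bwCurrent > bwMax {
		bwMax = bwCurrent
	}
	// sample (60000) >= beta*bdp (0.66*65536 ~= 43254) and bwCurrent == bwMax,
	// so the estimate is bumped to gamma*sample, capped at bdpLimit.
	if float64(sample) >= beta*float64(bdp) && bwCurrent == bwMax && bdp != bdpLimit {
		next := uint32(gamma * float64(sample))
		if next > bdpLimit {
			next = bdpLimit
		}
		bdp = next
	}
	fmt.Println("new bdp estimate:", bdp) // 120000
}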
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
new file mode 100644
index 0000000..204ba15
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -0,0 +1,852 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "bytes"
23 "fmt"
24 "runtime"
25 "sync"
26
27 "golang.org/x/net/http2"
28 "golang.org/x/net/http2/hpack"
29)
30
31var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
32 e.SetMaxDynamicTableSizeLimit(v)
33}
34
35type itemNode struct {
36 it interface{}
37 next *itemNode
38}
39
40type itemList struct {
41 head *itemNode
42 tail *itemNode
43}
44
45func (il *itemList) enqueue(i interface{}) {
46 n := &itemNode{it: i}
47 if il.tail == nil {
48 il.head, il.tail = n, n
49 return
50 }
51 il.tail.next = n
52 il.tail = n
53}
54
55// peek returns the first item in the list without removing it from the
56// list.
57func (il *itemList) peek() interface{} {
58 return il.head.it
59}
60
61func (il *itemList) dequeue() interface{} {
62 if il.head == nil {
63 return nil
64 }
65 i := il.head.it
66 il.head = il.head.next
67 if il.head == nil {
68 il.tail = nil
69 }
70 return i
71}
72
73func (il *itemList) dequeueAll() *itemNode {
74 h := il.head
75 il.head, il.tail = nil, nil
76 return h
77}
78
79func (il *itemList) isEmpty() bool {
80 return il.head == nil
81}
82
83// The following defines various control items which could flow through
84// the control buffer of transport. They represent different aspects of
85 // control tasks, e.g., flow control, settings, stream resetting, etc.
86
87// registerStream is used to register an incoming stream with loopy writer.
88type registerStream struct {
89 streamID uint32
90 wq *writeQuota
91}
92
93 // headerFrame is also used to register a stream on the client side.
94type headerFrame struct {
95 streamID uint32
96 hf []hpack.HeaderField
97 endStream bool // Valid on server side.
98 initStream func(uint32) (bool, error) // Used only on the client side.
99 onWrite func()
100 wq *writeQuota // write quota for the stream created.
101 cleanup *cleanupStream // Valid on the server side.
102 onOrphaned func(error) // Valid on client-side
103}
104
105type cleanupStream struct {
106 streamID uint32
107 rst bool
108 rstCode http2.ErrCode
109 onWrite func()
110}
111
112type dataFrame struct {
113 streamID uint32
114 endStream bool
115 h []byte
116 d []byte
117 // onEachWrite is called every time
118 // a part of d is written out.
119 onEachWrite func()
120}
121
122type incomingWindowUpdate struct {
123 streamID uint32
124 increment uint32
125}
126
127type outgoingWindowUpdate struct {
128 streamID uint32
129 increment uint32
130}
131
132type incomingSettings struct {
133 ss []http2.Setting
134}
135
136type outgoingSettings struct {
137 ss []http2.Setting
138}
139
140type incomingGoAway struct {
141}
142
143type goAway struct {
144 code http2.ErrCode
145 debugData []byte
146 headsUp bool
147 closeConn bool
148}
149
150type ping struct {
151 ack bool
152 data [8]byte
153}
154
155type outFlowControlSizeRequest struct {
156 resp chan uint32
157}
158
159type outStreamState int
160
161const (
162 active outStreamState = iota
163 empty
164 waitingOnStreamQuota
165)
166
167type outStream struct {
168 id uint32
169 state outStreamState
170 itl *itemList
171 bytesOutStanding int
172 wq *writeQuota
173
174 next *outStream
175 prev *outStream
176}
177
178func (s *outStream) deleteSelf() {
179 if s.prev != nil {
180 s.prev.next = s.next
181 }
182 if s.next != nil {
183 s.next.prev = s.prev
184 }
185 s.next, s.prev = nil, nil
186}
187
188type outStreamList struct {
189 // Following are sentinel objects that mark the
190 // beginning and end of the list. They do not
191 // contain any item lists. All valid objects are
192 // inserted in between them.
193 // This is needed so that an outStream object can
194 // deleteSelf() in O(1) time without knowing which
195 // list it belongs to.
196 head *outStream
197 tail *outStream
198}
199
200func newOutStreamList() *outStreamList {
201 head, tail := new(outStream), new(outStream)
202 head.next = tail
203 tail.prev = head
204 return &outStreamList{
205 head: head,
206 tail: tail,
207 }
208}
209
210func (l *outStreamList) enqueue(s *outStream) {
211 e := l.tail.prev
212 e.next = s
213 s.prev = e
214 s.next = l.tail
215 l.tail.prev = s
216}
217
218// remove from the beginning of the list.
219func (l *outStreamList) dequeue() *outStream {
220 b := l.head.next
221 if b == l.tail {
222 return nil
223 }
224 b.deleteSelf()
225 return b
226}
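The sentinel head and tail above are what let an outStream unlink itself in O(1) via deleteSelf without a reference to its owning list. The self-contained sketch below reproduces just that linkage pattern with trimmed-down types; node and list are illustrative names, not the transport's types.

    // Self-contained sketch of the sentinel-list pattern used by outStreamList.
    package main

    import "fmt"

    type node struct {
    	id         uint32
    	next, prev *node
    }

    // deleteSelf unlinks the node in O(1) without a reference to its list.
    func (n *node) deleteSelf() {
    	if n.prev != nil {
    		n.prev.next = n.next
    	}
    	if n.next != nil {
    		n.next.prev = n.prev
    	}
    	n.next, n.prev = nil, nil
    }

    // list keeps sentinel head and tail nodes; real nodes live between them.
    type list struct{ head, tail *node }

    func newList() *list {
    	h, t := new(node), new(node)
    	h.next, t.prev = t, h
    	return &list{head: h, tail: t}
    }

    func (l *list) enqueue(n *node) {
    	last := l.tail.prev
    	last.next = n
    	n.prev = last
    	n.next = l.tail
    	l.tail.prev = n
    }

    func main() {
    	l := newList()
    	a, b := &node{id: 1}, &node{id: 2}
    	l.enqueue(a)
    	l.enqueue(b)
    	a.deleteSelf()              // removed without consulting l
    	fmt.Println(l.head.next.id) // prints 2
    }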
227
228// controlBuffer is a way to pass information to loopy.
229// Information is passed as specific struct types called control frames.
230// A control frame not only represents data, messages or headers to be sent out
231// but can also be used to instruct loopy to update its internal state.
232// It shouldn't be confused with an HTTP2 frame, although some of the control frames
233// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
234type controlBuffer struct {
235 ch chan struct{}
236 done <-chan struct{}
237 mu sync.Mutex
238 consumerWaiting bool
239 list *itemList
240 err error
241}
242
243func newControlBuffer(done <-chan struct{}) *controlBuffer {
244 return &controlBuffer{
245 ch: make(chan struct{}, 1),
246 list: &itemList{},
247 done: done,
248 }
249}
250
251func (c *controlBuffer) put(it interface{}) error {
252 _, err := c.executeAndPut(nil, it)
253 return err
254}
255
256func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
257 var wakeUp bool
258 c.mu.Lock()
259 if c.err != nil {
260 c.mu.Unlock()
261 return false, c.err
262 }
263 if f != nil {
264 if !f(it) { // f wasn't successful
265 c.mu.Unlock()
266 return false, nil
267 }
268 }
269 if c.consumerWaiting {
270 wakeUp = true
271 c.consumerWaiting = false
272 }
273 c.list.enqueue(it)
274 c.mu.Unlock()
275 if wakeUp {
276 select {
277 case c.ch <- struct{}{}:
278 default:
279 }
280 }
281 return true, nil
282}
283
284// Note argument f should never be nil.
285func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
286 c.mu.Lock()
287 if c.err != nil {
288 c.mu.Unlock()
289 return false, c.err
290 }
291 if !f(it) { // f wasn't successful
292 c.mu.Unlock()
293 return false, nil
294 }
295 c.mu.Unlock()
296 return true, nil
297}
298
299func (c *controlBuffer) get(block bool) (interface{}, error) {
300 for {
301 c.mu.Lock()
302 if c.err != nil {
303 c.mu.Unlock()
304 return nil, c.err
305 }
306 if !c.list.isEmpty() {
307 h := c.list.dequeue()
308 c.mu.Unlock()
309 return h, nil
310 }
311 if !block {
312 c.mu.Unlock()
313 return nil, nil
314 }
315 c.consumerWaiting = true
316 c.mu.Unlock()
317 select {
318 case <-c.ch:
319 case <-c.done:
320 c.finish()
321 return nil, ErrConnClosing
322 }
323 }
324}
325
326func (c *controlBuffer) finish() {
327 c.mu.Lock()
328 if c.err != nil {
329 c.mu.Unlock()
330 return
331 }
332 c.err = ErrConnClosing
333 // There may be headers for streams in the control buffer.
334 // These streams need to be cleaned out since the transport
335 // is still not aware of these yet.
336 for head := c.list.dequeueAll(); head != nil; head = head.next {
337 hdr, ok := head.it.(*headerFrame)
338 if !ok {
339 continue
340 }
341 if hdr.onOrphaned != nil { // It will be nil on the server-side.
342 hdr.onOrphaned(ErrConnClosing)
343 }
344 }
345 c.mu.Unlock()
346}
347
348type side int
349
350const (
351 clientSide side = iota
352 serverSide
353)
354
355// Loopy receives frames from the control buffer.
356// Each frame is handled individually; most of the work done by loopy goes
357// into handling data frames. Loopy maintains a queue of active streams, and each
358 // stream maintains a queue of data frames; as loopy receives data frames,
359 // they are added to the queue of the relevant stream.
360 // Loopy goes over this list of active streams by processing one node every iteration,
361 // thereby closely resembling round-robin scheduling over all streams. While
362// processing a stream, loopy writes out data bytes from this stream capped by the min
363// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
364type loopyWriter struct {
365 side side
366 cbuf *controlBuffer
367 sendQuota uint32
368 oiws uint32 // outbound initial window size.
369 // estdStreams is a map of all established streams that have not been cleaned up yet.
370 // On client-side, this is all streams whose headers were sent out.
371 // On server-side, this is all streams whose headers were received.
372 estdStreams map[uint32]*outStream // Established streams.
373 // activeStreams is a linked-list of all streams that have data to send and some
374 // stream-level flow control quota.
375 // Each of these streams internally has a list of data items (and perhaps trailers
376 // on the server-side) to be sent out.
377 activeStreams *outStreamList
378 framer *framer
379 hBuf *bytes.Buffer // The buffer for HPACK encoding.
380 hEnc *hpack.Encoder // HPACK encoder.
381 bdpEst *bdpEstimator
382 draining bool
383
384 // Side-specific handlers
385 ssGoAwayHandler func(*goAway) (bool, error)
386}
387
388func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
389 var buf bytes.Buffer
390 l := &loopyWriter{
391 side: s,
392 cbuf: cbuf,
393 sendQuota: defaultWindowSize,
394 oiws: defaultWindowSize,
395 estdStreams: make(map[uint32]*outStream),
396 activeStreams: newOutStreamList(),
397 framer: fr,
398 hBuf: &buf,
399 hEnc: hpack.NewEncoder(&buf),
400 bdpEst: bdpEst,
401 }
402 return l
403}
404
405const minBatchSize = 1000
406
407// run should be run in a separate goroutine.
408// It reads control frames from controlBuf and processes them by:
409// 1. Updating loopy's internal state, or/and
410// 2. Writing out HTTP2 frames on the wire.
411//
412// Loopy keeps all active streams with data to send in a linked-list.
413// All streams in the activeStreams linked-list must have both:
414// 1. Data to send, and
415// 2. Stream level flow control quota available.
416//
417// In each iteration of run loop, other than processing the incoming control
418// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
419// This results in writing of HTTP2 frames into an underlying write buffer.
420 // When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
421 // As an optimization to increase the batch size of each flush, loopy yields the processor once,
422 // if the batch size is too low, to give stream goroutines a chance to fill it up.
423func (l *loopyWriter) run() (err error) {
424 defer func() {
425 if err == ErrConnClosing {
426 // Don't log ErrConnClosing as an error since it happens when:
427 // 1. The connection is closed by some other known issue.
428 // 2. The user closed the connection.
429 // 3. The connection was closed gracefully.
430 infof("transport: loopyWriter.run returning. %v", err)
431 err = nil
432 }
433 }()
434 for {
435 it, err := l.cbuf.get(true)
436 if err != nil {
437 return err
438 }
439 if err = l.handle(it); err != nil {
440 return err
441 }
442 if _, err = l.processData(); err != nil {
443 return err
444 }
445 gosched := true
446 hasdata:
447 for {
448 it, err := l.cbuf.get(false)
449 if err != nil {
450 return err
451 }
452 if it != nil {
453 if err = l.handle(it); err != nil {
454 return err
455 }
456 if _, err = l.processData(); err != nil {
457 return err
458 }
459 continue hasdata
460 }
461 isEmpty, err := l.processData()
462 if err != nil {
463 return err
464 }
465 if !isEmpty {
466 continue hasdata
467 }
468 if gosched {
469 gosched = false
470 if l.framer.writer.offset < minBatchSize {
471 runtime.Gosched()
472 continue hasdata
473 }
474 }
475 l.framer.writer.Flush()
476 break hasdata
477
478 }
479 }
480}
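The loop above is essentially a batch-and-flush idiom: block for one control frame, drain whatever else is ready without blocking, yield the processor once if the batch is still small, and flush only when the buffer runs dry. A generic, runnable sketch of the same idiom follows, with a string channel standing in for controlBuf and a bufio.Writer standing in for the framer; both are assumed stand-ins, not the transport's types.

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"runtime"
    )

    // drainAndFlush mirrors the batching idiom of loopyWriter.run: block for one
    // item, drain opportunistically, yield once if the batch is still small, and
    // flush only when no more work is immediately available.
    func drainAndFlush(work chan string, w *bufio.Writer, minBatch int) {
    	for it := range work { // blocking get
    		w.WriteString(it)
    		yielded := false
    		for {
    			select {
    			case more, ok := <-work: // non-blocking get
    				if !ok {
    					w.Flush()
    					return
    				}
    				w.WriteString(more)
    				continue
    			default:
    			}
    			if !yielded && w.Buffered() < minBatch {
    				yielded = true
    				runtime.Gosched() // give producers a chance to grow the batch
    				continue
    			}
    			w.Flush()
    			break
    		}
    	}
    	w.Flush()
    }

    func main() {
    	work := make(chan string, 8)
    	for i := 0; i < 5; i++ {
    		work <- fmt.Sprintf("frame-%d ", i)
    	}
    	close(work)
    	drainAndFlush(work, bufio.NewWriter(os.Stdout), 64)
    }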
481
482func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
483 return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
484}
485
486func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
487 // If the update is for stream 0, it is a connection-level update; add it to the connection send quota.
488 if w.streamID == 0 {
489 l.sendQuota += w.increment
490 return nil
491 }
492 // Find the stream and update it.
493 if str, ok := l.estdStreams[w.streamID]; ok {
494 str.bytesOutStanding -= int(w.increment)
495 if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
496 str.state = active
497 l.activeStreams.enqueue(str)
498 return nil
499 }
500 }
501 return nil
502}
503
504func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
505 return l.framer.fr.WriteSettings(s.ss...)
506}
507
508func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
509 if err := l.applySettings(s.ss); err != nil {
510 return err
511 }
512 return l.framer.fr.WriteSettingsAck()
513}
514
515func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
516 str := &outStream{
517 id: h.streamID,
518 state: empty,
519 itl: &itemList{},
520 wq: h.wq,
521 }
522 l.estdStreams[h.streamID] = str
523 return nil
524}
525
526func (l *loopyWriter) headerHandler(h *headerFrame) error {
527 if l.side == serverSide {
528 str, ok := l.estdStreams[h.streamID]
529 if !ok {
530 warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
531 return nil
532 }
533 // Case 1.A: Server is responding back with headers.
534 if !h.endStream {
535 return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
536 }
537 // else: Case 1.B: Server wants to close stream.
538
539 if str.state != empty { // either active or waiting on stream quota.
540 // add it to str's list of items.
541 str.itl.enqueue(h)
542 return nil
543 }
544 if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
545 return err
546 }
547 return l.cleanupStreamHandler(h.cleanup)
548 }
549 // Case 2: Client wants to originate stream.
550 str := &outStream{
551 id: h.streamID,
552 state: empty,
553 itl: &itemList{},
554 wq: h.wq,
555 }
556 str.itl.enqueue(h)
557 return l.originateStream(str)
558}
559
560func (l *loopyWriter) originateStream(str *outStream) error {
561 hdr := str.itl.dequeue().(*headerFrame)
562 sendPing, err := hdr.initStream(str.id)
563 if err != nil {
564 if err == ErrConnClosing {
565 return err
566 }
567 // Other errors (e.g. errStreamDrain) need not close the transport.
568 return nil
569 }
570 if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
571 return err
572 }
573 l.estdStreams[str.id] = str
574 if sendPing {
575 return l.pingHandler(&ping{data: [8]byte{}})
576 }
577 return nil
578}
579
580func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
581 if onWrite != nil {
582 onWrite()
583 }
584 l.hBuf.Reset()
585 for _, f := range hf {
586 if err := l.hEnc.WriteField(f); err != nil {
587 warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
588 }
589 }
590 var (
591 err error
592 endHeaders, first bool
593 )
594 first = true
595 for !endHeaders {
596 size := l.hBuf.Len()
597 if size > http2MaxFrameLen {
598 size = http2MaxFrameLen
599 } else {
600 endHeaders = true
601 }
602 if first {
603 first = false
604 err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
605 StreamID: streamID,
606 BlockFragment: l.hBuf.Next(size),
607 EndStream: endStream,
608 EndHeaders: endHeaders,
609 })
610 } else {
611 err = l.framer.fr.WriteContinuation(
612 streamID,
613 endHeaders,
614 l.hBuf.Next(size),
615 )
616 }
617 if err != nil {
618 return err
619 }
620 }
621 return nil
622}
623
624func (l *loopyWriter) preprocessData(df *dataFrame) error {
625 str, ok := l.estdStreams[df.streamID]
626 if !ok {
627 return nil
628 }
629 // If we got data for a stream it means that
630 // stream was originated and the headers were sent out.
631 str.itl.enqueue(df)
632 if str.state == empty {
633 str.state = active
634 l.activeStreams.enqueue(str)
635 }
636 return nil
637}
638
639func (l *loopyWriter) pingHandler(p *ping) error {
640 if !p.ack {
641 l.bdpEst.timesnap(p.data)
642 }
643 return l.framer.fr.WritePing(p.ack, p.data)
644
645}
646
647func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
648 o.resp <- l.sendQuota
649 return nil
650}
651
652func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
653 c.onWrite()
654 if str, ok := l.estdStreams[c.streamID]; ok {
655 // On the server side it could be a trailers-only response or
656 // a RST_STREAM before stream initialization thus the stream might
657 // not be established yet.
658 delete(l.estdStreams, c.streamID)
659 str.deleteSelf()
660 }
661 if c.rst { // If RST_STREAM needs to be sent.
662 if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
663 return err
664 }
665 }
666 if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
667 return ErrConnClosing
668 }
669 return nil
670}
671
672func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
673 if l.side == clientSide {
674 l.draining = true
675 if len(l.estdStreams) == 0 {
676 return ErrConnClosing
677 }
678 }
679 return nil
680}
681
682func (l *loopyWriter) goAwayHandler(g *goAway) error {
683 // Handling of outgoing GoAway is very specific to side.
684 if l.ssGoAwayHandler != nil {
685 draining, err := l.ssGoAwayHandler(g)
686 if err != nil {
687 return err
688 }
689 l.draining = draining
690 }
691 return nil
692}
693
694func (l *loopyWriter) handle(i interface{}) error {
695 switch i := i.(type) {
696 case *incomingWindowUpdate:
697 return l.incomingWindowUpdateHandler(i)
698 case *outgoingWindowUpdate:
699 return l.outgoingWindowUpdateHandler(i)
700 case *incomingSettings:
701 return l.incomingSettingsHandler(i)
702 case *outgoingSettings:
703 return l.outgoingSettingsHandler(i)
704 case *headerFrame:
705 return l.headerHandler(i)
706 case *registerStream:
707 return l.registerStreamHandler(i)
708 case *cleanupStream:
709 return l.cleanupStreamHandler(i)
710 case *incomingGoAway:
711 return l.incomingGoAwayHandler(i)
712 case *dataFrame:
713 return l.preprocessData(i)
714 case *ping:
715 return l.pingHandler(i)
716 case *goAway:
717 return l.goAwayHandler(i)
718 case *outFlowControlSizeRequest:
719 return l.outFlowControlSizeRequestHandler(i)
720 default:
721 return fmt.Errorf("transport: unknown control message type %T", i)
722 }
723}
724
725func (l *loopyWriter) applySettings(ss []http2.Setting) error {
726 for _, s := range ss {
727 switch s.ID {
728 case http2.SettingInitialWindowSize:
729 o := l.oiws
730 l.oiws = s.Val
731 if o < l.oiws {
732 // If the new limit is greater make all depleted streams active.
733 for _, stream := range l.estdStreams {
734 if stream.state == waitingOnStreamQuota {
735 stream.state = active
736 l.activeStreams.enqueue(stream)
737 }
738 }
739 }
740 case http2.SettingHeaderTableSize:
741 updateHeaderTblSize(l.hEnc, s.Val)
742 }
743 }
744 return nil
745}
746
747// processData removes the first stream from active streams, writes out at most 16KB
748// of its data and then puts it at the end of activeStreams if there's still more data
749 // to be sent and the stream has some stream-level flow control quota left.
750func (l *loopyWriter) processData() (bool, error) {
751 if l.sendQuota == 0 {
752 return true, nil
753 }
754 str := l.activeStreams.dequeue() // Remove the first stream.
755 if str == nil {
756 return true, nil
757 }
758 dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
759 // A data item is represented by a dataFrame, since it later translates into
760 // multiple HTTP2 data frames.
761 // Every dataFrame has two buffers: h, which holds the grpc-message header, and d, which holds the actual data.
762 // As an optimization to keep wire traffic low, data from d is copied into h to make a frame as big as the
763 // maximum possible HTTP2 frame size.
764
765 if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
766 // Client sends out empty data frame with endStream = true
767 if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
768 return false, err
769 }
770 str.itl.dequeue() // remove the empty data item from stream
771 if str.itl.isEmpty() {
772 str.state = empty
773 } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
774 if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
775 return false, err
776 }
777 if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
778 return false, err
779 }
780 } else {
781 l.activeStreams.enqueue(str)
782 }
783 return false, nil
784 }
785 var (
786 idx int
787 buf []byte
788 )
789 if len(dataItem.h) != 0 { // data header has not been written out yet.
790 buf = dataItem.h
791 } else {
792 idx = 1
793 buf = dataItem.d
794 }
795 size := http2MaxFrameLen
796 if len(buf) < size {
797 size = len(buf)
798 }
799 if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
800 str.state = waitingOnStreamQuota
801 return false, nil
802 } else if strQuota < size {
803 size = strQuota
804 }
805
806 if l.sendQuota < uint32(size) { // connection-level flow control.
807 size = int(l.sendQuota)
808 }
809 // Now that outgoing flow controls are checked we can replenish str's write quota
810 str.wq.replenish(size)
811 var endStream bool
812 // If this is the last data message on this stream and all of it can be written in this iteration.
813 if dataItem.endStream && size == len(buf) {
814 // buf contains either data or it contains header but data is empty.
815 if idx == 1 || len(dataItem.d) == 0 {
816 endStream = true
817 }
818 }
819 if dataItem.onEachWrite != nil {
820 dataItem.onEachWrite()
821 }
822 if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
823 return false, err
824 }
825 buf = buf[size:]
826 str.bytesOutStanding += size
827 l.sendQuota -= uint32(size)
828 if idx == 0 {
829 dataItem.h = buf
830 } else {
831 dataItem.d = buf
832 }
833
834 if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
835 str.itl.dequeue()
836 }
837 if str.itl.isEmpty() {
838 str.state = empty
839 } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
840 if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
841 return false, err
842 }
843 if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
844 return false, err
845 }
846 } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
847 str.state = waitingOnStreamQuota
848 } else { // Otherwise add it back to the list of active streams.
849 l.activeStreams.enqueue(str)
850 }
851 return false, nil
852}
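controlBuffer above is an unbounded, mutex-guarded queue with a one-slot channel used purely to wake a single blocked consumer, and executeAndPut lets callers check a condition and enqueue atomically. The cut-down, self-contained version below reproduces only the put/get wake-up pattern; miniBuf is an illustrative type, not the transport's.

    package main

    import (
    	"fmt"
    	"sync"
    )

    // miniBuf reproduces the controlBuffer pattern: an unbounded slice-backed
    // queue plus a one-slot channel that wakes a single blocked consumer.
    type miniBuf struct {
    	mu      sync.Mutex
    	items   []interface{}
    	waiting bool
    	ch      chan struct{}
    }

    func newMiniBuf() *miniBuf { return &miniBuf{ch: make(chan struct{}, 1)} }

    func (b *miniBuf) put(it interface{}) {
    	b.mu.Lock()
    	wake := b.waiting
    	b.waiting = false
    	b.items = append(b.items, it)
    	b.mu.Unlock()
    	if wake {
    		select { // never blocks: the channel has capacity 1
    		case b.ch <- struct{}{}:
    		default:
    		}
    	}
    }

    func (b *miniBuf) get() interface{} {
    	for {
    		b.mu.Lock()
    		if len(b.items) > 0 {
    			it := b.items[0]
    			b.items = b.items[1:]
    			b.mu.Unlock()
    			return it
    		}
    		b.waiting = true
    		b.mu.Unlock()
    		<-b.ch // sleep until a producer wakes us
    	}
    }

    func main() {
    	b := newMiniBuf()
    	go b.put("windowUpdate")
    	fmt.Println(b.get()) // prints windowUpdate
    }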
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
new file mode 100644
index 0000000..9fa306b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -0,0 +1,49 @@
1/*
2 *
3 * Copyright 2018 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "math"
23 "time"
24)
25
26const (
27 // The default flow control window size defined by the HTTP2 spec.
28 defaultWindowSize = 65535
29 // The initial window size for flow control.
30 initialWindowSize = defaultWindowSize // for an RPC
31 infinity = time.Duration(math.MaxInt64)
32 defaultClientKeepaliveTime = infinity
33 defaultClientKeepaliveTimeout = 20 * time.Second
34 defaultMaxStreamsClient = 100
35 defaultMaxConnectionIdle = infinity
36 defaultMaxConnectionAge = infinity
37 defaultMaxConnectionAgeGrace = infinity
38 defaultServerKeepaliveTime = 2 * time.Hour
39 defaultServerKeepaliveTimeout = 20 * time.Second
40 defaultKeepalivePolicyMinTime = 5 * time.Minute
41 // max window limit set by HTTP2 Specs.
42 maxWindowSize = math.MaxInt32
43 // defaultWriteQuota is the default value for number of data
44 // bytes that each stream can schedule before some of it being
45 // flushed out.
46 defaultWriteQuota = 64 * 1024
47 defaultClientMaxHeaderListSize = uint32(16 << 20)
48 defaultServerMaxHeaderListSize = uint32(16 << 20)
49)
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 0000000..5ea997a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,218 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "fmt"
23 "math"
24 "sync"
25 "sync/atomic"
26)
27
28// writeQuota is a soft limit on the amount of data a stream can
29// schedule before some of it is written out.
30type writeQuota struct {
31 quota int32
32 // get waits on a read from ch when quota goes less than or equal to zero.
33 // replenish writes on it when quota goes positive again.
34 ch chan struct{}
35 // done is triggered in error case.
36 done <-chan struct{}
37 // replenish is called by loopyWriter to give quota back to the stream.
38 // It is implemented as a field so that it can be updated
39 // by tests.
40 replenish func(n int)
41}
42
43func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
44 w := &writeQuota{
45 quota: sz,
46 ch: make(chan struct{}, 1),
47 done: done,
48 }
49 w.replenish = w.realReplenish
50 return w
51}
52
53func (w *writeQuota) get(sz int32) error {
54 for {
55 if atomic.LoadInt32(&w.quota) > 0 {
56 atomic.AddInt32(&w.quota, -sz)
57 return nil
58 }
59 select {
60 case <-w.ch:
61 continue
62 case <-w.done:
63 return errStreamDone
64 }
65 }
66}
67
68func (w *writeQuota) realReplenish(n int) {
69 sz := int32(n)
70 a := atomic.AddInt32(&w.quota, sz)
71 b := a - sz
72 if b <= 0 && a > 0 {
73 select {
74 case w.ch <- struct{}{}:
75 default:
76 }
77 }
78}
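A usage sketch of the quota handshake above, written as if it lived in this package (illustrative only): the sending side calls get before scheduling bytes and blocks once the soft limit is exhausted, while loopyWriter hands the quota back with replenish after the bytes are written out. defaultWriteQuota and errStreamDone are assumed to be the package's existing definitions.

    // Illustrative usage sketch of writeQuota, not part of the transport.
    func exampleWriteQuota() {
    	done := make(chan struct{})
    	wq := newWriteQuota(defaultWriteQuota, done)

    	// Sender side: reserve quota before buffering another 1KB of message
    	// bytes; this blocks once the soft limit is exhausted.
    	if err := wq.get(1024); err != nil {
    		return // the stream is done
    	}

    	// loopyWriter side: after those bytes are written to the wire, hand the
    	// quota back, waking any sender blocked in get.
    	wq.replenish(1024)
    	close(done)
    }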
79
80type trInFlow struct {
81 limit uint32
82 unacked uint32
83 effectiveWindowSize uint32
84}
85
86func (f *trInFlow) newLimit(n uint32) uint32 {
87 d := n - f.limit
88 f.limit = n
89 f.updateEffectiveWindowSize()
90 return d
91}
92
93func (f *trInFlow) onData(n uint32) uint32 {
94 f.unacked += n
95 if f.unacked >= f.limit/4 {
96 w := f.unacked
97 f.unacked = 0
98 f.updateEffectiveWindowSize()
99 return w
100 }
101 f.updateEffectiveWindowSize()
102 return 0
103}
104
105func (f *trInFlow) reset() uint32 {
106 w := f.unacked
107 f.unacked = 0
108 f.updateEffectiveWindowSize()
109 return w
110}
111
112func (f *trInFlow) updateEffectiveWindowSize() {
113 atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
114}
115
116func (f *trInFlow) getSize() uint32 {
117 return atomic.LoadUint32(&f.effectiveWindowSize)
118}
119
120// TODO(mmukhi): Simplify this code.
121// inFlow deals with inbound flow control
122type inFlow struct {
123 mu sync.Mutex
124 // The inbound flow control limit for pending data.
125 limit uint32
126 // pendingData is the overall data which has been received but not yet
127 // consumed by the application.
128 pendingData uint32
129 // The amount of data the application has consumed but grpc has not yet
130 // sent a window update for. Used to reduce window update frequency.
131 pendingUpdate uint32
132 // delta is the extra window update given by receiver when an application
133 // is reading data bigger in size than the inFlow limit.
134 delta uint32
135}
136
137// newLimit updates the inflow window to a new value n.
138// It assumes that n is always greater than the old limit.
139func (f *inFlow) newLimit(n uint32) uint32 {
140 f.mu.Lock()
141 d := n - f.limit
142 f.limit = n
143 f.mu.Unlock()
144 return d
145}
146
147func (f *inFlow) maybeAdjust(n uint32) uint32 {
148 if n > uint32(math.MaxInt32) {
149 n = uint32(math.MaxInt32)
150 }
151 f.mu.Lock()
152 // estSenderQuota is the receiver's view of the maximum number of bytes the sender
153 // can send without a window update.
154 estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
155 // estUntransmittedData is the maximum number of bytes the sender might not have put
156 // on the wire yet. A value of 0 or less means that we have already received all or
157 // more bytes than the application is requesting to read.
158 estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
159 // This implies that unless we send a window update, the sender won't be able to send all the bytes
160 // for this message. Therefore we must send an update over the limit since there's an active read
161 // request from the application.
162 if estUntransmittedData > estSenderQuota {
163 // The sender's window shouldn't exceed 2^31 - 1, as specified in the HTTP/2 spec.
164 if f.limit+n > maxWindowSize {
165 f.delta = maxWindowSize - f.limit
166 } else {
167 // Send a window update for the whole message and not just the difference between
168 // estUntransmittedData and estSenderQuota. This will be helpful in case the message
169 // is padded; we will fall back on the currently available window (at least 1/4th of the limit).
170 f.delta = n
171 }
172 f.mu.Unlock()
173 return f.delta
174 }
175 f.mu.Unlock()
176 return 0
177}
178
179// onData is invoked when some data frame is received. It updates pendingData.
180func (f *inFlow) onData(n uint32) error {
181 f.mu.Lock()
182 f.pendingData += n
183 if f.pendingData+f.pendingUpdate > f.limit+f.delta {
184 limit := f.limit
185 rcvd := f.pendingData + f.pendingUpdate
186 f.mu.Unlock()
187 return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
188 }
189 f.mu.Unlock()
190 return nil
191}
192
193// onRead is invoked when the application reads the data. It returns the window size
194// to be sent to the peer.
195func (f *inFlow) onRead(n uint32) uint32 {
196 f.mu.Lock()
197 if f.pendingData == 0 {
198 f.mu.Unlock()
199 return 0
200 }
201 f.pendingData -= n
202 if n > f.delta {
203 n -= f.delta
204 f.delta = 0
205 } else {
206 f.delta -= n
207 n = 0
208 }
209 f.pendingUpdate += n
210 if f.pendingUpdate >= f.limit/4 {
211 wu := f.pendingUpdate
212 f.pendingUpdate = 0
213 f.mu.Unlock()
214 return wu
215 }
216 f.mu.Unlock()
217 return 0
218}
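A usage sketch of the inbound window accounting above, again written as if it lived in this package and with illustrative byte counts: onData is called as DATA frames arrive and onRead as the application consumes them; a non-zero return from onRead is the increment the caller should send in a WINDOW_UPDATE frame.

    // Illustrative usage sketch of inFlow, not part of the transport.
    func exampleInFlow() uint32 {
    	f := &inFlow{limit: 65535}
    	_ = f.onData(20000)  // 20000 bytes arrived in DATA frames
    	w := f.onRead(20000) // the application consumed them
    	// 20000 >= 65535/4, so w is 20000: the caller should send a
    	// WINDOW_UPDATE with this increment.
    	return w
    }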
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
new file mode 100644
index 0000000..73b41ea
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -0,0 +1,449 @@
1/*
2 *
3 * Copyright 2016 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// This file is the implementation of a gRPC server using HTTP/2 which
20// uses the standard Go http2 Server implementation (via the
21// http.Handler interface), rather than speaking low-level HTTP/2
22// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
23
24package transport
25
26import (
27 "context"
28 "errors"
29 "fmt"
30 "io"
31 "net"
32 "net/http"
33 "strings"
34 "sync"
35 "time"
36
37 "github.com/golang/protobuf/proto"
38 "golang.org/x/net/http2"
39 "google.golang.org/grpc/codes"
40 "google.golang.org/grpc/credentials"
41 "google.golang.org/grpc/metadata"
42 "google.golang.org/grpc/peer"
43 "google.golang.org/grpc/stats"
44 "google.golang.org/grpc/status"
45)
46
47// NewServerHandlerTransport returns a ServerTransport handling gRPC
48// from inside an http.Handler. It requires that the http Server
49// supports HTTP/2.
50func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
51 if r.ProtoMajor != 2 {
52 return nil, errors.New("gRPC requires HTTP/2")
53 }
54 if r.Method != "POST" {
55 return nil, errors.New("invalid gRPC request method")
56 }
57 contentType := r.Header.Get("Content-Type")
58 // TODO: do we assume contentType is lowercase? we did before
59 contentSubtype, validContentType := contentSubtype(contentType)
60 if !validContentType {
61 return nil, errors.New("invalid gRPC request content-type")
62 }
63 if _, ok := w.(http.Flusher); !ok {
64 return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
65 }
66 if _, ok := w.(http.CloseNotifier); !ok {
67 return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
68 }
69
70 st := &serverHandlerTransport{
71 rw: w,
72 req: r,
73 closedCh: make(chan struct{}),
74 writes: make(chan func()),
75 contentType: contentType,
76 contentSubtype: contentSubtype,
77 stats: stats,
78 }
79
80 if v := r.Header.Get("grpc-timeout"); v != "" {
81 to, err := decodeTimeout(v)
82 if err != nil {
83 return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
84 }
85 st.timeoutSet = true
86 st.timeout = to
87 }
88
89 metakv := []string{"content-type", contentType}
90 if r.Host != "" {
91 metakv = append(metakv, ":authority", r.Host)
92 }
93 for k, vv := range r.Header {
94 k = strings.ToLower(k)
95 if isReservedHeader(k) && !isWhitelistedHeader(k) {
96 continue
97 }
98 for _, v := range vv {
99 v, err := decodeMetadataHeader(k, v)
100 if err != nil {
101 return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
102 }
103 metakv = append(metakv, k, v)
104 }
105 }
106 st.headerMD = metadata.Pairs(metakv...)
107
108 return st, nil
109}
110
111// serverHandlerTransport is an implementation of ServerTransport
112// which replies to exactly one gRPC request (exactly one HTTP request),
113// using the net/http.Handler interface. This http.Handler is guaranteed
114// at this point to be speaking over HTTP/2, so it's able to speak valid
115// gRPC.
116type serverHandlerTransport struct {
117 rw http.ResponseWriter
118 req *http.Request
119 timeoutSet bool
120 timeout time.Duration
121 didCommonHeaders bool
122
123 headerMD metadata.MD
124
125 closeOnce sync.Once
126 closedCh chan struct{} // closed on Close
127
128 // writes is a channel of code to run serialized in the
129 // ServeHTTP (HandleStreams) goroutine. The channel is closed
130 // when WriteStatus is called.
131 writes chan func()
132
133 // block concurrent WriteStatus calls
134 // e.g. grpc/(*serverStream).SendMsg/RecvMsg
135 writeStatusMu sync.Mutex
136
137 // we just mirror the request content-type
138 contentType string
139 // we store both contentType and contentSubtype so we don't keep recreating them
140 // TODO make sure this is consistent across handler_server and http2_server
141 contentSubtype string
142
143 stats stats.Handler
144}
145
146func (ht *serverHandlerTransport) Close() error {
147 ht.closeOnce.Do(ht.closeCloseChanOnce)
148 return nil
149}
150
151func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
152
153func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
154
155// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
156// the empty string if unknown.
157type strAddr string
158
159func (a strAddr) Network() string {
160 if a != "" {
161 // Per the documentation on net/http.Request.RemoteAddr, if this is
162 // set, it's set to the IP:port of the peer (hence, TCP):
163 // https://golang.org/pkg/net/http/#Request
164 //
165 // If we want to support Unix sockets later, we can
166 // add our own grpc-specific convention within the
167 // grpc codebase to set RemoteAddr to a different
168 // format, or probably better: we can attach it to the
169 // context and use that from serverHandlerTransport.RemoteAddr.
170 return "tcp"
171 }
172 return ""
173}
174
175func (a strAddr) String() string { return string(a) }
176
177// do runs fn in the ServeHTTP goroutine.
178func (ht *serverHandlerTransport) do(fn func()) error {
179 // Avoid a panic writing to closed channel. Imperfect but maybe good enough.
180 select {
181 case <-ht.closedCh:
182 return ErrConnClosing
183 default:
184 select {
185 case ht.writes <- fn:
186 return nil
187 case <-ht.closedCh:
188 return ErrConnClosing
189 }
190 }
191}
192
193func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
194 ht.writeStatusMu.Lock()
195 defer ht.writeStatusMu.Unlock()
196
197 err := ht.do(func() {
198 ht.writeCommonHeaders(s)
199
200 // And flush, in case no header or body has been sent yet.
201 // This forces a separation of headers and trailers if this is the
202 // first call (for example, in the end2end tests' TestNoService).
203 ht.rw.(http.Flusher).Flush()
204
205 h := ht.rw.Header()
206 h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
207 if m := st.Message(); m != "" {
208 h.Set("Grpc-Message", encodeGrpcMessage(m))
209 }
210
211 if p := st.Proto(); p != nil && len(p.Details) > 0 {
212 stBytes, err := proto.Marshal(p)
213 if err != nil {
214 // TODO: return error instead, when callers are able to handle it.
215 panic(err)
216 }
217
218 h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
219 }
220
221 if md := s.Trailer(); len(md) > 0 {
222 for k, vv := range md {
223 // Clients don't tolerate reading restricted headers after some non restricted ones were sent.
224 if isReservedHeader(k) {
225 continue
226 }
227 for _, v := range vv {
228 // http2 ResponseWriter mechanism to send undeclared Trailers after
229 // the headers have possibly been written.
230 h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
231 }
232 }
233 }
234 })
235
236 if err == nil { // transport has not been closed
237 if ht.stats != nil {
238 ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
239 }
240 close(ht.writes)
241 }
242 ht.Close()
243 return err
244}
245
246// writeCommonHeaders sets common headers on the first write
247// call (Write, WriteHeader, or WriteStatus).
248func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
249 if ht.didCommonHeaders {
250 return
251 }
252 ht.didCommonHeaders = true
253
254 h := ht.rw.Header()
255 h["Date"] = nil // suppress Date to make tests happy; TODO: restore
256 h.Set("Content-Type", ht.contentType)
257
258 // Predeclare trailers we'll set later in WriteStatus (after the body).
259 // This is a SHOULD in the HTTP RFC, and the way you add (known)
260 // Trailers per the net/http.ResponseWriter contract.
261 // See https://golang.org/pkg/net/http/#ResponseWriter
262 // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
263 h.Add("Trailer", "Grpc-Status")
264 h.Add("Trailer", "Grpc-Message")
265 h.Add("Trailer", "Grpc-Status-Details-Bin")
266
267 if s.sendCompress != "" {
268 h.Set("Grpc-Encoding", s.sendCompress)
269 }
270}
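The Trailer predeclaration above is the standard net/http mechanism for known trailers: declare them before writing the body, then set their values on the header map afterwards. A minimal standalone handler demonstrating the same mechanism is sketched below; the X-Status name and the port are arbitrary placeholders.

    package main

    import (
    	"fmt"
    	"net/http"
    )

    // handler predeclares a trailer, writes the body, then sets the trailer's
    // value; net/http transmits it after the body, which is the same mechanism
    // writeCommonHeaders/WriteStatus rely on for Grpc-Status and friends.
    func handler(w http.ResponseWriter, r *http.Request) {
    	w.Header().Add("Trailer", "X-Status") // must be declared before the body
    	w.WriteHeader(http.StatusOK)
    	fmt.Fprintln(w, "body")
    	w.Header().Set("X-Status", "0") // set after the body has been written
    }

    func main() {
    	http.HandleFunc("/", handler)
    	http.ListenAndServe("localhost:8080", nil) // address is an arbitrary placeholder
    }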
271
272func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
273 return ht.do(func() {
274 ht.writeCommonHeaders(s)
275 ht.rw.Write(hdr)
276 ht.rw.Write(data)
277 ht.rw.(http.Flusher).Flush()
278 })
279}
280
281func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
282 err := ht.do(func() {
283 ht.writeCommonHeaders(s)
284 h := ht.rw.Header()
285 for k, vv := range md {
286 // Clients don't tolerate reading restricted headers after some non restricted ones were sent.
287 if isReservedHeader(k) {
288 continue
289 }
290 for _, v := range vv {
291 v = encodeMetadataHeader(k, v)
292 h.Add(k, v)
293 }
294 }
295 ht.rw.WriteHeader(200)
296 ht.rw.(http.Flusher).Flush()
297 })
298
299 if err == nil {
300 if ht.stats != nil {
301 ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
302 }
303 }
304 return err
305}
306
307func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
308 // With this transport type there will be exactly 1 stream: this HTTP request.
309
310 ctx := ht.req.Context()
311 var cancel context.CancelFunc
312 if ht.timeoutSet {
313 ctx, cancel = context.WithTimeout(ctx, ht.timeout)
314 } else {
315 ctx, cancel = context.WithCancel(ctx)
316 }
317
318 // requestOver is closed when either the request's context is done
319 // or the status has been written via WriteStatus.
320 requestOver := make(chan struct{})
321
322 // clientGone receives a single value if peer is gone, either
323 // because the underlying connection is dead or because the
324 // peer sends an http2 RST_STREAM.
325 clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
326 go func() {
327 select {
328 case <-requestOver:
329 case <-ht.closedCh:
330 case <-clientGone:
331 }
332 cancel()
333 ht.Close()
334 }()
335
336 req := ht.req
337
338 s := &Stream{
339 id: 0, // irrelevant
340 requestRead: func(int) {},
341 cancel: cancel,
342 buf: newRecvBuffer(),
343 st: ht,
344 method: req.URL.Path,
345 recvCompress: req.Header.Get("grpc-encoding"),
346 contentSubtype: ht.contentSubtype,
347 }
348 pr := &peer.Peer{
349 Addr: ht.RemoteAddr(),
350 }
351 if req.TLS != nil {
352 pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
353 }
354 ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
355 s.ctx = peer.NewContext(ctx, pr)
356 if ht.stats != nil {
357 s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
358 inHeader := &stats.InHeader{
359 FullMethod: s.method,
360 RemoteAddr: ht.RemoteAddr(),
361 Compression: s.recvCompress,
362 }
363 ht.stats.HandleRPC(s.ctx, inHeader)
364 }
365 s.trReader = &transportReader{
366 reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
367 windowHandler: func(int) {},
368 }
369
370 // readerDone is closed when the Body.Read-ing goroutine exits.
371 readerDone := make(chan struct{})
372 go func() {
373 defer close(readerDone)
374
375 // TODO: minimize garbage, optimize recvBuffer code/ownership
376 const readSize = 8196
377 for buf := make([]byte, readSize); ; {
378 n, err := req.Body.Read(buf)
379 if n > 0 {
380 s.buf.put(recvMsg{data: buf[:n:n]})
381 buf = buf[n:]
382 }
383 if err != nil {
384 s.buf.put(recvMsg{err: mapRecvMsgError(err)})
385 return
386 }
387 if len(buf) == 0 {
388 buf = make([]byte, readSize)
389 }
390 }
391 }()
392
393 // startStream is provided by the *grpc.Server's serveStreams.
394 // It starts a goroutine serving s and exits immediately.
395 // The goroutine that is started is the one that then calls
396 // into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
397 startStream(s)
398
399 ht.runStream()
400 close(requestOver)
401
402 // Wait for reading goroutine to finish.
403 req.Body.Close()
404 <-readerDone
405}
406
407func (ht *serverHandlerTransport) runStream() {
408 for {
409 select {
410 case fn, ok := <-ht.writes:
411 if !ok {
412 return
413 }
414 fn()
415 case <-ht.closedCh:
416 return
417 }
418 }
419}
420
421func (ht *serverHandlerTransport) IncrMsgSent() {}
422
423func (ht *serverHandlerTransport) IncrMsgRecv() {}
424
425func (ht *serverHandlerTransport) Drain() {
426 panic("Drain() is not implemented")
427}
428
429 // mapRecvMsgError maps the non-nil err into the appropriate
430 // error value as expected by callers of *grpc.parser.recvMsg.
431 // In particular, it can only be:
432// * io.EOF
433// * io.ErrUnexpectedEOF
434// * of type transport.ConnectionError
435// * an error from the status package
436func mapRecvMsgError(err error) error {
437 if err == io.EOF || err == io.ErrUnexpectedEOF {
438 return err
439 }
440 if se, ok := err.(http2.StreamError); ok {
441 if code, ok := http2ErrConvTab[se.Code]; ok {
442 return status.Error(code, se.Error())
443 }
444 }
445 if strings.Contains(err.Error(), "body closed by handler") {
446 return status.Error(codes.Canceled, err.Error())
447 }
448 return connectionErrorf(true, err, err.Error())
449}
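This transport is reached through (*grpc.Server).ServeHTTP rather than gRPC's own listener loop. A hedged sketch of wiring that path up with the standard library follows; service registration is elided and cert.pem/key.pem are placeholders (HTTP/2, and therefore TLS here, is required, matching the ProtoMajor check in NewServerHandlerTransport).

    package main

    import (
    	"net/http"

    	"google.golang.org/grpc"
    )

    func main() {
    	gs := grpc.NewServer()
    	// Register services on gs here. Each request then flows through
    	// gs.ServeHTTP and into NewServerHandlerTransport.
    	srv := &http.Server{Addr: ":8443", Handler: gs}
    	// HTTP/2 (and hence TLS here) is required; cert.pem and key.pem are
    	// placeholders for real credentials.
    	if err := srv.ListenAndServeTLS("cert.pem", "key.pem"); err != nil {
    		panic(err)
    	}
    }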
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 0000000..babcaee
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1380 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "context"
23 "fmt"
24 "io"
25 "math"
26 "net"
27 "strconv"
28 "strings"
29 "sync"
30 "sync/atomic"
31 "time"
32
33 "golang.org/x/net/http2"
34 "golang.org/x/net/http2/hpack"
35
36 "google.golang.org/grpc/codes"
37 "google.golang.org/grpc/credentials"
38 "google.golang.org/grpc/internal/channelz"
39 "google.golang.org/grpc/internal/syscall"
40 "google.golang.org/grpc/keepalive"
41 "google.golang.org/grpc/metadata"
42 "google.golang.org/grpc/peer"
43 "google.golang.org/grpc/stats"
44 "google.golang.org/grpc/status"
45)
46
47// http2Client implements the ClientTransport interface with HTTP2.
48type http2Client struct {
49 ctx context.Context
50 cancel context.CancelFunc
51 ctxDone <-chan struct{} // Cache the ctx.Done() chan.
52 userAgent string
53 md interface{}
54 conn net.Conn // underlying communication channel
55 loopy *loopyWriter
56 remoteAddr net.Addr
57 localAddr net.Addr
58 authInfo credentials.AuthInfo // auth info about the connection
59
60 readerDone chan struct{} // sync point to enable testing.
61 writerDone chan struct{} // sync point to enable testing.
62 // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
63 // that the server sent GoAway on this transport.
64 goAway chan struct{}
65 // awakenKeepalive is used to wake up keepalive after it has gone dormant.
66 awakenKeepalive chan struct{}
67
68 framer *framer
69 // controlBuf delivers all the control related tasks (e.g., window
70 // updates, reset streams, and various settings) to the controller.
71 controlBuf *controlBuffer
72 fc *trInFlow
73 // The scheme used: https if TLS is on, http otherwise.
74 scheme string
75
76 isSecure bool
77
78 perRPCCreds []credentials.PerRPCCredentials
79
80 // Boolean to keep track of reading activity on transport.
81 // 1 is true and 0 is false.
82 activity uint32 // Accessed atomically.
83 kp keepalive.ClientParameters
84 keepaliveEnabled bool
85
86 statsHandler stats.Handler
87
88 initialWindowSize int32
89
90 // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
91 maxSendHeaderListSize *uint32
92
93 bdpEst *bdpEstimator
94 // onPrefaceReceipt is a callback that client transport calls upon
95 // receiving the server preface to signal that a successful HTTP2
96 // connection was established.
97 onPrefaceReceipt func()
98
99 maxConcurrentStreams uint32
100 streamQuota int64
101 streamsQuotaAvailable chan struct{}
102 waitingStreams uint32
103 nextID uint32
104
105 mu sync.Mutex // guard the following variables
106 state transportState
107 activeStreams map[uint32]*Stream
108 // prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
109 prevGoAwayID uint32
110 // goAwayReason records the http2.ErrCode and debug data received with the
111 // GoAway frame.
112 goAwayReason GoAwayReason
113
114 // Fields below are for channelz metric collection.
115 channelzID int64 // channelz unique identification number
116 czData *channelzData
117
118 onGoAway func(GoAwayReason)
119 onClose func()
120}
121
122func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
123 if fn != nil {
124 return fn(ctx, addr)
125 }
126 return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
127}
128
129func isTemporary(err error) bool {
130 switch err := err.(type) {
131 case interface {
132 Temporary() bool
133 }:
134 return err.Temporary()
135 case interface {
136 Timeout() bool
137 }:
138 // Timeouts may be resolved upon retry, and are thus treated as
139 // temporary.
140 return err.Timeout()
141 }
142 return true
143}
144
145// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
146 // and starts to receive messages on it. A non-nil error is returned if construction
147// fails.
148func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
149 scheme := "http"
150 ctx, cancel := context.WithCancel(ctx)
151 defer func() {
152 if err != nil {
153 cancel()
154 }
155 }()
156
157 conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
158 if err != nil {
159 if opts.FailOnNonTempDialError {
160 return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
161 }
162 return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
163 }
164 // Any further errors will close the underlying connection
165 defer func(conn net.Conn) {
166 if err != nil {
167 conn.Close()
168 }
169 }(conn)
170 kp := opts.KeepaliveParams
171 // Validate keepalive parameters.
172 if kp.Time == 0 {
173 kp.Time = defaultClientKeepaliveTime
174 }
175 if kp.Timeout == 0 {
176 kp.Timeout = defaultClientKeepaliveTimeout
177 }
178 keepaliveEnabled := false
179 if kp.Time != infinity {
180 if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
181 return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
182 }
183 keepaliveEnabled = true
184 }
185 var (
186 isSecure bool
187 authInfo credentials.AuthInfo
188 )
189 transportCreds := opts.TransportCredentials
190 perRPCCreds := opts.PerRPCCredentials
191
192 if b := opts.CredsBundle; b != nil {
193 if t := b.TransportCredentials(); t != nil {
194 transportCreds = t
195 }
196 if t := b.PerRPCCredentials(); t != nil {
197 perRPCCreds = append(perRPCCreds, t)
198 }
199 }
200 if transportCreds != nil {
201 scheme = "https"
202 conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
203 if err != nil {
204 return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
205 }
206 isSecure = true
207 }
208 dynamicWindow := true
209 icwz := int32(initialWindowSize)
210 if opts.InitialConnWindowSize >= defaultWindowSize {
211 icwz = opts.InitialConnWindowSize
212 dynamicWindow = false
213 }
214 writeBufSize := opts.WriteBufferSize
215 readBufSize := opts.ReadBufferSize
216 maxHeaderListSize := defaultClientMaxHeaderListSize
217 if opts.MaxHeaderListSize != nil {
218 maxHeaderListSize = *opts.MaxHeaderListSize
219 }
220 t := &http2Client{
221 ctx: ctx,
222 ctxDone: ctx.Done(), // Cache Done chan.
223 cancel: cancel,
224 userAgent: opts.UserAgent,
225 md: addr.Metadata,
226 conn: conn,
227 remoteAddr: conn.RemoteAddr(),
228 localAddr: conn.LocalAddr(),
229 authInfo: authInfo,
230 readerDone: make(chan struct{}),
231 writerDone: make(chan struct{}),
232 goAway: make(chan struct{}),
233 awakenKeepalive: make(chan struct{}, 1),
234 framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
235 fc: &trInFlow{limit: uint32(icwz)},
236 scheme: scheme,
237 activeStreams: make(map[uint32]*Stream),
238 isSecure: isSecure,
239 perRPCCreds: perRPCCreds,
240 kp: kp,
241 statsHandler: opts.StatsHandler,
242 initialWindowSize: initialWindowSize,
243 onPrefaceReceipt: onPrefaceReceipt,
244 nextID: 1,
245 maxConcurrentStreams: defaultMaxStreamsClient,
246 streamQuota: defaultMaxStreamsClient,
247 streamsQuotaAvailable: make(chan struct{}, 1),
248 czData: new(channelzData),
249 onGoAway: onGoAway,
250 onClose: onClose,
251 keepaliveEnabled: keepaliveEnabled,
252 }
253 t.controlBuf = newControlBuffer(t.ctxDone)
254 if opts.InitialWindowSize >= defaultWindowSize {
255 t.initialWindowSize = opts.InitialWindowSize
256 dynamicWindow = false
257 }
258 if dynamicWindow {
259 t.bdpEst = &bdpEstimator{
260 bdp: initialWindowSize,
261 updateFlowControl: t.updateFlowControl,
262 }
263 }
264 // Make sure awakenKeepalive can't be written upon.
265 // keepalive routine will make it writable, if need be.
266 t.awakenKeepalive <- struct{}{}
267 if t.statsHandler != nil {
268 t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
269 RemoteAddr: t.remoteAddr,
270 LocalAddr: t.localAddr,
271 })
272 connBegin := &stats.ConnBegin{
273 Client: true,
274 }
275 t.statsHandler.HandleConn(t.ctx, connBegin)
276 }
277 if channelz.IsOn() {
278 t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
279 }
280 if t.keepaliveEnabled {
281 go t.keepalive()
282 }
283 // Start the reader goroutine for incoming message. Each transport has
284 // a dedicated goroutine which reads HTTP2 frame from network. Then it
285 // dispatches the frame to the corresponding stream entity.
286 go t.reader()
287
288 // Send connection preface to server.
289 n, err := t.conn.Write(clientPreface)
290 if err != nil {
291 t.Close()
292 return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
293 }
294 if n != len(clientPreface) {
295 t.Close()
296 return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
297 }
298 var ss []http2.Setting
299
300 if t.initialWindowSize != defaultWindowSize {
301 ss = append(ss, http2.Setting{
302 ID: http2.SettingInitialWindowSize,
303 Val: uint32(t.initialWindowSize),
304 })
305 }
306 if opts.MaxHeaderListSize != nil {
307 ss = append(ss, http2.Setting{
308 ID: http2.SettingMaxHeaderListSize,
309 Val: *opts.MaxHeaderListSize,
310 })
311 }
312 err = t.framer.fr.WriteSettings(ss...)
313 if err != nil {
314 t.Close()
315 return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
316 }
317 // Adjust the connection flow control window if needed.
318 if delta := uint32(icwz - defaultWindowSize); delta > 0 {
319 if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
320 t.Close()
321 return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
322 }
323 }
324
325 t.framer.writer.Flush()
326 go func() {
327 t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
328 err := t.loopy.run()
329 if err != nil {
330 errorf("transport: loopyWriter.run returning. Err: %v", err)
331 }
332 // If it's a connection error, let reader goroutine handle it
333 // since there might be data in the buffers.
334 if _, ok := err.(net.Error); !ok {
335 t.conn.Close()
336 }
337 close(t.writerDone)
338 }()
339 return t, nil
340}
341
342func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
343 // TODO(zhaoq): Handle uint32 overflow of Stream.id.
344 s := &Stream{
345 done: make(chan struct{}),
346 method: callHdr.Method,
347 sendCompress: callHdr.SendCompress,
348 buf: newRecvBuffer(),
349 headerChan: make(chan struct{}),
350 contentSubtype: callHdr.ContentSubtype,
351 }
352 s.wq = newWriteQuota(defaultWriteQuota, s.done)
353 s.requestRead = func(n int) {
354 t.adjustWindow(s, uint32(n))
355 }
356 // The client side stream context should have exactly the same life cycle as the user-provided context.
357 // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
358 // So we use the original context here instead of creating a copy.
359 s.ctx = ctx
360 s.trReader = &transportReader{
361 reader: &recvBufferReader{
362 ctx: s.ctx,
363 ctxDone: s.ctx.Done(),
364 recv: s.buf,
365 closeStream: func(err error) {
366 t.CloseStream(s, err)
367 },
368 },
369 windowHandler: func(n int) {
370 t.updateWindow(s, uint32(n))
371 },
372 }
373 return s
374}
375
376func (t *http2Client) getPeer() *peer.Peer {
377 pr := &peer.Peer{
378 Addr: t.remoteAddr,
379 }
380 // Attach Auth info if there is any.
381 if t.authInfo != nil {
382 pr.AuthInfo = t.authInfo
383 }
384 return pr
385}
386
387func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
388 aud := t.createAudience(callHdr)
389 authData, err := t.getTrAuthData(ctx, aud)
390 if err != nil {
391 return nil, err
392 }
393 callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
394 if err != nil {
395 return nil, err
396 }
397 // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
398 // first and create a slice of that exact size.
399 // Make the slice of a certain predictable size to reduce allocations made by append.
400 hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
401 hfLen += len(authData) + len(callAuthData)
402 headerFields := make([]hpack.HeaderField, 0, hfLen)
403 headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
404 headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
405 headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
406 headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
407 headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
408 headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
409 headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
410 if callHdr.PreviousAttempts > 0 {
411 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
412 }
413
414 if callHdr.SendCompress != "" {
415 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
416 }
417 if dl, ok := ctx.Deadline(); ok {
418 // Send out the timeout regardless of its value. The server can detect a timed-out context by itself.
419 // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
420 timeout := dl.Sub(time.Now())
421 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
422 }
423 for k, v := range authData {
424 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
425 }
426 for k, v := range callAuthData {
427 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
428 }
429 if b := stats.OutgoingTags(ctx); b != nil {
430 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
431 }
432 if b := stats.OutgoingTrace(ctx); b != nil {
433 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
434 }
435
436 if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
437 var k string
438 for _, vv := range added {
439 for i, v := range vv {
440 if i%2 == 0 {
441 k = v
442 continue
443 }
444 // HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers have been set.
445 if isReservedHeader(k) {
446 continue
447 }
448 headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
449 }
450 }
451 for k, vv := range md {
452 // HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers have been set.
453 if isReservedHeader(k) {
454 continue
455 }
456 for _, v := range vv {
457 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
458 }
459 }
460 }
461 if md, ok := t.md.(*metadata.MD); ok {
462 for k, vv := range *md {
463 if isReservedHeader(k) {
464 continue
465 }
466 for _, v := range vv {
467 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
468 }
469 }
470 }
471 return headerFields, nil
472}
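The application-supplied metadata consumed above (via metadata.FromOutgoingContextRaw) originates from the caller's outgoing context. Below is a minimal, self-contained usage sketch, not part of this vendored file; the key/value pairs are made up.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Attach two illustrative key/value pairs to the outgoing context; the
	// transport turns each value into one HTTP/2 header field, skipping
	// reserved names, as createHeaderFields shows above.
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		"x-request-id", "abc123",
		"authorization", "Bearer placeholder-token")

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md) // both pairs are present in the returned MD
}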
473
474func (t *http2Client) createAudience(callHdr *CallHdr) string {
475 // Create an audience string only if needed.
476 if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
477 return ""
478 }
479 // Construct URI required to get auth request metadata.
480 // Omit port if it is the default one.
481 host := strings.TrimSuffix(callHdr.Host, ":443")
482 pos := strings.LastIndex(callHdr.Method, "/")
483 if pos == -1 {
484 pos = len(callHdr.Method)
485 }
486 return "https://" + host + callHdr.Method[:pos]
487}
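To make the audience construction concrete, here is a small standalone sketch of the same string manipulation; the host and method literals are hypothetical.

package main

import (
	"fmt"
	"strings"
)

// audience mirrors the logic of createAudience: strip a default :443 port and
// drop the final /MethodName component of the fully-qualified method.
func audience(host, method string) string {
	host = strings.TrimSuffix(host, ":443")
	pos := strings.LastIndex(method, "/")
	if pos == -1 {
		pos = len(method)
	}
	return "https://" + host + method[:pos]
}

func main() {
	fmt.Println(audience("example.com:443", "/helloworld.Greeter/SayHello"))
	// Output: https://example.com/helloworld.Greeter
}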
488
489func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
490 authData := map[string]string{}
491 for _, c := range t.perRPCCreds {
492 data, err := c.GetRequestMetadata(ctx, audience)
493 if err != nil {
494 if _, ok := status.FromError(err); ok {
495 return nil, err
496 }
497
498 return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
499 }
500 for k, v := range data {
501 // Capital header names are illegal in HTTP/2.
502 k = strings.ToLower(k)
503 authData[k] = v
504 }
505 }
506 return authData, nil
507}
508
509func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
510 callAuthData := map[string]string{}
511 // Check if credentials.PerRPCCredentials were provided via call options.
512 // Note: if these credentials are provided both via dial options and call
513 // options, then both sets of credentials will be applied.
514 if callCreds := callHdr.Creds; callCreds != nil {
515 if !t.isSecure && callCreds.RequireTransportSecurity() {
516 return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
517 }
518 data, err := callCreds.GetRequestMetadata(ctx, audience)
519 if err != nil {
520 return nil, status.Errorf(codes.Internal, "transport: %v", err)
521 }
522 for k, v := range data {
523 // Capital header names are illegal in HTTP/2
524 k = strings.ToLower(k)
525 callAuthData[k] = v
526 }
527 }
528 return callAuthData, nil
529}
530
531// NewStream creates a stream and registers it with the transport as an
532// "active" stream.
533func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
534 ctx = peer.NewContext(ctx, t.getPeer())
535 headerFields, err := t.createHeaderFields(ctx, callHdr)
536 if err != nil {
537 return nil, err
538 }
539 s := t.newStream(ctx, callHdr)
540 cleanup := func(err error) {
541 if s.swapState(streamDone) == streamDone {
542 // If it was already done, return.
543 return
544 }
545 // The stream was unprocessed by the server.
546 atomic.StoreUint32(&s.unprocessed, 1)
547 s.write(recvMsg{err: err})
548 close(s.done)
549 // If headerChan isn't closed, then close it.
550 if atomic.SwapUint32(&s.headerDone, 1) == 0 {
551 close(s.headerChan)
552 }
553
554 }
555 hdr := &headerFrame{
556 hf: headerFields,
557 endStream: false,
558 initStream: func(id uint32) (bool, error) {
559 t.mu.Lock()
560 if state := t.state; state != reachable {
561 t.mu.Unlock()
562 // Do a quick cleanup.
563 err := error(errStreamDrain)
564 if state == closing {
565 err = ErrConnClosing
566 }
567 cleanup(err)
568 return false, err
569 }
570 t.activeStreams[id] = s
571 if channelz.IsOn() {
572 atomic.AddInt64(&t.czData.streamsStarted, 1)
573 atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
574 }
575 var sendPing bool
576 // If the number of active streams changes from 0 to 1, then check if keepalive
577 // has gone dormant. If so, wake it up.
578 if len(t.activeStreams) == 1 && t.keepaliveEnabled {
579 select {
580 case t.awakenKeepalive <- struct{}{}:
581 sendPing = true
582 // Fill the awakenKeepalive channel again as this channel must be
583 // kept non-writable except at the point that the keepalive()
584 // goroutine is waiting either to be awakened or shut down.
585 t.awakenKeepalive <- struct{}{}
586 default:
587 }
588 }
589 t.mu.Unlock()
590 return sendPing, nil
591 },
592 onOrphaned: cleanup,
593 wq: s.wq,
594 }
595 firstTry := true
596 var ch chan struct{}
597 checkForStreamQuota := func(it interface{}) bool {
598 if t.streamQuota <= 0 { // Can go negative if server decreases it.
599 if firstTry {
600 t.waitingStreams++
601 }
602 ch = t.streamsQuotaAvailable
603 return false
604 }
605 if !firstTry {
606 t.waitingStreams--
607 }
608 t.streamQuota--
609 h := it.(*headerFrame)
610 h.streamID = t.nextID
611 t.nextID += 2
612 s.id = h.streamID
613 s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
614 if t.streamQuota > 0 && t.waitingStreams > 0 {
615 select {
616 case t.streamsQuotaAvailable <- struct{}{}:
617 default:
618 }
619 }
620 return true
621 }
622 var hdrListSizeErr error
623 checkForHeaderListSize := func(it interface{}) bool {
624 if t.maxSendHeaderListSize == nil {
625 return true
626 }
627 hdrFrame := it.(*headerFrame)
628 var sz int64
629 for _, f := range hdrFrame.hf {
630 if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
631 hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
632 return false
633 }
634 }
635 return true
636 }
637 for {
638 success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
639 if !checkForStreamQuota(it) {
640 return false
641 }
642 if !checkForHeaderListSize(it) {
643 return false
644 }
645 return true
646 }, hdr)
647 if err != nil {
648 return nil, err
649 }
650 if success {
651 break
652 }
653 if hdrListSizeErr != nil {
654 return nil, hdrListSizeErr
655 }
656 firstTry = false
657 select {
658 case <-ch:
659 case <-s.ctx.Done():
660 return nil, ContextErr(s.ctx.Err())
661 case <-t.goAway:
662 return nil, errStreamDrain
663 case <-t.ctx.Done():
664 return nil, ErrConnClosing
665 }
666 }
667 if t.statsHandler != nil {
668 outHeader := &stats.OutHeader{
669 Client: true,
670 FullMethod: callHdr.Method,
671 RemoteAddr: t.remoteAddr,
672 LocalAddr: t.localAddr,
673 Compression: callHdr.SendCompress,
674 }
675 t.statsHandler.HandleRPC(s.ctx, outHeader)
676 }
677 return s, nil
678}
679
680// CloseStream clears the footprint of a stream when the stream is not needed any more.
681// This must not be executed in the reader's goroutine.
682func (t *http2Client) CloseStream(s *Stream, err error) {
683 var (
684 rst bool
685 rstCode http2.ErrCode
686 )
687 if err != nil {
688 rst = true
689 rstCode = http2.ErrCodeCancel
690 }
691 t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
692}
693
694func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
695 // Set stream status to done.
696 if s.swapState(streamDone) == streamDone {
697 // If it was already done, return. If multiple closeStream calls
698 // happen simultaneously, wait for the first to finish.
699 <-s.done
700 return
701 }
702 // status and trailers can be updated here without any synchronization because the stream goroutine will
703 // only read them after it sees an io.EOF error from read or write, and we'll write those errors
704 // only after updating them.
705 s.status = st
706 if len(mdata) > 0 {
707 s.trailer = mdata
708 }
709 if err != nil {
710 // This will unblock reads eventually.
711 s.write(recvMsg{err: err})
712 }
713 // If headerChan isn't closed, then close it.
714 if atomic.SwapUint32(&s.headerDone, 1) == 0 {
715 s.noHeaders = true
716 close(s.headerChan)
717 }
718 cleanup := &cleanupStream{
719 streamID: s.id,
720 onWrite: func() {
721 t.mu.Lock()
722 if t.activeStreams != nil {
723 delete(t.activeStreams, s.id)
724 }
725 t.mu.Unlock()
726 if channelz.IsOn() {
727 if eosReceived {
728 atomic.AddInt64(&t.czData.streamsSucceeded, 1)
729 } else {
730 atomic.AddInt64(&t.czData.streamsFailed, 1)
731 }
732 }
733 },
734 rst: rst,
735 rstCode: rstCode,
736 }
737 addBackStreamQuota := func(interface{}) bool {
738 t.streamQuota++
739 if t.streamQuota > 0 && t.waitingStreams > 0 {
740 select {
741 case t.streamsQuotaAvailable <- struct{}{}:
742 default:
743 }
744 }
745 return true
746 }
747 t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
748 // This will unblock write.
749 close(s.done)
750}
751
752// Close kicks off the shutdown process of the transport. This should be called
753// only once on a transport. Once it is called, the transport should not be
754// accessed any more.
755//
756// This method blocks until the addrConn that initiated this transport is
757// re-connected. This happens because t.onClose() begins reconnect logic at the
758// addrConn level and blocks until the addrConn is successfully connected.
759func (t *http2Client) Close() error {
760 t.mu.Lock()
761 // Make sure we only Close once.
762 if t.state == closing {
763 t.mu.Unlock()
764 return nil
765 }
766 t.state = closing
767 streams := t.activeStreams
768 t.activeStreams = nil
769 t.mu.Unlock()
770 t.controlBuf.finish()
771 t.cancel()
772 err := t.conn.Close()
773 if channelz.IsOn() {
774 channelz.RemoveEntry(t.channelzID)
775 }
776 // Notify all active streams.
777 for _, s := range streams {
778 t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
779 }
780 if t.statsHandler != nil {
781 connEnd := &stats.ConnEnd{
782 Client: true,
783 }
784 t.statsHandler.HandleConn(t.ctx, connEnd)
785 }
786 t.onClose()
787 return err
788}
789
790// GracefulClose sets the state to draining, which prevents new streams from
791// being created and causes the transport to be closed when the last active
792// stream is closed. If there are no active streams, the transport is closed
793// immediately. This does nothing if the transport is already draining or
794// closing.
795func (t *http2Client) GracefulClose() error {
796 t.mu.Lock()
797 // Make sure we move to draining only from active.
798 if t.state == draining || t.state == closing {
799 t.mu.Unlock()
800 return nil
801 }
802 t.state = draining
803 active := len(t.activeStreams)
804 t.mu.Unlock()
805 if active == 0 {
806 return t.Close()
807 }
808 t.controlBuf.put(&incomingGoAway{})
809 return nil
810}
811
812// Write formats the data into HTTP2 data frame(s) and sends them out. The caller
813// should proceed only if Write returns nil.
814func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
815 if opts.Last {
816 // If it's the last message, update stream state.
817 if !s.compareAndSwapState(streamActive, streamWriteDone) {
818 return errStreamDone
819 }
820 } else if s.getState() != streamActive {
821 return errStreamDone
822 }
823 df := &dataFrame{
824 streamID: s.id,
825 endStream: opts.Last,
826 }
827 if hdr != nil || data != nil { // If it's not an empty data frame.
828 // Add some data to the gRPC message header so that we can distribute
829 // bytes equally across frames.
830 emptyLen := http2MaxFrameLen - len(hdr)
831 if emptyLen > len(data) {
832 emptyLen = len(data)
833 }
834 hdr = append(hdr, data[:emptyLen]...)
835 data = data[emptyLen:]
836 df.h, df.d = hdr, data
837 // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
838 if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
839 return err
840 }
841 }
842 return t.controlBuf.put(df)
843}
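The header/data split above is simple arithmetic; the sketch below replicates it under the assumption that http2MaxFrameLen is 16384 bytes, which matches this package's defaults.

package main

import "fmt"

const http2MaxFrameLen = 16384 // assumed; see defaults.go in this package

// splitForFrames moves the first bytes of data into the header slice so that
// loopyWriter can fill HTTP/2 frames more evenly, as in (*http2Client).Write.
func splitForFrames(hdr, data []byte) (h, d []byte) {
	emptyLen := http2MaxFrameLen - len(hdr)
	if emptyLen > len(data) {
		emptyLen = len(data)
	}
	h = append(hdr, data[:emptyLen]...)
	d = data[emptyLen:]
	return h, d
}

func main() {
	hdr := make([]byte, 5)      // the 5-byte gRPC message header
	data := make([]byte, 20000) // hypothetical payload
	h, d := splitForFrames(hdr, data)
	fmt.Println(len(h), len(d)) // 16384 3621
}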
844
845func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
846 t.mu.Lock()
847 defer t.mu.Unlock()
848 s, ok := t.activeStreams[f.Header().StreamID]
849 return s, ok
850}
851
852// adjustWindow sends out an extra window update, beyond the stream's
853// initial window size, if the application is requesting data larger
854// than the window.
855func (t *http2Client) adjustWindow(s *Stream, n uint32) {
856 if w := s.fc.maybeAdjust(n); w > 0 {
857 t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
858 }
859}
860
861// updateWindow adjusts the inbound quota for the stream.
862// Window updates will be sent out when the cumulative quota
863// exceeds the corresponding threshold.
864func (t *http2Client) updateWindow(s *Stream, n uint32) {
865 if w := s.fc.onRead(n); w > 0 {
866 t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
867 }
868}
869
870// updateFlowControl updates the incoming flow control windows
871// for the transport and the stream based on the current bdp
872// estimation.
873func (t *http2Client) updateFlowControl(n uint32) {
874 t.mu.Lock()
875 for _, s := range t.activeStreams {
876 s.fc.newLimit(n)
877 }
878 t.mu.Unlock()
879 updateIWS := func(interface{}) bool {
880 t.initialWindowSize = int32(n)
881 return true
882 }
883 t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
884 t.controlBuf.put(&outgoingSettings{
885 ss: []http2.Setting{
886 {
887 ID: http2.SettingInitialWindowSize,
888 Val: n,
889 },
890 },
891 })
892}
893
894func (t *http2Client) handleData(f *http2.DataFrame) {
895 size := f.Header().Length
896 var sendBDPPing bool
897 if t.bdpEst != nil {
898 sendBDPPing = t.bdpEst.add(size)
899 }
900 // Decouple the connection's flow control from the application's read.
901 // An update on the connection's flow control should not depend on
902 // whether the user application has read the data or not. Such a
903 // restriction is already imposed on the stream's flow control,
904 // and therefore the sender will be blocked anyway.
905 // Decoupling the connection flow control will prevent other
906 // active (fast) streams from starving in the presence of slow or
907 // inactive streams.
908 //
909 if w := t.fc.onData(size); w > 0 {
910 t.controlBuf.put(&outgoingWindowUpdate{
911 streamID: 0,
912 increment: w,
913 })
914 }
915 if sendBDPPing {
916 // Avoid excessive ping detection (e.g. in an L7 proxy)
917 // by sending a window update prior to the BDP ping.
918
919 if w := t.fc.reset(); w > 0 {
920 t.controlBuf.put(&outgoingWindowUpdate{
921 streamID: 0,
922 increment: w,
923 })
924 }
925
926 t.controlBuf.put(bdpPing)
927 }
928 // Select the right stream to dispatch.
929 s, ok := t.getStream(f)
930 if !ok {
931 return
932 }
933 if size > 0 {
934 if err := s.fc.onData(size); err != nil {
935 t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
936 return
937 }
938 if f.Header().Flags.Has(http2.FlagDataPadded) {
939 if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
940 t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
941 }
942 }
943 // TODO(bradfitz, zhaoq): A copy is required here because there is no
944 // guarantee f.Data() is consumed before the arrival of next frame.
945 // Can this copy be eliminated?
946 if len(f.Data()) > 0 {
947 data := make([]byte, len(f.Data()))
948 copy(data, f.Data())
949 s.write(recvMsg{data: data})
950 }
951 }
952 // The server has closed the stream without sending trailers. Record that
953 // the read direction is closed, and set the status appropriately.
954 if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
955 t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
956 }
957}
958
959func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
960 s, ok := t.getStream(f)
961 if !ok {
962 return
963 }
964 if f.ErrCode == http2.ErrCodeRefusedStream {
965 // The stream was unprocessed by the server.
966 atomic.StoreUint32(&s.unprocessed, 1)
967 }
968 statusCode, ok := http2ErrConvTab[f.ErrCode]
969 if !ok {
970 warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
971 statusCode = codes.Unknown
972 }
973 if statusCode == codes.Canceled {
974 // Our deadline was already exceeded, and that was likely the cause of
975 // this cancelation. Alter the status code accordingly.
976 if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
977 statusCode = codes.DeadlineExceeded
978 }
979 }
980 t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
981}
982
983func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
984 if f.IsAck() {
985 return
986 }
987 var maxStreams *uint32
988 var ss []http2.Setting
989 var updateFuncs []func()
990 f.ForeachSetting(func(s http2.Setting) error {
991 switch s.ID {
992 case http2.SettingMaxConcurrentStreams:
993 maxStreams = new(uint32)
994 *maxStreams = s.Val
995 case http2.SettingMaxHeaderListSize:
996 updateFuncs = append(updateFuncs, func() {
997 t.maxSendHeaderListSize = new(uint32)
998 *t.maxSendHeaderListSize = s.Val
999 })
1000 default:
1001 ss = append(ss, s)
1002 }
1003 return nil
1004 })
1005 if isFirst && maxStreams == nil {
1006 maxStreams = new(uint32)
1007 *maxStreams = math.MaxUint32
1008 }
1009 sf := &incomingSettings{
1010 ss: ss,
1011 }
1012 if maxStreams != nil {
1013 updateStreamQuota := func() {
1014 delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
1015 t.maxConcurrentStreams = *maxStreams
1016 t.streamQuota += delta
1017 if delta > 0 && t.waitingStreams > 0 {
1018 close(t.streamsQuotaAvailable) // wake all of them up.
1019 t.streamsQuotaAvailable = make(chan struct{}, 1)
1020 }
1021 }
1022 updateFuncs = append(updateFuncs, updateStreamQuota)
1023 }
1024 t.controlBuf.executeAndPut(func(interface{}) bool {
1025 for _, f := range updateFuncs {
1026 f()
1027 }
1028 return true
1029 }, sf)
1030}
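The quota bookkeeping inside updateStreamQuota reduces to signed arithmetic on the difference between the old and new MAX_CONCURRENT_STREAMS values; here is a tiny standalone sketch with hypothetical numbers.

package main

import "fmt"

// applyMaxStreams mirrors updateStreamQuota: the delta between the old and new
// limits is added to the running quota, which may legitimately go negative.
func applyMaxStreams(streamQuota int64, oldMax, newMax uint32) (int64, uint32) {
	delta := int64(newMax) - int64(oldMax)
	return streamQuota + delta, newMax
}

func main() {
	quota, max := applyMaxStreams(3, 100, 10) // the server lowered its limit
	fmt.Println(quota, max)                   // -87 10
}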
1031
1032func (t *http2Client) handlePing(f *http2.PingFrame) {
1033 if f.IsAck() {
1034 // Maybe it's a BDP ping.
1035 if t.bdpEst != nil {
1036 t.bdpEst.calculate(f.Data)
1037 }
1038 return
1039 }
1040 pingAck := &ping{ack: true}
1041 copy(pingAck.data[:], f.Data[:])
1042 t.controlBuf.put(pingAck)
1043}
1044
1045func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
1046 t.mu.Lock()
1047 if t.state == closing {
1048 t.mu.Unlock()
1049 return
1050 }
1051 if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
1052 infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
1053 }
1054 id := f.LastStreamID
1055 if id > 0 && id%2 != 1 {
1056 t.mu.Unlock()
1057 t.Close()
1058 return
1059 }
1060 // A client can receive multiple GoAways from the server (see
1061 // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
1062 // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
1063 // sent after an RTT delay with the ID of the last stream the server will
1064 // process.
1065 //
1066 // Therefore, when we get the first GoAway we don't necessarily close any
1067 // streams. On the second GoAway, however, we close all streams created after
1068 // the GoAway ID. This way, streams that were in flight while the GoAway from
1069 // the server was being sent don't get killed.
1070 select {
1071 case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
1072 // If there are multiple GoAways the first one should always have an ID greater than the following ones.
1073 if id > t.prevGoAwayID {
1074 t.mu.Unlock()
1075 t.Close()
1076 return
1077 }
1078 default:
1079 t.setGoAwayReason(f)
1080 close(t.goAway)
1081 t.state = draining
1082 t.controlBuf.put(&incomingGoAway{})
1083
1084 // This has to be a new goroutine because we're still using the current goroutine to read in the transport.
1085 t.onGoAway(t.goAwayReason)
1086 }
1087 // All streams with IDs greater than the GoAway ID
1088 // and smaller than the previous GoAway ID should be killed.
1089 upperLimit := t.prevGoAwayID
1090 if upperLimit == 0 { // This is the first GoAway Frame.
1091 upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
1092 }
1093 for streamID, stream := range t.activeStreams {
1094 if streamID > id && streamID <= upperLimit {
1095 // The stream was unprocessed by the server.
1096 atomic.StoreUint32(&stream.unprocessed, 1)
1097 t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
1098 }
1099 }
1100 t.prevGoAwayID = id
1101 active := len(t.activeStreams)
1102 t.mu.Unlock()
1103 if active == 0 {
1104 t.Close()
1105 }
1106}
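The stream-killing rule in handleGoAway can be expressed as a small pure function; the sketch below is not part of the transport and uses hypothetical stream IDs.

package main

import (
	"fmt"
	"math"
)

// killedStreams returns the IDs in (goAwayID, upperLimit] that the loop above
// would treat as unprocessed and close with errStreamDrain.
func killedStreams(active []uint32, goAwayID, prevGoAwayID uint32) []uint32 {
	upperLimit := prevGoAwayID
	if upperLimit == 0 { // first GoAway frame seen on this connection
		upperLimit = math.MaxUint32
	}
	var killed []uint32
	for _, id := range active {
		if id > goAwayID && id <= upperLimit {
			killed = append(killed, id)
		}
	}
	return killed
}

func main() {
	// Client stream IDs are odd; a first GoAway with LastStreamID 7 kills 9 and 11.
	fmt.Println(killedStreams([]uint32{1, 3, 5, 7, 9, 11}, 7, 0)) // [9 11]
}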
1107
1108// setGoAwayReason sets the value of t.goAwayReason based
1109// on the GoAway frame received.
1110// It expects a lock on the transport's mutex to be held by
1111// the caller.
1112func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
1113 t.goAwayReason = GoAwayNoReason
1114 switch f.ErrCode {
1115 case http2.ErrCodeEnhanceYourCalm:
1116 if string(f.DebugData()) == "too_many_pings" {
1117 t.goAwayReason = GoAwayTooManyPings
1118 }
1119 }
1120}
1121
1122func (t *http2Client) GetGoAwayReason() GoAwayReason {
1123 t.mu.Lock()
1124 defer t.mu.Unlock()
1125 return t.goAwayReason
1126}
1127
1128func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
1129 t.controlBuf.put(&incomingWindowUpdate{
1130 streamID: f.Header().StreamID,
1131 increment: f.Increment,
1132 })
1133}
1134
1135// operateHeaders takes action on the decoded headers.
1136func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
1137 s, ok := t.getStream(frame)
1138 if !ok {
1139 return
1140 }
1141 atomic.StoreUint32(&s.bytesReceived, 1)
1142 var state decodeState
1143 if err := state.decodeHeader(frame); err != nil {
1144 t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false)
1145 // Something wrong. Stops reading even when there is remaining.
1146 return
1147 }
1148
1149 endStream := frame.StreamEnded()
1150 var isHeader bool
1151 defer func() {
1152 if t.statsHandler != nil {
1153 if isHeader {
1154 inHeader := &stats.InHeader{
1155 Client: true,
1156 WireLength: int(frame.Header().Length),
1157 }
1158 t.statsHandler.HandleRPC(s.ctx, inHeader)
1159 } else {
1160 inTrailer := &stats.InTrailer{
1161 Client: true,
1162 WireLength: int(frame.Header().Length),
1163 }
1164 t.statsHandler.HandleRPC(s.ctx, inTrailer)
1165 }
1166 }
1167 }()
1168 // If headers haven't been received yet.
1169 if atomic.SwapUint32(&s.headerDone, 1) == 0 {
1170 if !endStream {
1171 // Headers frame is not actually a trailers-only frame.
1172 isHeader = true
1173 // These values can be set without any synchronization because
1174 // the stream goroutine will read them only after seeing a closed
1175 // headerChan, which we'll close after setting them.
1176 s.recvCompress = state.encoding
1177 if len(state.mdata) > 0 {
1178 s.header = state.mdata
1179 }
1180 } else {
1181 s.noHeaders = true
1182 }
1183 close(s.headerChan)
1184 }
1185 if !endStream {
1186 return
1187 }
1188 // If the client received END_STREAM from the server while the stream was still active, send RST_STREAM.
1189 rst := s.getState() == streamActive
1190 t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true)
1191}
1192
1193// reader runs as a separate goroutine in charge of reading data from the
1194// network connection.
1195//
1196// TODO(zhaoq): currently one reader per transport. Investigate whether this is
1197// optimal.
1198// TODO(zhaoq): Check the validity of the incoming frame sequence.
1199func (t *http2Client) reader() {
1200 defer close(t.readerDone)
1201 // Check the validity of server preface.
1202 frame, err := t.framer.fr.ReadFrame()
1203 if err != nil {
1204 t.Close() // this kicks off resetTransport, so must be last before return
1205 return
1206 }
1207 t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
1208 if t.keepaliveEnabled {
1209 atomic.CompareAndSwapUint32(&t.activity, 0, 1)
1210 }
1211 sf, ok := frame.(*http2.SettingsFrame)
1212 if !ok {
1213 t.Close() // this kicks off resetTransport, so must be last before return
1214 return
1215 }
1216 t.onPrefaceReceipt()
1217 t.handleSettings(sf, true)
1218
1219 // loop to keep reading incoming messages on this transport.
1220 for {
1221 frame, err := t.framer.fr.ReadFrame()
1222 if t.keepaliveEnabled {
1223 atomic.CompareAndSwapUint32(&t.activity, 0, 1)
1224 }
1225 if err != nil {
1226 // Abort an active stream if the http2.Framer returns an
1227 // http2.StreamError. This can happen only if the server's response
1228 // is malformed HTTP/2.
1229 if se, ok := err.(http2.StreamError); ok {
1230 t.mu.Lock()
1231 s := t.activeStreams[se.StreamID]
1232 t.mu.Unlock()
1233 if s != nil {
1234 // Use the error detail to provide a better error message.
1235 code := http2ErrConvTab[se.Code]
1236 msg := t.framer.fr.ErrorDetail().Error()
1237 t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
1238 }
1239 continue
1240 } else {
1241 // Transport error.
1242 t.Close()
1243 return
1244 }
1245 }
1246 switch frame := frame.(type) {
1247 case *http2.MetaHeadersFrame:
1248 t.operateHeaders(frame)
1249 case *http2.DataFrame:
1250 t.handleData(frame)
1251 case *http2.RSTStreamFrame:
1252 t.handleRSTStream(frame)
1253 case *http2.SettingsFrame:
1254 t.handleSettings(frame, false)
1255 case *http2.PingFrame:
1256 t.handlePing(frame)
1257 case *http2.GoAwayFrame:
1258 t.handleGoAway(frame)
1259 case *http2.WindowUpdateFrame:
1260 t.handleWindowUpdate(frame)
1261 default:
1262 errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
1263 }
1264 }
1265}
1266
1267// keepalive, running in a separate goroutine, makes sure the connection is alive by sending pings.
1268func (t *http2Client) keepalive() {
1269 p := &ping{data: [8]byte{}}
1270 timer := time.NewTimer(t.kp.Time)
1271 for {
1272 select {
1273 case <-timer.C:
1274 if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
1275 timer.Reset(t.kp.Time)
1276 continue
1277 }
1278 // Check if keepalive should go dormant.
1279 t.mu.Lock()
1280 if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
1281 // Make awakenKeepalive writable.
1282 <-t.awakenKeepalive
1283 t.mu.Unlock()
1284 select {
1285 case <-t.awakenKeepalive:
1286 // If control gets here, a ping has been sent;
1287 // we need to reset the timer with keepalive.Timeout.
1288 case <-t.ctx.Done():
1289 return
1290 }
1291 } else {
1292 t.mu.Unlock()
1293 if channelz.IsOn() {
1294 atomic.AddInt64(&t.czData.kpCount, 1)
1295 }
1296 // Send ping.
1297 t.controlBuf.put(p)
1298 }
1299
1300 // By the time control gets here, a ping has been sent one way or the other.
1301 timer.Reset(t.kp.Timeout)
1302 select {
1303 case <-timer.C:
1304 if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
1305 timer.Reset(t.kp.Time)
1306 continue
1307 }
1308 t.Close()
1309 return
1310 case <-t.ctx.Done():
1311 if !timer.Stop() {
1312 <-timer.C
1313 }
1314 return
1315 }
1316 case <-t.ctx.Done():
1317 if !timer.Stop() {
1318 <-timer.C
1319 }
1320 return
1321 }
1322 }
1323}
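The keepalive parameters (t.kp) that drive this goroutine come from the application's dial options. Below is a hedged usage sketch; the target address and durations are placeholders, and grpc.WithInsecure is used only to keep the example short.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.Dial("example.com:443",
		grpc.WithInsecure(), // illustrative only; production code should configure TLS
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second, // ping after 30s without activity
			Timeout:             10 * time.Second, // close the connection if the ack takes longer
			PermitWithoutStream: true,             // keep pinging even with no active RPCs
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}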
1324
1325func (t *http2Client) Error() <-chan struct{} {
1326 return t.ctx.Done()
1327}
1328
1329func (t *http2Client) GoAway() <-chan struct{} {
1330 return t.goAway
1331}
1332
1333func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
1334 s := channelz.SocketInternalMetric{
1335 StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
1336 StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
1337 StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
1338 MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
1339 MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
1340 KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
1341 LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
1342 LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
1343 LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
1344 LocalFlowControlWindow: int64(t.fc.getSize()),
1345 SocketOptions: channelz.GetSocketOption(t.conn),
1346 LocalAddr: t.localAddr,
1347 RemoteAddr: t.remoteAddr,
1348 // RemoteName :
1349 }
1350 if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
1351 s.Security = au.GetSecurityValue()
1352 }
1353 s.RemoteFlowControlWindow = t.getOutFlowWindow()
1354 return &s
1355}
1356
1357func (t *http2Client) IncrMsgSent() {
1358 atomic.AddInt64(&t.czData.msgSent, 1)
1359 atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
1360}
1361
1362func (t *http2Client) IncrMsgRecv() {
1363 atomic.AddInt64(&t.czData.msgRecv, 1)
1364 atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
1365}
1366
1367func (t *http2Client) getOutFlowWindow() int64 {
1368 resp := make(chan uint32, 1)
1369 timer := time.NewTimer(time.Second)
1370 defer timer.Stop()
1371 t.controlBuf.put(&outFlowControlSizeRequest{resp})
1372 select {
1373 case sz := <-resp:
1374 return int64(sz)
1375 case <-t.ctxDone:
1376 return -1
1377 case <-timer.C:
1378 return -2
1379 }
1380}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
new file mode 100644
index 0000000..df27403
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -0,0 +1,1180 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "bytes"
23 "context"
24 "errors"
25 "fmt"
26 "io"
27 "math"
28 "net"
29 "strconv"
30 "sync"
31 "sync/atomic"
32 "time"
33
34 "github.com/golang/protobuf/proto"
35 "golang.org/x/net/http2"
36 "golang.org/x/net/http2/hpack"
37
38 "google.golang.org/grpc/codes"
39 "google.golang.org/grpc/credentials"
40 "google.golang.org/grpc/grpclog"
41 "google.golang.org/grpc/internal/channelz"
42 "google.golang.org/grpc/internal/grpcrand"
43 "google.golang.org/grpc/keepalive"
44 "google.golang.org/grpc/metadata"
45 "google.golang.org/grpc/peer"
46 "google.golang.org/grpc/stats"
47 "google.golang.org/grpc/status"
48 "google.golang.org/grpc/tap"
49)
50
51var (
52 // ErrIllegalHeaderWrite indicates that setting header is illegal because of
53 // the stream's state.
54 ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
55 // ErrHeaderListSizeLimitViolation indicates that the header list size is larger
56 // than the limit set by peer.
57 ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
58)
59
60// http2Server implements the ServerTransport interface with HTTP2.
61type http2Server struct {
62 ctx context.Context
63 ctxDone <-chan struct{} // Cache the context.Done() chan
64 cancel context.CancelFunc
65 conn net.Conn
66 loopy *loopyWriter
67 readerDone chan struct{} // sync point to enable testing.
68 writerDone chan struct{} // sync point to enable testing.
69 remoteAddr net.Addr
70 localAddr net.Addr
71 maxStreamID uint32 // max stream ID ever seen
72 authInfo credentials.AuthInfo // auth info about the connection
73 inTapHandle tap.ServerInHandle
74 framer *framer
75 // The max number of concurrent streams.
76 maxStreams uint32
77 // controlBuf delivers all the control related tasks (e.g., window
78 // updates, reset streams, and various settings) to the controller.
79 controlBuf *controlBuffer
80 fc *trInFlow
81 stats stats.Handler
82 // Flag to keep track of reading activity on transport.
83 // 1 is true and 0 is false.
84 activity uint32 // Accessed atomically.
85 // Keepalive and max-age parameters for the server.
86 kp keepalive.ServerParameters
87
88 // Keepalive enforcement policy.
89 kep keepalive.EnforcementPolicy
91 // The time instant at which the last ping was received.
91 lastPingAt time.Time
92 // Number of times the client has violated keepalive ping policy so far.
93 pingStrikes uint8
94 // Flag to signify that the number of ping strikes should be reset to 0.
95 // This is set whenever data or header frames are sent.
96 // 1 means yes.
97 resetPingStrikes uint32 // Accessed atomically.
98 initialWindowSize int32
99 bdpEst *bdpEstimator
100 maxSendHeaderListSize *uint32
101
102 mu sync.Mutex // guard the following
103
104 // drainChan is initialized when drain(...) is called the first time,
105 // after which the server writes out the first GoAway frame (with ID 2^31-1).
106 // Then an independent goroutine is launched to later send the second GoAway.
107 // During this time we don't want to write another first GoAway (with ID 2^31-1) frame.
108 // Thus a call to drain(...) is a no-op if drainChan is already initialized, since draining is
109 // already underway.
110 drainChan chan struct{}
111 state transportState
112 activeStreams map[uint32]*Stream
113 // idle is the time instant when the connection went idle.
114 // This is either the beginning of the connection or when the number of
115 // RPCs goes down to 0.
116 // When the connection is busy, this value is set to the zero time.
117 idle time.Time
118
119 // Fields below are for channelz metric collection.
120 channelzID int64 // channelz unique identification number
121 czData *channelzData
122}
123
124// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
125// returned if something goes wrong.
126func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
127 writeBufSize := config.WriteBufferSize
128 readBufSize := config.ReadBufferSize
129 maxHeaderListSize := defaultServerMaxHeaderListSize
130 if config.MaxHeaderListSize != nil {
131 maxHeaderListSize = *config.MaxHeaderListSize
132 }
133 framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
134 // Send initial settings as connection preface to client.
135 var isettings []http2.Setting
136 // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
137 // permitted in the HTTP2 spec.
138 maxStreams := config.MaxStreams
139 if maxStreams == 0 {
140 maxStreams = math.MaxUint32
141 } else {
142 isettings = append(isettings, http2.Setting{
143 ID: http2.SettingMaxConcurrentStreams,
144 Val: maxStreams,
145 })
146 }
147 dynamicWindow := true
148 iwz := int32(initialWindowSize)
149 if config.InitialWindowSize >= defaultWindowSize {
150 iwz = config.InitialWindowSize
151 dynamicWindow = false
152 }
153 icwz := int32(initialWindowSize)
154 if config.InitialConnWindowSize >= defaultWindowSize {
155 icwz = config.InitialConnWindowSize
156 dynamicWindow = false
157 }
158 if iwz != defaultWindowSize {
159 isettings = append(isettings, http2.Setting{
160 ID: http2.SettingInitialWindowSize,
161 Val: uint32(iwz)})
162 }
163 if config.MaxHeaderListSize != nil {
164 isettings = append(isettings, http2.Setting{
165 ID: http2.SettingMaxHeaderListSize,
166 Val: *config.MaxHeaderListSize,
167 })
168 }
169 if err := framer.fr.WriteSettings(isettings...); err != nil {
170 return nil, connectionErrorf(false, err, "transport: %v", err)
171 }
172 // Adjust the connection flow control window if needed.
173 if delta := uint32(icwz - defaultWindowSize); delta > 0 {
174 if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
175 return nil, connectionErrorf(false, err, "transport: %v", err)
176 }
177 }
178 kp := config.KeepaliveParams
179 if kp.MaxConnectionIdle == 0 {
180 kp.MaxConnectionIdle = defaultMaxConnectionIdle
181 }
182 if kp.MaxConnectionAge == 0 {
183 kp.MaxConnectionAge = defaultMaxConnectionAge
184 }
185 // Add a jitter to MaxConnectionAge.
186 kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
187 if kp.MaxConnectionAgeGrace == 0 {
188 kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
189 }
190 if kp.Time == 0 {
191 kp.Time = defaultServerKeepaliveTime
192 }
193 if kp.Timeout == 0 {
194 kp.Timeout = defaultServerKeepaliveTimeout
195 }
196 kep := config.KeepalivePolicy
197 if kep.MinTime == 0 {
198 kep.MinTime = defaultKeepalivePolicyMinTime
199 }
200 ctx, cancel := context.WithCancel(context.Background())
201 t := &http2Server{
202 ctx: ctx,
203 cancel: cancel,
204 ctxDone: ctx.Done(),
205 conn: conn,
206 remoteAddr: conn.RemoteAddr(),
207 localAddr: conn.LocalAddr(),
208 authInfo: config.AuthInfo,
209 framer: framer,
210 readerDone: make(chan struct{}),
211 writerDone: make(chan struct{}),
212 maxStreams: maxStreams,
213 inTapHandle: config.InTapHandle,
214 fc: &trInFlow{limit: uint32(icwz)},
215 state: reachable,
216 activeStreams: make(map[uint32]*Stream),
217 stats: config.StatsHandler,
218 kp: kp,
219 idle: time.Now(),
220 kep: kep,
221 initialWindowSize: iwz,
222 czData: new(channelzData),
223 }
224 t.controlBuf = newControlBuffer(t.ctxDone)
225 if dynamicWindow {
226 t.bdpEst = &bdpEstimator{
227 bdp: initialWindowSize,
228 updateFlowControl: t.updateFlowControl,
229 }
230 }
231 if t.stats != nil {
232 t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
233 RemoteAddr: t.remoteAddr,
234 LocalAddr: t.localAddr,
235 })
236 connBegin := &stats.ConnBegin{}
237 t.stats.HandleConn(t.ctx, connBegin)
238 }
239 if channelz.IsOn() {
240 t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
241 }
242 t.framer.writer.Flush()
243
244 defer func() {
245 if err != nil {
246 t.Close()
247 }
248 }()
249
250 // Check the validity of client preface.
251 preface := make([]byte, len(clientPreface))
252 if _, err := io.ReadFull(t.conn, preface); err != nil {
253 return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
254 }
255 if !bytes.Equal(preface, clientPreface) {
256 return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
257 }
258
259 frame, err := t.framer.fr.ReadFrame()
260 if err == io.EOF || err == io.ErrUnexpectedEOF {
261 return nil, err
262 }
263 if err != nil {
264 return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
265 }
266 atomic.StoreUint32(&t.activity, 1)
267 sf, ok := frame.(*http2.SettingsFrame)
268 if !ok {
269 return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
270 }
271 t.handleSettings(sf)
272
273 go func() {
274 t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
275 t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
276 if err := t.loopy.run(); err != nil {
277 errorf("transport: loopyWriter.run returning. Err: %v", err)
278 }
279 t.conn.Close()
280 close(t.writerDone)
281 }()
282 go t.keepalive()
283 return t, nil
284}
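The window sizes consumed above (config.InitialWindowSize and config.InitialConnWindowSize) are populated from server options. Here is a minimal sketch with placeholder values; as the constructor shows, setting either value to at least the default window size also disables the dynamic BDP-based window.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(
		grpc.InitialWindowSize(1<<20),     // 1 MiB per-stream window
		grpc.InitialConnWindowSize(1<<20), // 1 MiB connection window
	)
	// Service registration is omitted in this sketch.
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}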
285
286// operateHeaders takes action on the decoded headers.
287func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
288 streamID := frame.Header().StreamID
289 state := decodeState{serverSide: true}
290 if err := state.decodeHeader(frame); err != nil {
291 if se, ok := status.FromError(err); ok {
292 t.controlBuf.put(&cleanupStream{
293 streamID: streamID,
294 rst: true,
295 rstCode: statusCodeConvTab[se.Code()],
296 onWrite: func() {},
297 })
298 }
299 return false
300 }
301
302 buf := newRecvBuffer()
303 s := &Stream{
304 id: streamID,
305 st: t,
306 buf: buf,
307 fc: &inFlow{limit: uint32(t.initialWindowSize)},
308 recvCompress: state.encoding,
309 method: state.method,
310 contentSubtype: state.contentSubtype,
311 }
312 if frame.StreamEnded() {
313 // s is just created by the caller. No lock needed.
314 s.state = streamReadDone
315 }
316 if state.timeoutSet {
317 s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
318 } else {
319 s.ctx, s.cancel = context.WithCancel(t.ctx)
320 }
321 pr := &peer.Peer{
322 Addr: t.remoteAddr,
323 }
324 // Attach Auth info if there is any.
325 if t.authInfo != nil {
326 pr.AuthInfo = t.authInfo
327 }
328 s.ctx = peer.NewContext(s.ctx, pr)
329 // Attach the received metadata to the context.
330 if len(state.mdata) > 0 {
331 s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
332 }
333 if state.statsTags != nil {
334 s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags)
335 }
336 if state.statsTrace != nil {
337 s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace)
338 }
339 if t.inTapHandle != nil {
340 var err error
341 info := &tap.Info{
342 FullMethodName: state.method,
343 }
344 s.ctx, err = t.inTapHandle(s.ctx, info)
345 if err != nil {
346 warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
347 t.controlBuf.put(&cleanupStream{
348 streamID: s.id,
349 rst: true,
350 rstCode: http2.ErrCodeRefusedStream,
351 onWrite: func() {},
352 })
353 return false
354 }
355 }
356 t.mu.Lock()
357 if t.state != reachable {
358 t.mu.Unlock()
359 return false
360 }
361 if uint32(len(t.activeStreams)) >= t.maxStreams {
362 t.mu.Unlock()
363 t.controlBuf.put(&cleanupStream{
364 streamID: streamID,
365 rst: true,
366 rstCode: http2.ErrCodeRefusedStream,
367 onWrite: func() {},
368 })
369 return false
370 }
371 if streamID%2 != 1 || streamID <= t.maxStreamID {
372 t.mu.Unlock()
373 // illegal gRPC stream id.
374 errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
375 return true
376 }
377 t.maxStreamID = streamID
378 t.activeStreams[streamID] = s
379 if len(t.activeStreams) == 1 {
380 t.idle = time.Time{}
381 }
382 t.mu.Unlock()
383 if channelz.IsOn() {
384 atomic.AddInt64(&t.czData.streamsStarted, 1)
385 atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
386 }
387 s.requestRead = func(n int) {
388 t.adjustWindow(s, uint32(n))
389 }
390 s.ctx = traceCtx(s.ctx, s.method)
391 if t.stats != nil {
392 s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
393 inHeader := &stats.InHeader{
394 FullMethod: s.method,
395 RemoteAddr: t.remoteAddr,
396 LocalAddr: t.localAddr,
397 Compression: s.recvCompress,
398 WireLength: int(frame.Header().Length),
399 }
400 t.stats.HandleRPC(s.ctx, inHeader)
401 }
402 s.ctxDone = s.ctx.Done()
403 s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
404 s.trReader = &transportReader{
405 reader: &recvBufferReader{
406 ctx: s.ctx,
407 ctxDone: s.ctxDone,
408 recv: s.buf,
409 },
410 windowHandler: func(n int) {
411 t.updateWindow(s, uint32(n))
412 },
413 }
414 // Register the stream with loopy.
415 t.controlBuf.put(&registerStream{
416 streamID: s.id,
417 wq: s.wq,
418 })
419 handle(s)
420 return false
421}
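Downstream of operateHeaders, the metadata attached to s.ctx is what application code observes. A usage sketch with a hypothetical logging interceptor follows; service registration and serving are omitted.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// loggingInterceptor reads the incoming metadata that the transport attached
// to the stream context and logs the client's user-agent values.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if md, ok := metadata.FromIncomingContext(ctx); ok {
		log.Printf("%s called with user-agent %v", info.FullMethod, md["user-agent"])
	}
	return handler(ctx, req)
}

func main() {
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}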
422
423// HandleStreams receives incoming streams using the given handler. This is
424// typically run in a separate goroutine.
425// traceCtx attaches trace to ctx and returns the new context.
426func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
427 defer close(t.readerDone)
428 for {
429 frame, err := t.framer.fr.ReadFrame()
430 atomic.StoreUint32(&t.activity, 1)
431 if err != nil {
432 if se, ok := err.(http2.StreamError); ok {
433 warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
434 t.mu.Lock()
435 s := t.activeStreams[se.StreamID]
436 t.mu.Unlock()
437 if s != nil {
438 t.closeStream(s, true, se.Code, nil, false)
439 } else {
440 t.controlBuf.put(&cleanupStream{
441 streamID: se.StreamID,
442 rst: true,
443 rstCode: se.Code,
444 onWrite: func() {},
445 })
446 }
447 continue
448 }
449 if err == io.EOF || err == io.ErrUnexpectedEOF {
450 t.Close()
451 return
452 }
453 warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
454 t.Close()
455 return
456 }
457 switch frame := frame.(type) {
458 case *http2.MetaHeadersFrame:
459 if t.operateHeaders(frame, handle, traceCtx) {
460 t.Close()
461 break
462 }
463 case *http2.DataFrame:
464 t.handleData(frame)
465 case *http2.RSTStreamFrame:
466 t.handleRSTStream(frame)
467 case *http2.SettingsFrame:
468 t.handleSettings(frame)
469 case *http2.PingFrame:
470 t.handlePing(frame)
471 case *http2.WindowUpdateFrame:
472 t.handleWindowUpdate(frame)
473 case *http2.GoAwayFrame:
474 // TODO: Handle GoAway from the client appropriately.
475 default:
476 errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
477 }
478 }
479}
480
481func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
482 t.mu.Lock()
483 defer t.mu.Unlock()
484 if t.activeStreams == nil {
485 // The transport is closing.
486 return nil, false
487 }
488 s, ok := t.activeStreams[f.Header().StreamID]
489 if !ok {
490 // The stream is already done.
491 return nil, false
492 }
493 return s, true
494}
495
496// adjustWindow sends out an extra window update, beyond the stream's
497// initial window size, if the application is requesting data larger
498// than the window.
499func (t *http2Server) adjustWindow(s *Stream, n uint32) {
500 if w := s.fc.maybeAdjust(n); w > 0 {
501 t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
502 }
503
504}
505
506// updateWindow adjusts the inbound quota for the stream and the transport.
507// Window updates will be delivered to the controller for sending when
508// the cumulative quota exceeds the corresponding threshold.
509func (t *http2Server) updateWindow(s *Stream, n uint32) {
510 if w := s.fc.onRead(n); w > 0 {
511 t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
512 increment: w,
513 })
514 }
515}
516
517// updateFlowControl updates the incoming flow control windows
518// for the transport and the stream based on the current bdp
519// estimation.
520func (t *http2Server) updateFlowControl(n uint32) {
521 t.mu.Lock()
522 for _, s := range t.activeStreams {
523 s.fc.newLimit(n)
524 }
525 t.initialWindowSize = int32(n)
526 t.mu.Unlock()
527 t.controlBuf.put(&outgoingWindowUpdate{
528 streamID: 0,
529 increment: t.fc.newLimit(n),
530 })
531 t.controlBuf.put(&outgoingSettings{
532 ss: []http2.Setting{
533 {
534 ID: http2.SettingInitialWindowSize,
535 Val: n,
536 },
537 },
538 })
539
540}
541
542func (t *http2Server) handleData(f *http2.DataFrame) {
543 size := f.Header().Length
544 var sendBDPPing bool
545 if t.bdpEst != nil {
546 sendBDPPing = t.bdpEst.add(size)
547 }
548 // Decouple the connection's flow control from the application's read.
549 // An update on the connection's flow control should not depend on
550 // whether the user application has read the data or not. Such a
551 // restriction is already imposed on the stream's flow control,
552 // and therefore the sender will be blocked anyway.
553 // Decoupling the connection flow control will prevent other
554 // active (fast) streams from starving in the presence of slow or
555 // inactive streams.
556 if w := t.fc.onData(size); w > 0 {
557 t.controlBuf.put(&outgoingWindowUpdate{
558 streamID: 0,
559 increment: w,
560 })
561 }
562 if sendBDPPing {
563 // Avoid excessive ping detection (e.g. in an L7 proxy)
564 // by sending a window update prior to the BDP ping.
565 if w := t.fc.reset(); w > 0 {
566 t.controlBuf.put(&outgoingWindowUpdate{
567 streamID: 0,
568 increment: w,
569 })
570 }
571 t.controlBuf.put(bdpPing)
572 }
573 // Select the right stream to dispatch.
574 s, ok := t.getStream(f)
575 if !ok {
576 return
577 }
578 if size > 0 {
579 if err := s.fc.onData(size); err != nil {
580 t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false)
581 return
582 }
583 if f.Header().Flags.Has(http2.FlagDataPadded) {
584 if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
585 t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
586 }
587 }
588 // TODO(bradfitz, zhaoq): A copy is required here because there is no
589 // guarantee f.Data() is consumed before the arrival of next frame.
590 // Can this copy be eliminated?
591 if len(f.Data()) > 0 {
592 data := make([]byte, len(f.Data()))
593 copy(data, f.Data())
594 s.write(recvMsg{data: data})
595 }
596 }
597 if f.Header().Flags.Has(http2.FlagDataEndStream) {
598 // Received the end of stream from the client.
599 s.compareAndSwapState(streamActive, streamReadDone)
600 s.write(recvMsg{err: io.EOF})
601 }
602}
603
604func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
605 s, ok := t.getStream(f)
606 if !ok {
607 return
608 }
609 t.closeStream(s, false, 0, nil, false)
610}
611
612func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
613 if f.IsAck() {
614 return
615 }
616 var ss []http2.Setting
617 var updateFuncs []func()
618 f.ForeachSetting(func(s http2.Setting) error {
619 switch s.ID {
620 case http2.SettingMaxHeaderListSize:
621 updateFuncs = append(updateFuncs, func() {
622 t.maxSendHeaderListSize = new(uint32)
623 *t.maxSendHeaderListSize = s.Val
624 })
625 default:
626 ss = append(ss, s)
627 }
628 return nil
629 })
630 t.controlBuf.executeAndPut(func(interface{}) bool {
631 for _, f := range updateFuncs {
632 f()
633 }
634 return true
635 }, &incomingSettings{
636 ss: ss,
637 })
638}
639
640const (
641 maxPingStrikes = 2
642 defaultPingTimeout = 2 * time.Hour
643)
644
645func (t *http2Server) handlePing(f *http2.PingFrame) {
646 if f.IsAck() {
647 if f.Data == goAwayPing.data && t.drainChan != nil {
648 close(t.drainChan)
649 return
650 }
651 // Maybe it's a BDP ping.
652 if t.bdpEst != nil {
653 t.bdpEst.calculate(f.Data)
654 }
655 return
656 }
657 pingAck := &ping{ack: true}
658 copy(pingAck.data[:], f.Data[:])
659 t.controlBuf.put(pingAck)
660
661 now := time.Now()
662 defer func() {
663 t.lastPingAt = now
664 }()
665 // A reset of ping strikes means that we don't need to check for a policy
666 // violation for this ping, and the pingStrikes counter should be set
667 // to 0.
668 if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
669 t.pingStrikes = 0
670 return
671 }
672 t.mu.Lock()
673 ns := len(t.activeStreams)
674 t.mu.Unlock()
675 if ns < 1 && !t.kep.PermitWithoutStream {
676 // Keepalive shouldn't be active; thus, this new ping should
677 // have come after at least defaultPingTimeout.
678 if t.lastPingAt.Add(defaultPingTimeout).After(now) {
679 t.pingStrikes++
680 }
681 } else {
682 // Check if keepalive policy is respected.
683 if t.lastPingAt.Add(t.kep.MinTime).After(now) {
684 t.pingStrikes++
685 }
686 }
687
688 if t.pingStrikes > maxPingStrikes {
689 // Send goaway and close the connection.
690 errorf("transport: Got too many pings from the client, closing the connection.")
691 t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
692 }
693}
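The enforcement policy (t.kep) consulted above is supplied by the application through a server option. Below is a hedged sketch with illustrative values and a placeholder port.

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
		MinTime:             5 * time.Minute, // pings arriving more often than this count as strikes
		PermitWithoutStream: false,           // pings without active RPCs also count as strikes
	}))
	// Service registration is omitted in this sketch.
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}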
694
695func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
696 t.controlBuf.put(&incomingWindowUpdate{
697 streamID: f.Header().StreamID,
698 increment: f.Increment,
699 })
700}
701
702func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
703 for k, vv := range md {
704 if isReservedHeader(k) {
705 // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
706 continue
707 }
708 for _, v := range vv {
709 headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
710 }
711 }
712 return headerFields
713}
714
715func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
716 if t.maxSendHeaderListSize == nil {
717 return true
718 }
719 hdrFrame := it.(*headerFrame)
720 var sz int64
721 for _, f := range hdrFrame.hf {
722 if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
723 errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
724 return false
725 }
726 }
727 return true
728}
729
730// WriteHeader sends the header metadata md back to the client.
731func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
732 if s.updateHeaderSent() || s.getState() == streamDone {
733 return ErrIllegalHeaderWrite
734 }
735 s.hdrMu.Lock()
736 if md.Len() > 0 {
737 if s.header.Len() > 0 {
738 s.header = metadata.Join(s.header, md)
739 } else {
740 s.header = md
741 }
742 }
743 if err := t.writeHeaderLocked(s); err != nil {
744 s.hdrMu.Unlock()
745 return err
746 }
747 s.hdrMu.Unlock()
748 return nil
749}
750
751func (t *http2Server) writeHeaderLocked(s *Stream) error {
752 // TODO(mmukhi): Benchmark whether performance gets better if we count the metadata and other header fields
753 // first and create a slice of exactly that size.
754 headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
755 headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
756 headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
757 if s.sendCompress != "" {
758 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
759 }
760 headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
761 success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
762 streamID: s.id,
763 hf: headerFields,
764 endStream: false,
765 onWrite: func() {
766 atomic.StoreUint32(&t.resetPingStrikes, 1)
767 },
768 })
769 if !success {
770 if err != nil {
771 return err
772 }
773 t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
774 return ErrHeaderListSizeLimitViolation
775 }
776 if t.stats != nil {
777 // Note: WireLength is not set in outHeader.
778 // TODO(mmukhi): Revisit this later, if needed.
779 outHeader := &stats.OutHeader{}
780 t.stats.HandleRPC(s.Context(), outHeader)
781 }
782 return nil
783}
784
785// WriteStatus sends stream status to the client and terminates the stream.
786// No further I/O operations can be performed on this stream.
787// TODO(zhaoq): Right now it indicates the end of the entire stream. Revisit if
788// early OK is adopted.
789func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
790 if s.getState() == streamDone {
791 return nil
792 }
793 s.hdrMu.Lock()
794 // TODO(mmukhi): Benchmark whether performance gets better if we count the metadata and other header fields
795 // first and create a slice of exactly that size.
796 headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
797 if !s.updateHeaderSent() { // No headers have been sent.
798 if len(s.header) > 0 { // Send a separate header frame.
799 if err := t.writeHeaderLocked(s); err != nil {
800 s.hdrMu.Unlock()
801 return err
802 }
803 } else { // Send a trailer only response.
804 headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
805 headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
806 }
807 }
808 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
809 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
810
811 if p := st.Proto(); p != nil && len(p.Details) > 0 {
812 stBytes, err := proto.Marshal(p)
813 if err != nil {
814 // TODO: return error instead, when callers are able to handle it.
815 grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
816 } else {
817 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
818 }
819 }
820
821 // Attach the trailer metadata.
822 headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
823 trailingHeader := &headerFrame{
824 streamID: s.id,
825 hf: headerFields,
826 endStream: true,
827 onWrite: func() {
828 atomic.StoreUint32(&t.resetPingStrikes, 1)
829 },
830 }
831 s.hdrMu.Unlock()
832 success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
833 if !success {
834 if err != nil {
835 return err
836 }
837 t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
838 return ErrHeaderListSizeLimitViolation
839 }
840 t.closeStream(s, false, 0, trailingHeader, true)
841 if t.stats != nil {
842 t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
843 }
844 return nil
845}
846
847// Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
848// is returned if it fails (e.g., framing error, transport error).
849func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
850 if !s.isHeaderSent() { // Headers haven't been written yet.
851 if err := t.WriteHeader(s, nil); err != nil {
852 // TODO(mmukhi, dfawley): Make sure this is the right code to return.
853 return status.Errorf(codes.Internal, "transport: %v", err)
854 }
855 } else {
856 // Writing headers checks for this condition.
857 if s.getState() == streamDone {
858 // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
859 s.cancel()
860 select {
861 case <-t.ctx.Done():
862 return ErrConnClosing
863 default:
864 }
865 return ContextErr(s.ctx.Err())
866 }
867 }
868 // Add some data to header frame so that we can equally distribute bytes across frames.
869 emptyLen := http2MaxFrameLen - len(hdr)
870 if emptyLen > len(data) {
871 emptyLen = len(data)
872 }
873 hdr = append(hdr, data[:emptyLen]...)
874 data = data[emptyLen:]
875 df := &dataFrame{
876 streamID: s.id,
877 h: hdr,
878 d: data,
879 onEachWrite: func() {
880 atomic.StoreUint32(&t.resetPingStrikes, 1)
881 },
882 }
883 if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
884 select {
885 case <-t.ctx.Done():
886 return ErrConnClosing
887 default:
888 }
889 return ContextErr(s.ctx.Err())
890 }
891 return t.controlBuf.put(df)
892}
893
894// keepalive running in a separate goroutine does the following:
895// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
896// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
897// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
898// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
899// after an additional duration of keepalive.Timeout.
900func (t *http2Server) keepalive() {
901 p := &ping{}
902 var pingSent bool
903 maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
904 maxAge := time.NewTimer(t.kp.MaxConnectionAge)
905 keepalive := time.NewTimer(t.kp.Time)
906 // NOTE: All exit paths of this function should reset their
907 // respective timers. A failure to do so will cause the
908 // following clean-up to deadlock and eventually leak.
909 defer func() {
910 if !maxIdle.Stop() {
911 <-maxIdle.C
912 }
913 if !maxAge.Stop() {
914 <-maxAge.C
915 }
916 if !keepalive.Stop() {
917 <-keepalive.C
918 }
919 }()
920 for {
921 select {
922 case <-maxIdle.C:
923 t.mu.Lock()
924 idle := t.idle
925 if idle.IsZero() { // The connection is non-idle.
926 t.mu.Unlock()
927 maxIdle.Reset(t.kp.MaxConnectionIdle)
928 continue
929 }
930 val := t.kp.MaxConnectionIdle - time.Since(idle)
931 t.mu.Unlock()
932 if val <= 0 {
933 // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
934 // Gracefully close the connection.
935 t.drain(http2.ErrCodeNo, []byte{})
936 // Resetting the timer so that the clean-up doesn't deadlock.
937 maxIdle.Reset(infinity)
938 return
939 }
940 maxIdle.Reset(val)
941 case <-maxAge.C:
942 t.drain(http2.ErrCodeNo, []byte{})
943 maxAge.Reset(t.kp.MaxConnectionAgeGrace)
944 select {
945 case <-maxAge.C:
946 // Close the connection after grace period.
947 t.Close()
948 // Resetting the timer so that the clean-up doesn't deadlock.
949 maxAge.Reset(infinity)
950 case <-t.ctx.Done():
951 }
952 return
953 case <-keepalive.C:
954 if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
955 pingSent = false
956 keepalive.Reset(t.kp.Time)
957 continue
958 }
959 if pingSent {
960 t.Close()
961 // Resetting the timer so that the clean-up doesn't deadlock.
962 keepalive.Reset(infinity)
963 return
964 }
965 pingSent = true
966 if channelz.IsOn() {
967 atomic.AddInt64(&t.czData.kpCount, 1)
968 }
969 t.controlBuf.put(p)
970 keepalive.Reset(t.kp.Timeout)
971 case <-t.ctx.Done():
972 return
973 }
974 }
975}
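The numbered behaviors enforced by the keepalive() loop above are configured through keepalive.ServerParameters. A minimal sketch of a server wiring them up via the public API (the durations and listen address are hypothetical; only keepalive.ServerParameters and grpc.KeepaliveParams are taken from the real gRPC surface):

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Hypothetical values; each field maps to one numbered behavior of keepalive() above.
	kasp := keepalive.ServerParameters{
		MaxConnectionIdle:     5 * time.Minute,  // (1) graceful close of idle connections
		MaxConnectionAge:      30 * time.Minute, // (2) graceful close by age
		MaxConnectionAgeGrace: 5 * time.Second,  // (3) forcible close after the grace period
		Time:                  2 * time.Hour,    // (4) ping interval
		Timeout:               20 * time.Second, // (4) ping ack timeout
	}
	lis, err := net.Listen("tcp", "localhost:50051") // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer(grpc.KeepaliveParams(kasp))
	log.Fatal(s.Serve(lis))
}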
976
977// Close starts shutting down the http2Server transport.
978// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
979// could cause some resource issue. Revisit this later.
980func (t *http2Server) Close() error {
981 t.mu.Lock()
982 if t.state == closing {
983 t.mu.Unlock()
984 return errors.New("transport: Close() was already called")
985 }
986 t.state = closing
987 streams := t.activeStreams
988 t.activeStreams = nil
989 t.mu.Unlock()
990 t.controlBuf.finish()
991 t.cancel()
992 err := t.conn.Close()
993 if channelz.IsOn() {
994 channelz.RemoveEntry(t.channelzID)
995 }
996 // Cancel all active streams.
997 for _, s := range streams {
998 s.cancel()
999 }
1000 if t.stats != nil {
1001 connEnd := &stats.ConnEnd{}
1002 t.stats.HandleConn(t.ctx, connEnd)
1003 }
1004 return err
1005}
1006
1007// closeStream clears the footprint of a stream when the stream is not needed
1008// any more.
1009func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
1010 if s.swapState(streamDone) == streamDone {
1011 // If the stream was already done, return.
1012 return
1013 }
1014 // In case stream sending and receiving are invoked in separate
1015 // goroutines (e.g., bi-directional streaming), cancel needs to be
1016 // called to interrupt the potential blocking on other goroutines.
1017 s.cancel()
1018 cleanup := &cleanupStream{
1019 streamID: s.id,
1020 rst: rst,
1021 rstCode: rstCode,
1022 onWrite: func() {
1023 t.mu.Lock()
1024 if t.activeStreams != nil {
1025 delete(t.activeStreams, s.id)
1026 if len(t.activeStreams) == 0 {
1027 t.idle = time.Now()
1028 }
1029 }
1030 t.mu.Unlock()
1031 if channelz.IsOn() {
1032 if eosReceived {
1033 atomic.AddInt64(&t.czData.streamsSucceeded, 1)
1034 } else {
1035 atomic.AddInt64(&t.czData.streamsFailed, 1)
1036 }
1037 }
1038 },
1039 }
1040 if hdr != nil {
1041 hdr.cleanup = cleanup
1042 t.controlBuf.put(hdr)
1043 } else {
1044 t.controlBuf.put(cleanup)
1045 }
1046}
1047
1048func (t *http2Server) RemoteAddr() net.Addr {
1049 return t.remoteAddr
1050}
1051
1052func (t *http2Server) Drain() {
1053 t.drain(http2.ErrCodeNo, []byte{})
1054}
1055
1056func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
1057 t.mu.Lock()
1058 defer t.mu.Unlock()
1059 if t.drainChan != nil {
1060 return
1061 }
1062 t.drainChan = make(chan struct{})
1063 t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
1064}
1065
1066var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
1067
1068// Handles outgoing GoAway and returns true if loopy needs to put itself
1069// in draining mode.
1070func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
1071 t.mu.Lock()
1072 if t.state == closing { // TODO(mmukhi): This seems unnecessary.
1073 t.mu.Unlock()
1074 // The transport is closing.
1075 return false, ErrConnClosing
1076 }
1077 sid := t.maxStreamID
1078 if !g.headsUp {
1079 // Stop accepting more streams now.
1080 t.state = draining
1081 if len(t.activeStreams) == 0 {
1082 g.closeConn = true
1083 }
1084 t.mu.Unlock()
1085 if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
1086 return false, err
1087 }
1088 if g.closeConn {
1089 // Abruptly close the connection following the GoAway (via
1090 // loopywriter). But flush out what's inside the buffer first.
1091 t.framer.writer.Flush()
1092 return false, fmt.Errorf("transport: Connection closing")
1093 }
1094 return true, nil
1095 }
1096 t.mu.Unlock()
1097 // For a graceful close, send out a GoAway with a stream ID of MaxUInt32,
1098 // then follow that with a ping and wait for the ack to come back or a timer
1099 // to expire. During this time accept new streams since they might have
1100 // originated before the GoAway reaches the client.
1101 // After getting the ack or timer expiration send out another GoAway this
1102 // time with the ID of the max stream the server intends to process.
1103 if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
1104 return false, err
1105 }
1106 if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
1107 return false, err
1108 }
1109 go func() {
1110 timer := time.NewTimer(time.Minute)
1111 defer timer.Stop()
1112 select {
1113 case <-t.drainChan:
1114 case <-timer.C:
1115 case <-t.ctx.Done():
1116 return
1117 }
1118 t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
1119 }()
1120 return false, nil
1121}
1122
1123func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
1124 s := channelz.SocketInternalMetric{
1125 StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
1126 StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
1127 StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
1128 MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
1129 MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
1130 KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
1131 LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
1132 LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
1133 LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
1134 LocalFlowControlWindow: int64(t.fc.getSize()),
1135 SocketOptions: channelz.GetSocketOption(t.conn),
1136 LocalAddr: t.localAddr,
1137 RemoteAddr: t.remoteAddr,
1138 // RemoteName :
1139 }
1140 if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
1141 s.Security = au.GetSecurityValue()
1142 }
1143 s.RemoteFlowControlWindow = t.getOutFlowWindow()
1144 return &s
1145}
1146
1147func (t *http2Server) IncrMsgSent() {
1148 atomic.AddInt64(&t.czData.msgSent, 1)
1149 atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
1150}
1151
1152func (t *http2Server) IncrMsgRecv() {
1153 atomic.AddInt64(&t.czData.msgRecv, 1)
1154 atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
1155}
1156
1157func (t *http2Server) getOutFlowWindow() int64 {
1158 resp := make(chan uint32)
1159 timer := time.NewTimer(time.Second)
1160 defer timer.Stop()
1161 t.controlBuf.put(&outFlowControlSizeRequest{resp})
1162 select {
1163 case sz := <-resp:
1164 return int64(sz)
1165 case <-t.ctxDone:
1166 return -1
1167 case <-timer.C:
1168 return -2
1169 }
1170}
1171
1172func getJitter(v time.Duration) time.Duration {
1173 if v == infinity {
1174 return 0
1175 }
1176 // Generate a jitter between +/- 10% of the value.
1177 r := int64(v / 10)
1178 j := grpcrand.Int63n(2*r) - r
1179 return time.Duration(j)
1180}
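getJitter spreads keepalive-related intervals so that many connections do not fire in lock step. A one-line sketch, written as if it lived in this package (the helper name is hypothetical):

// jitteredInterval offsets base by at most roughly +/-10%; getJitter
// returns 0 for infinity, so an unset value passes through unchanged.
func jitteredInterval(base time.Duration) time.Duration {
	return base + getJitter(base)
}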
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
new file mode 100644
index 0000000..77a2cfa
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -0,0 +1,623 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19package transport
20
21import (
22 "bufio"
23 "bytes"
24 "encoding/base64"
25 "fmt"
26 "io"
27 "math"
28 "net"
29 "net/http"
30 "strconv"
31 "strings"
32 "time"
33 "unicode/utf8"
34
35 "github.com/golang/protobuf/proto"
36 "golang.org/x/net/http2"
37 "golang.org/x/net/http2/hpack"
38 spb "google.golang.org/genproto/googleapis/rpc/status"
39 "google.golang.org/grpc/codes"
40 "google.golang.org/grpc/status"
41)
42
43const (
44 // http2MaxFrameLen specifies the max length of a HTTP2 frame.
45 http2MaxFrameLen = 16384 // 16KB frame
46 // http://http2.github.io/http2-spec/#SettingValues
47 http2InitHeaderTableSize = 4096
48 // baseContentType is the base content-type for gRPC. This is a valid
49 // content-type on its own, but can also include a content-subtype such as
50 // "proto" as a suffix after "+" or ";". See
51 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
52 // for more details.
53 baseContentType = "application/grpc"
54)
55
56var (
57 clientPreface = []byte(http2.ClientPreface)
58 http2ErrConvTab = map[http2.ErrCode]codes.Code{
59 http2.ErrCodeNo: codes.Internal,
60 http2.ErrCodeProtocol: codes.Internal,
61 http2.ErrCodeInternal: codes.Internal,
62 http2.ErrCodeFlowControl: codes.ResourceExhausted,
63 http2.ErrCodeSettingsTimeout: codes.Internal,
64 http2.ErrCodeStreamClosed: codes.Internal,
65 http2.ErrCodeFrameSize: codes.Internal,
66 http2.ErrCodeRefusedStream: codes.Unavailable,
67 http2.ErrCodeCancel: codes.Canceled,
68 http2.ErrCodeCompression: codes.Internal,
69 http2.ErrCodeConnect: codes.Internal,
70 http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
71 http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
72 http2.ErrCodeHTTP11Required: codes.Internal,
73 }
74 statusCodeConvTab = map[codes.Code]http2.ErrCode{
75 codes.Internal: http2.ErrCodeInternal,
76 codes.Canceled: http2.ErrCodeCancel,
77 codes.Unavailable: http2.ErrCodeRefusedStream,
78 codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
79 codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
80 }
81 httpStatusConvTab = map[int]codes.Code{
82 // 400 Bad Request - INTERNAL.
83 http.StatusBadRequest: codes.Internal,
84 // 401 Unauthorized - UNAUTHENTICATED.
85 http.StatusUnauthorized: codes.Unauthenticated,
86 // 403 Forbidden - PERMISSION_DENIED.
87 http.StatusForbidden: codes.PermissionDenied,
88 // 404 Not Found - UNIMPLEMENTED.
89 http.StatusNotFound: codes.Unimplemented,
90 // 429 Too Many Requests - UNAVAILABLE.
91 http.StatusTooManyRequests: codes.Unavailable,
92 // 502 Bad Gateway - UNAVAILABLE.
93 http.StatusBadGateway: codes.Unavailable,
94 // 503 Service Unavailable - UNAVAILABLE.
95 http.StatusServiceUnavailable: codes.Unavailable,
96 // 504 Gateway timeout - UNAVAILABLE.
97 http.StatusGatewayTimeout: codes.Unavailable,
98 }
99)
100
101// Records the states during HPACK decoding. Must be reset once the
102// decoding of the entire headers is finished.
103type decodeState struct {
104 encoding string
105 // statusGen caches the stream status received from the trailer the server
106 // sent. Client side only. Do not access directly. After all trailers are
107 // parsed, use the status method to retrieve the status.
108 statusGen *status.Status
109 // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
110 // intended for direct access outside of parsing.
111 rawStatusCode *int
112 rawStatusMsg string
113 httpStatus *int
114 // Server side only fields.
115 timeoutSet bool
116 timeout time.Duration
117 method string
118 // key-value metadata map from the peer.
119 mdata map[string][]string
120 statsTags []byte
121 statsTrace []byte
122 contentSubtype string
123 // whether decoding on server side or not
124 serverSide bool
125}
126
127// isReservedHeader checks whether hdr belongs to HTTP2 headers
128// reserved by gRPC protocol. Any other headers are classified as the
129// user-specified metadata.
130func isReservedHeader(hdr string) bool {
131 if hdr != "" && hdr[0] == ':' {
132 return true
133 }
134 switch hdr {
135 case "content-type",
136 "user-agent",
137 "grpc-message-type",
138 "grpc-encoding",
139 "grpc-message",
140 "grpc-status",
141 "grpc-timeout",
142 "grpc-status-details-bin",
143 // Intentionally exclude grpc-previous-rpc-attempts and
144 // grpc-retry-pushback-ms, which are "reserved", but their API
145 // intentionally works via metadata.
146 "te":
147 return true
148 default:
149 return false
150 }
151}
152
153// isWhitelistedHeader checks whether hdr should be propagated into metadata
154// visible to users, even though it is classified as "reserved", above.
155func isWhitelistedHeader(hdr string) bool {
156 switch hdr {
157 case ":authority", "user-agent":
158 return true
159 default:
160 return false
161 }
162}
163
164// contentSubtype returns the content-subtype for the given content-type. The
165// given content-type must be a valid content-type that starts with
166// "application/grpc". A content-subtype will follow "application/grpc" after a
167// "+" or ";". See
168// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
169// more details.
170//
171// If contentType is not a valid content-type for gRPC, the boolean
172// will be false, otherwise true. If content-type == "application/grpc",
173// "application/grpc+", or "application/grpc;", the boolean will be true,
174// but no content-subtype will be returned.
175//
176// contentType is assumed to be lowercase already.
177func contentSubtype(contentType string) (string, bool) {
178 if contentType == baseContentType {
179 return "", true
180 }
181 if !strings.HasPrefix(contentType, baseContentType) {
182 return "", false
183 }
184 // guaranteed since != baseContentType and has baseContentType prefix
185 switch contentType[len(baseContentType)] {
186 case '+', ';':
187 // this will return true for "application/grpc+" or "application/grpc;"
188 // which the previous validContentType function tested to be valid, so we
189 // just say that no content-subtype is specified in this case
190 return contentType[len(baseContentType)+1:], true
191 default:
192 return "", false
193 }
194}
195
196// contentSubtype is assumed to be lowercase
197func contentType(contentSubtype string) string {
198 if contentSubtype == "" {
199 return baseContentType
200 }
201 return baseContentType + "+" + contentSubtype
202}
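A brief sketch of the mapping implemented by contentSubtype and contentType above; the function is hypothetical and written as if it sat in a file of this package:

package transport

import "fmt"

func exampleContentSubtype() {
	fmt.Println(contentSubtype("application/grpc"))       // "" true
	fmt.Println(contentSubtype("application/grpc+proto")) // "proto" true
	fmt.Println(contentSubtype("application/grpc;json"))  // "json" true
	fmt.Println(contentSubtype("application/json"))       // "" false
	fmt.Println(contentType("proto"))                     // application/grpc+proto
	fmt.Println(contentType(""))                          // application/grpc
}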
203
204func (d *decodeState) status() *status.Status {
205 if d.statusGen == nil {
206 // No status-details were provided; generate status using code/msg.
207 d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
208 }
209 return d.statusGen
210}
211
212const binHdrSuffix = "-bin"
213
214func encodeBinHeader(v []byte) string {
215 return base64.RawStdEncoding.EncodeToString(v)
216}
217
218func decodeBinHeader(v string) ([]byte, error) {
219 if len(v)%4 == 0 {
220 // Input was padded, or padding was not necessary.
221 return base64.StdEncoding.DecodeString(v)
222 }
223 return base64.RawStdEncoding.DecodeString(v)
224}
225
226func encodeMetadataHeader(k, v string) string {
227 if strings.HasSuffix(k, binHdrSuffix) {
228 return encodeBinHeader(([]byte)(v))
229 }
230 return v
231}
232
233func decodeMetadataHeader(k, v string) (string, error) {
234 if strings.HasSuffix(k, binHdrSuffix) {
235 b, err := decodeBinHeader(v)
236 return string(b), err
237 }
238 return v, nil
239}
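A same-package sketch (hypothetical function and key names) of the "-bin" convention handled by the two helpers above: keys ending in "-bin" carry arbitrary bytes as unpadded base64, while everything else passes through unchanged.

package transport

import "fmt"

func exampleBinHeader() {
	wire := encodeMetadataHeader("trace-bin", string([]byte{0x01, 0x02, 0x03}))
	fmt.Println(wire) // AQID (RawStdEncoding, no padding)

	val, err := decodeMetadataHeader("trace-bin", wire)
	fmt.Println([]byte(val), err) // [1 2 3] <nil>

	// Keys without the "-bin" suffix are not touched.
	fmt.Println(encodeMetadataHeader("user-key", "plain value")) // plain value
}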
240
241func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
242 // frame.Truncated is set to true when framer detects that the current header
243 // list size hits MaxHeaderListSize limit.
244 if frame.Truncated {
245 return status.Error(codes.Internal, "peer header list size exceeded limit")
246 }
247 for _, hf := range frame.Fields {
248 if err := d.processHeaderField(hf); err != nil {
249 return err
250 }
251 }
252
253 if d.serverSide {
254 return nil
255 }
256
257 // If grpc status exists, no need to check further.
258 if d.rawStatusCode != nil || d.statusGen != nil {
259 return nil
260 }
261
262 // If grpc status doesn't exist and http status doesn't exist,
263 // then it's a malformed header.
264 if d.httpStatus == nil {
265 return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
266 }
267
268 if *(d.httpStatus) != http.StatusOK {
269 code, ok := httpStatusConvTab[*(d.httpStatus)]
270 if !ok {
271 code = codes.Unknown
272 }
273 return status.Error(code, http.StatusText(*(d.httpStatus)))
274 }
275
276 // gRPC status doesn't exist and http status is OK.
277 // Set rawStatusCode to Unknown and return a nil error,
278 // so that, if the stream has ended, this Unknown status
279 // will be propagated to the user.
280 // Otherwise, it will be ignored. In that case, the status from
281 // a later trailer, the one with the StreamEnded flag set, is propagated.
282 code := int(codes.Unknown)
283 d.rawStatusCode = &code
284 return nil
285}
286
287func (d *decodeState) addMetadata(k, v string) {
288 if d.mdata == nil {
289 d.mdata = make(map[string][]string)
290 }
291 d.mdata[k] = append(d.mdata[k], v)
292}
293
294func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
295 switch f.Name {
296 case "content-type":
297 contentSubtype, validContentType := contentSubtype(f.Value)
298 if !validContentType {
299 return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
300 }
301 d.contentSubtype = contentSubtype
302 // TODO: do we want to propagate the whole content-type in the metadata,
303 // or come up with a way to just propagate the content-subtype if it was set?
304 // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
305 // in the metadata?
306 d.addMetadata(f.Name, f.Value)
307 case "grpc-encoding":
308 d.encoding = f.Value
309 case "grpc-status":
310 code, err := strconv.Atoi(f.Value)
311 if err != nil {
312 return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
313 }
314 d.rawStatusCode = &code
315 case "grpc-message":
316 d.rawStatusMsg = decodeGrpcMessage(f.Value)
317 case "grpc-status-details-bin":
318 v, err := decodeBinHeader(f.Value)
319 if err != nil {
320 return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
321 }
322 s := &spb.Status{}
323 if err := proto.Unmarshal(v, s); err != nil {
324 return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
325 }
326 d.statusGen = status.FromProto(s)
327 case "grpc-timeout":
328 d.timeoutSet = true
329 var err error
330 if d.timeout, err = decodeTimeout(f.Value); err != nil {
331 return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
332 }
333 case ":path":
334 d.method = f.Value
335 case ":status":
336 code, err := strconv.Atoi(f.Value)
337 if err != nil {
338 return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
339 }
340 d.httpStatus = &code
341 case "grpc-tags-bin":
342 v, err := decodeBinHeader(f.Value)
343 if err != nil {
344 return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
345 }
346 d.statsTags = v
347 d.addMetadata(f.Name, string(v))
348 case "grpc-trace-bin":
349 v, err := decodeBinHeader(f.Value)
350 if err != nil {
351 return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
352 }
353 d.statsTrace = v
354 d.addMetadata(f.Name, string(v))
355 default:
356 if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
357 break
358 }
359 v, err := decodeMetadataHeader(f.Name, f.Value)
360 if err != nil {
361 errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
362 return nil
363 }
364 d.addMetadata(f.Name, v)
365 }
366 return nil
367}
368
369type timeoutUnit uint8
370
371const (
372 hour timeoutUnit = 'H'
373 minute timeoutUnit = 'M'
374 second timeoutUnit = 'S'
375 millisecond timeoutUnit = 'm'
376 microsecond timeoutUnit = 'u'
377 nanosecond timeoutUnit = 'n'
378)
379
380func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
381 switch u {
382 case hour:
383 return time.Hour, true
384 case minute:
385 return time.Minute, true
386 case second:
387 return time.Second, true
388 case millisecond:
389 return time.Millisecond, true
390 case microsecond:
391 return time.Microsecond, true
392 case nanosecond:
393 return time.Nanosecond, true
394 default:
395 }
396 return
397}
398
399const maxTimeoutValue int64 = 100000000 - 1
400
401// div does integer division and round-up the result. Note that this is
402// equivalent to (d+r-1)/r but has less chance to overflow.
403func div(d, r time.Duration) int64 {
404 if m := d % r; m > 0 {
405 return int64(d/r + 1)
406 }
407 return int64(d / r)
408}
409
410// TODO(zhaoq): This is simplistic and not bandwidth-efficient. Improve it.
411func encodeTimeout(t time.Duration) string {
412 if t <= 0 {
413 return "0n"
414 }
415 if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
416 return strconv.FormatInt(d, 10) + "n"
417 }
418 if d := div(t, time.Microsecond); d <= maxTimeoutValue {
419 return strconv.FormatInt(d, 10) + "u"
420 }
421 if d := div(t, time.Millisecond); d <= maxTimeoutValue {
422 return strconv.FormatInt(d, 10) + "m"
423 }
424 if d := div(t, time.Second); d <= maxTimeoutValue {
425 return strconv.FormatInt(d, 10) + "S"
426 }
427 if d := div(t, time.Minute); d <= maxTimeoutValue {
428 return strconv.FormatInt(d, 10) + "M"
429 }
430 // Note that maxTimeoutValue * time.Hour > MaxInt64.
431 return strconv.FormatInt(div(t, time.Hour), 10) + "H"
432}
433
434func decodeTimeout(s string) (time.Duration, error) {
435 size := len(s)
436 if size < 2 {
437 return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
438 }
439 if size > 9 {
440 // Spec allows for 8 digits plus the unit.
441 return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
442 }
443 unit := timeoutUnit(s[size-1])
444 d, ok := timeoutUnitToDuration(unit)
445 if !ok {
446 return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
447 }
448 t, err := strconv.ParseInt(s[:size-1], 10, 64)
449 if err != nil {
450 return 0, err
451 }
452 const maxHours = math.MaxInt64 / int64(time.Hour)
453 if d == time.Hour && t > maxHours {
454 // This timeout would overflow math.MaxInt64; clamp it.
455 return time.Duration(math.MaxInt64), nil
456 }
457 return d * time.Duration(t), nil
458}
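The grpc-timeout value produced and consumed above is an ASCII integer of at most eight digits followed by a single-letter unit; encodeTimeout picks the finest unit that still fits. A same-package sketch (hypothetical function, arbitrary values):

package transport

import (
	"fmt"
	"time"
)

func exampleTimeout() {
	fmt.Println(encodeTimeout(90 * time.Millisecond)) // 90000000n (fits in nanoseconds)
	fmt.Println(encodeTimeout(time.Minute))           // 60000000u (falls back to microseconds)

	d, err := decodeTimeout("100m")
	fmt.Println(d, err) // 100ms <nil>

	d, err = decodeTimeout("5S")
	fmt.Println(d, err) // 5s <nil>
}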
459
460const (
461 spaceByte = ' '
462 tildeByte = '~'
463 percentByte = '%'
464)
465
466// encodeGrpcMessage is used to encode status code in header field
467// "grpc-message". It does percent encoding and also replaces invalid utf-8
468// characters with Unicode replacement character.
469//
470// It checks to see if each individual byte in msg is an allowable byte, and
471// then either percent encoding or passing it through. When percent encoding,
472// the byte is converted into hexadecimal notation with a '%' prepended.
473func encodeGrpcMessage(msg string) string {
474 if msg == "" {
475 return ""
476 }
477 lenMsg := len(msg)
478 for i := 0; i < lenMsg; i++ {
479 c := msg[i]
480 if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
481 return encodeGrpcMessageUnchecked(msg)
482 }
483 }
484 return msg
485}
486
487func encodeGrpcMessageUnchecked(msg string) string {
488 var buf bytes.Buffer
489 for len(msg) > 0 {
490 r, size := utf8.DecodeRuneInString(msg)
491 for _, b := range []byte(string(r)) {
492 if size > 1 {
493 // If size > 1, r is not ascii. Always do percent encoding.
494 buf.WriteString(fmt.Sprintf("%%%02X", b))
495 continue
496 }
497
498 // The for loop is necessary even if size == 1. r could be
499 // utf8.RuneError.
500 //
501 // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
502 if b >= spaceByte && b <= tildeByte && b != percentByte {
503 buf.WriteByte(b)
504 } else {
505 buf.WriteString(fmt.Sprintf("%%%02X", b))
506 }
507 }
508 msg = msg[size:]
509 }
510 return buf.String()
511}
512
513// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
514func decodeGrpcMessage(msg string) string {
515 if msg == "" {
516 return ""
517 }
518 lenMsg := len(msg)
519 for i := 0; i < lenMsg; i++ {
520 if msg[i] == percentByte && i+2 < lenMsg {
521 return decodeGrpcMessageUnchecked(msg)
522 }
523 }
524 return msg
525}
526
527func decodeGrpcMessageUnchecked(msg string) string {
528 var buf bytes.Buffer
529 lenMsg := len(msg)
530 for i := 0; i < lenMsg; i++ {
531 c := msg[i]
532 if c == percentByte && i+2 < lenMsg {
533 parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
534 if err != nil {
535 buf.WriteByte(c)
536 } else {
537 buf.WriteByte(byte(parsed))
538 i += 2
539 }
540 } else {
541 buf.WriteByte(c)
542 }
543 }
544 return buf.String()
545}
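A same-package sketch (hypothetical function, arbitrary message) of the percent-encoding round trip performed by encodeGrpcMessage and decodeGrpcMessage:

package transport

import "fmt"

func exampleGrpcMessage() {
	enc := encodeGrpcMessage("résumé 100%")
	fmt.Println(enc)                    // r%C3%A9sum%C3%A9 100%25
	fmt.Println(decodeGrpcMessage(enc)) // résumé 100%
}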
546
547type bufWriter struct {
548 buf []byte
549 offset int
550 batchSize int
551 conn net.Conn
552 err error
553
554 onFlush func()
555}
556
557func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
558 return &bufWriter{
559 buf: make([]byte, batchSize*2),
560 batchSize: batchSize,
561 conn: conn,
562 }
563}
564
565func (w *bufWriter) Write(b []byte) (n int, err error) {
566 if w.err != nil {
567 return 0, w.err
568 }
569 if w.batchSize == 0 { // Buffer has been disabled.
570 return w.conn.Write(b)
571 }
572 for len(b) > 0 {
573 nn := copy(w.buf[w.offset:], b)
574 b = b[nn:]
575 w.offset += nn
576 n += nn
577 if w.offset >= w.batchSize {
578 err = w.Flush()
579 }
580 }
581 return n, err
582}
583
584func (w *bufWriter) Flush() error {
585 if w.err != nil {
586 return w.err
587 }
588 if w.offset == 0 {
589 return nil
590 }
591 if w.onFlush != nil {
592 w.onFlush()
593 }
594 _, w.err = w.conn.Write(w.buf[:w.offset])
595 w.offset = 0
596 return w.err
597}
598
599type framer struct {
600 writer *bufWriter
601 fr *http2.Framer
602}
603
604func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
605 if writeBufferSize < 0 {
606 writeBufferSize = 0
607 }
608 var r io.Reader = conn
609 if readBufferSize > 0 {
610 r = bufio.NewReaderSize(r, readBufferSize)
611 }
612 w := newBufWriter(conn, writeBufferSize)
613 f := &framer{
614 writer: w,
615 fr: http2.NewFramer(w, r),
616 }
617 // Opt-in to Frame reuse API on framer to reduce garbage.
618 // Frames aren't safe to read from after a subsequent call to ReadFrame.
619 f.fr.SetReuseFrames()
620 f.fr.MaxHeaderListSize = maxHeaderListSize
621 f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
622 return f
623}
diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
new file mode 100644
index 0000000..879df80
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/log.go
@@ -0,0 +1,44 @@
1/*
2 *
3 * Copyright 2017 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// This file contains wrappers for grpclog functions.
20// The transport package only logs to verbose level 2 by default.
21
22package transport
23
24import "google.golang.org/grpc/grpclog"
25
26const logLevel = 2
27
28func infof(format string, args ...interface{}) {
29 if grpclog.V(logLevel) {
30 grpclog.Infof(format, args...)
31 }
32}
33
34func warningf(format string, args ...interface{}) {
35 if grpclog.V(logLevel) {
36 grpclog.Warningf(format, args...)
37 }
38}
39
40func errorf(format string, args ...interface{}) {
41 if grpclog.V(logLevel) {
42 grpclog.Errorf(format, args...)
43 }
44}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
new file mode 100644
index 0000000..2580aa7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -0,0 +1,758 @@
1/*
2 *
3 * Copyright 2014 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19// Package transport defines and implements a message-oriented communication
20// channel to complete various transactions (e.g., an RPC). It is meant for
21// grpc-internal usage and is not intended to be imported directly by users.
22package transport
23
24import (
25 "context"
26 "errors"
27 "fmt"
28 "io"
29 "net"
30 "sync"
31 "sync/atomic"
32
33 "google.golang.org/grpc/codes"
34 "google.golang.org/grpc/credentials"
35 "google.golang.org/grpc/keepalive"
36 "google.golang.org/grpc/metadata"
37 "google.golang.org/grpc/stats"
38 "google.golang.org/grpc/status"
39 "google.golang.org/grpc/tap"
40)
41
42// recvMsg represents the received msg from the transport. All transport
43// protocol specific info has been removed.
44type recvMsg struct {
45 data []byte
46 // nil: received some data
47 // io.EOF: stream is completed. data is nil.
48 // other non-nil error: transport failure. data is nil.
49 err error
50}
51
52// recvBuffer is an unbounded channel of recvMsg structs.
53// Note recvBuffer differs from controlBuffer only in that recvBuffer
54// holds a channel of recvMsg structs only, instead of objects implementing the "item" interface.
55// recvBuffer is written to much more often than
56// controlBuffer, and using plain recvMsg structs helps avoid allocation in "recvBuffer.put".
57type recvBuffer struct {
58 c chan recvMsg
59 mu sync.Mutex
60 backlog []recvMsg
61 err error
62}
63
64func newRecvBuffer() *recvBuffer {
65 b := &recvBuffer{
66 c: make(chan recvMsg, 1),
67 }
68 return b
69}
70
71func (b *recvBuffer) put(r recvMsg) {
72 b.mu.Lock()
73 if b.err != nil {
74 b.mu.Unlock()
75 // An error had occurred earlier, don't accept more
76 // data or errors.
77 return
78 }
79 b.err = r.err
80 if len(b.backlog) == 0 {
81 select {
82 case b.c <- r:
83 b.mu.Unlock()
84 return
85 default:
86 }
87 }
88 b.backlog = append(b.backlog, r)
89 b.mu.Unlock()
90}
91
92func (b *recvBuffer) load() {
93 b.mu.Lock()
94 if len(b.backlog) > 0 {
95 select {
96 case b.c <- b.backlog[0]:
97 b.backlog[0] = recvMsg{}
98 b.backlog = b.backlog[1:]
99 default:
100 }
101 }
102 b.mu.Unlock()
103}
104
105// get returns the channel that receives a recvMsg in the buffer.
106//
107// Upon receipt of a recvMsg, the caller should call load to send another
108// recvMsg onto the channel if there is any.
109func (b *recvBuffer) get() <-chan recvMsg {
110 return b.c
111}
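A minimal consumer sketch of the get/load contract described above, written as if it lived in this package (the helper is hypothetical): receive one recvMsg, then call load so the next backlogged message is moved onto the channel.

func drainRecvBuffer(b *recvBuffer) ([]byte, error) {
	var out []byte
	for {
		m := <-b.get()
		b.load() // promote the next backlogged recvMsg, if any
		if m.err != nil {
			// io.EOF here means the stream completed cleanly.
			return out, m.err
		}
		out = append(out, m.data...)
	}
}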
112
113// recvBufferReader implements io.Reader interface to read the data from
114// recvBuffer.
115type recvBufferReader struct {
116 closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
117 ctx context.Context
118 ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
119 recv *recvBuffer
120 last []byte // Stores the remaining data in the previous calls.
121 err error
122}
123
124// Read reads the next len(p) bytes from last. If last is drained, it tries to
125// read additional data from recv. It blocks if there is no additional data available
126// in recv. If Read returns any non-nil error, it will continue to return that error.
127func (r *recvBufferReader) Read(p []byte) (n int, err error) {
128 if r.err != nil {
129 return 0, r.err
130 }
131 if r.last != nil && len(r.last) > 0 {
132 // Read remaining data left in last call.
133 copied := copy(p, r.last)
134 r.last = r.last[copied:]
135 return copied, nil
136 }
137 if r.closeStream != nil {
138 n, r.err = r.readClient(p)
139 } else {
140 n, r.err = r.read(p)
141 }
142 return n, r.err
143}
144
145func (r *recvBufferReader) read(p []byte) (n int, err error) {
146 select {
147 case <-r.ctxDone:
148 return 0, ContextErr(r.ctx.Err())
149 case m := <-r.recv.get():
150 return r.readAdditional(m, p)
151 }
152}
153
154func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
155 // If the context is canceled, the stream is closed with nil trailer metadata.
156 // closeStream writes its error parameter to r.recv as a recvMsg.
157 // r.readAdditional acts on that message and returns the necessary error.
158 select {
159 case <-r.ctxDone:
160 r.closeStream(ContextErr(r.ctx.Err()))
161 m := <-r.recv.get()
162 return r.readAdditional(m, p)
163 case m := <-r.recv.get():
164 return r.readAdditional(m, p)
165 }
166}
167
168func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
169 r.recv.load()
170 if m.err != nil {
171 return 0, m.err
172 }
173 copied := copy(p, m.data)
174 r.last = m.data[copied:]
175 return copied, nil
176}
177
178type streamState uint32
179
180const (
181 streamActive streamState = iota
182 streamWriteDone // EndStream sent
183 streamReadDone // EndStream received
184 streamDone // the entire stream is finished.
185)
186
187// Stream represents an RPC in the transport layer.
188type Stream struct {
189 id uint32
190 st ServerTransport // nil for client side Stream
191 ctx context.Context // the associated context of the stream
192 cancel context.CancelFunc // always nil for client side Stream
193 done chan struct{} // closed at the end of stream to unblock writers. On the client side.
194 ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
195 method string // the associated RPC method of the stream
196 recvCompress string
197 sendCompress string
198 buf *recvBuffer
199 trReader io.Reader
200 fc *inFlow
201 wq *writeQuota
202
203 // Callback to state application's intentions to read data. This
204 // is used to adjust flow control, if needed.
205 requestRead func(int)
206
207 headerChan chan struct{} // closed to indicate the end of header metadata.
208 headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
209
210 // hdrMu protects header and trailer metadata on the server-side.
211 hdrMu sync.Mutex
212 // On client side, header keeps the received header metadata.
213 //
214 // On server side, header keeps the header set by SetHeader(). The complete
215 // header will be merged into this after t.WriteHeader() is called.
216 header metadata.MD
217 trailer metadata.MD // the key-value map of trailer metadata.
218
219 noHeaders bool // set if the client never received headers (set only after the stream is done).
220
221 // On the server-side, headerSent is atomically set to 1 when the headers are sent out.
222 headerSent uint32
223
224 state streamState
225
226 // On client-side it is the status error received from the server.
227 // On server-side it is unused.
228 status *status.Status
229
230 bytesReceived uint32 // indicates whether any bytes have been received on this stream
231 unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
232
233 // contentSubtype is the content-subtype for requests.
234 // this must be lowercase or the behavior is undefined.
235 contentSubtype string
236}
237
238// isHeaderSent is only valid on the server-side.
239func (s *Stream) isHeaderSent() bool {
240 return atomic.LoadUint32(&s.headerSent) == 1
241}
242
243// updateHeaderSent updates headerSent and returns true
244// if it was already set. It is valid only on the server side.
245func (s *Stream) updateHeaderSent() bool {
246 return atomic.SwapUint32(&s.headerSent, 1) == 1
247}
248
249func (s *Stream) swapState(st streamState) streamState {
250 return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
251}
252
253func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
254 return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
255}
256
257func (s *Stream) getState() streamState {
258 return streamState(atomic.LoadUint32((*uint32)(&s.state)))
259}
260
261func (s *Stream) waitOnHeader() error {
262 if s.headerChan == nil {
263 // On the server headerChan is always nil since a stream originates
264 // only after having received headers.
265 return nil
266 }
267 select {
268 case <-s.ctx.Done():
269 return ContextErr(s.ctx.Err())
270 case <-s.headerChan:
271 return nil
272 }
273}
274
275// RecvCompress returns the compression algorithm applied to the inbound
276// message. It is empty string if there is no compression applied.
277func (s *Stream) RecvCompress() string {
278 if err := s.waitOnHeader(); err != nil {
279 return ""
280 }
281 return s.recvCompress
282}
283
284// SetSendCompress sets the compression algorithm to the stream.
285func (s *Stream) SetSendCompress(str string) {
286 s.sendCompress = str
287}
288
289// Done returns a channel which is closed when it receives the final status
290// from the server.
291func (s *Stream) Done() <-chan struct{} {
292 return s.done
293}
294
295// Header returns the header metadata of the stream.
296//
297// On client side, it acquires the key-value pairs of header metadata once it is
298// available. It blocks until i) the metadata is ready or ii) there is no header
299// metadata or iii) the stream is canceled/expired.
300//
301// On server side, it returns the out header after t.WriteHeader is called.
302func (s *Stream) Header() (metadata.MD, error) {
303 if s.headerChan == nil && s.header != nil {
304 // On server side, return the header in stream. It will be the out
305 // header after t.WriteHeader is called.
306 return s.header.Copy(), nil
307 }
308 err := s.waitOnHeader()
309 // Even if the stream is closed, header is returned if available.
310 select {
311 case <-s.headerChan:
312 if s.header == nil {
313 return nil, nil
314 }
315 return s.header.Copy(), nil
316 default:
317 }
318 return nil, err
319}
320
321// TrailersOnly blocks until a header or trailers-only frame is received and
322// then returns true if the stream was trailers-only. If the stream ends
323// before headers are received, returns true, nil. If a context error happens
324// first, returns it as a status error. Client-side only.
325func (s *Stream) TrailersOnly() (bool, error) {
326 err := s.waitOnHeader()
327 if err != nil {
328 return false, err
329 }
330 // if !headerDone, some other connection error occurred.
331 return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
332}
333
334// Trailer returns the cached trailer metadata. Note that if it is not called
335// after the entire stream is done, it could return an empty MD. Client
336// side only.
337// It can be safely read only after the stream has ended, that is, after either
338// a read or a write has returned io.EOF.
339func (s *Stream) Trailer() metadata.MD {
340 c := s.trailer.Copy()
341 return c
342}
343
344// ContentSubtype returns the content-subtype for a request. For example, a
345// content-subtype of "proto" will result in a content-type of
346// "application/grpc+proto". This will always be lowercase. See
347// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
348// more details.
349func (s *Stream) ContentSubtype() string {
350 return s.contentSubtype
351}
352
353// Context returns the context of the stream.
354func (s *Stream) Context() context.Context {
355 return s.ctx
356}
357
358// Method returns the method for the stream.
359func (s *Stream) Method() string {
360 return s.method
361}
362
363// Status returns the status received from the server.
364// Status can be read safely only after the stream has ended,
365// that is, after Done() is closed.
366func (s *Stream) Status() *status.Status {
367 return s.status
368}
369
370// SetHeader sets the header metadata. This can be called multiple times.
371// Server side only.
372// This should not be called in parallel to other data writes.
373func (s *Stream) SetHeader(md metadata.MD) error {
374 if md.Len() == 0 {
375 return nil
376 }
377 if s.isHeaderSent() || s.getState() == streamDone {
378 return ErrIllegalHeaderWrite
379 }
380 s.hdrMu.Lock()
381 s.header = metadata.Join(s.header, md)
382 s.hdrMu.Unlock()
383 return nil
384}
385
386// SendHeader sends the given header metadata. The given metadata is
387// combined with any metadata set by previous calls to SetHeader and
388// then written to the transport stream.
389func (s *Stream) SendHeader(md metadata.MD) error {
390 return s.st.WriteHeader(s, md)
391}
392
393// SetTrailer sets the trailer metadata which will be sent with the RPC status
394// by the server. This can be called multiple times. Server side only.
395// This should not be called parallel to other data writes.
396func (s *Stream) SetTrailer(md metadata.MD) error {
397 if md.Len() == 0 {
398 return nil
399 }
400 if s.getState() == streamDone {
401 return ErrIllegalHeaderWrite
402 }
403 s.hdrMu.Lock()
404 s.trailer = metadata.Join(s.trailer, md)
405 s.hdrMu.Unlock()
406 return nil
407}
408
408
409func (s *Stream) write(m recvMsg) {
410 s.buf.put(m)
411}
412
413// Read reads all p bytes from the wire for this stream.
414func (s *Stream) Read(p []byte) (n int, err error) {
415 // Don't request a read if there was an error earlier
416 if er := s.trReader.(*transportReader).er; er != nil {
417 return 0, er
418 }
419 s.requestRead(len(p))
420 return io.ReadFull(s.trReader, p)
421}
422
423// transportReader reads all the data available for this Stream from the transport and
424// passes them into the decoder, which converts them into a gRPC message stream.
425// The error is io.EOF when the stream is done or another non-nil error if
426// the stream broke.
427type transportReader struct {
428 reader io.Reader
429 // The handler to control the window update procedure for both this
430 // particular stream and the associated transport.
431 windowHandler func(int)
432 er error
433}
434
435func (t *transportReader) Read(p []byte) (n int, err error) {
436 n, err = t.reader.Read(p)
437 if err != nil {
438 t.er = err
439 return
440 }
441 t.windowHandler(n)
442 return
443}
444
445// BytesReceived indicates whether any bytes have been received on this stream.
446func (s *Stream) BytesReceived() bool {
447 return atomic.LoadUint32(&s.bytesReceived) == 1
448}
449
450// Unprocessed indicates whether the server did not process this stream --
451// i.e. it sent a refused stream or GOAWAY including this stream ID.
452func (s *Stream) Unprocessed() bool {
453 return atomic.LoadUint32(&s.unprocessed) == 1
454}
455
456// GoString is implemented by Stream so context.String() won't
457// race when printing %#v.
458func (s *Stream) GoString() string {
459 return fmt.Sprintf("<stream: %p, %v>", s, s.method)
460}
461
462// state of transport
463type transportState int
464
465const (
466 reachable transportState = iota
467 closing
468 draining
469)
470
471// ServerConfig consists of all the configurations to establish a server transport.
472type ServerConfig struct {
473 MaxStreams uint32
474 AuthInfo credentials.AuthInfo
475 InTapHandle tap.ServerInHandle
476 StatsHandler stats.Handler
477 KeepaliveParams keepalive.ServerParameters
478 KeepalivePolicy keepalive.EnforcementPolicy
479 InitialWindowSize int32
480 InitialConnWindowSize int32
481 WriteBufferSize int
482 ReadBufferSize int
483 ChannelzParentID int64
484 MaxHeaderListSize *uint32
485}
486
487// NewServerTransport creates a ServerTransport from conn, or returns a non-nil error
488// if it fails.
489func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
490 return newHTTP2Server(conn, config)
491}
492
493// ConnectOptions covers all relevant options for communicating with the server.
494type ConnectOptions struct {
495 // UserAgent is the application user agent.
496 UserAgent string
497 // Dialer specifies how to dial a network address.
498 Dialer func(context.Context, string) (net.Conn, error)
499 // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
500 FailOnNonTempDialError bool
501 // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
502 PerRPCCredentials []credentials.PerRPCCredentials
503 // TransportCredentials stores the Authenticator required to setup a client
504 // connection. Only one of TransportCredentials and CredsBundle is non-nil.
505 TransportCredentials credentials.TransportCredentials
506 // CredsBundle is the credentials bundle to be used. Only one of
507 // TransportCredentials and CredsBundle is non-nil.
508 CredsBundle credentials.Bundle
509 // KeepaliveParams stores the keepalive parameters.
510 KeepaliveParams keepalive.ClientParameters
511 // StatsHandler stores the handler for stats.
512 StatsHandler stats.Handler
513 // InitialWindowSize sets the initial window size for a stream.
514 InitialWindowSize int32
515 // InitialConnWindowSize sets the initial window size for a connection.
516 InitialConnWindowSize int32
517 // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire.
518 WriteBufferSize int
519 // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
520 ReadBufferSize int
521 // ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
522 ChannelzParentID int64
523 // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
524 MaxHeaderListSize *uint32
525}
526
527// TargetInfo contains the information of the target such as network address and metadata.
528type TargetInfo struct {
529 Addr string
530 Metadata interface{}
531 Authority string
532}
533
534// NewClientTransport establishes the transport with the required ConnectOptions
535// and returns it to the caller.
536func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
537 return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
538}
539
540// Options provides additional hints and information for message
541// transmission.
542type Options struct {
543 // Last indicates whether this write is the last piece for
544 // this stream.
545 Last bool
546}
547
548// CallHdr carries the information of a particular RPC.
549type CallHdr struct {
550 // Host specifies the peer's host.
551 Host string
552
553 // Method specifies the operation to perform.
554 Method string
555
556 // SendCompress specifies the compression algorithm applied on
557 // outbound message.
558 SendCompress string
559
560 // Creds specifies credentials.PerRPCCredentials for a call.
561 Creds credentials.PerRPCCredentials
562
563 // ContentSubtype specifies the content-subtype for a request. For example, a
564 // content-subtype of "proto" will result in a content-type of
565 // "application/grpc+proto". The value of ContentSubtype must be all
566 // lowercase, otherwise the behavior is undefined. See
567 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
568 // for more details.
569 ContentSubtype string
570
571 PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
572}
573
574// ClientTransport is the common interface for all gRPC client-side transport
575// implementations.
576type ClientTransport interface {
577 // Close tears down this transport. Once it returns, the transport
578 // should not be accessed any more. The caller must make sure this
579 // is called only once.
580 Close() error
581
582 // GracefulClose starts to tear down the transport. It stops accepting
583 // new RPCs and waits for the completion of the pending RPCs.
584 GracefulClose() error
585
586 // Write sends the data for the given stream. A nil stream indicates
587 // the write is to be performed on the transport as a whole.
588 Write(s *Stream, hdr []byte, data []byte, opts *Options) error
589
590 // NewStream creates a Stream for an RPC.
591 NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
592
593 // CloseStream clears the footprint of a stream when the stream is
594 // not needed any more. The err indicates the error incurred when
595 // CloseStream is called. Must be called when a stream is finished
596 // unless the associated transport is closing.
597 CloseStream(stream *Stream, err error)
598
599 // Error returns a channel that is closed when some I/O error
600 // happens. Typically the caller should have a goroutine to monitor
601 // this in order to take action (e.g., close the current transport
602 // and create a new one) in error case. It should not return nil
603 // once the transport is initiated.
604 Error() <-chan struct{}
605
606 // GoAway returns a channel that is closed when ClientTransport
607 // receives the draining signal from the server (e.g., GOAWAY frame in
608 // HTTP/2).
609 GoAway() <-chan struct{}
610
611 // GetGoAwayReason returns the reason why the GoAway frame was received.
612 GetGoAwayReason() GoAwayReason
613
614 // IncrMsgSent increments the number of messages sent through this transport.
615 IncrMsgSent()
616
617 // IncrMsgRecv increments the number of messages received through this transport.
618 IncrMsgRecv()
619}
620
621// ServerTransport is the common interface for all gRPC server-side transport
622// implementations.
623//
624// Methods may be called concurrently from multiple goroutines, but
625// Write methods for a given Stream will be called serially.
626type ServerTransport interface {
627 // HandleStreams receives incoming streams using the given handler.
628 HandleStreams(func(*Stream), func(context.Context, string) context.Context)
629
630 // WriteHeader sends the header metadata for the given stream.
631 // WriteHeader may not be called on all streams.
632 WriteHeader(s *Stream, md metadata.MD) error
633
634 // Write sends the data for the given stream.
635 // Write may not be called on all streams.
636 Write(s *Stream, hdr []byte, data []byte, opts *Options) error
637
638 // WriteStatus sends the status of a stream to the client. WriteStatus is
639 // the final call made on a stream and always occurs.
640 WriteStatus(s *Stream, st *status.Status) error
641
642 // Close tears down the transport. Once it is called, the transport
643 // should not be accessed any more. All the pending streams and their
644 // handlers will be terminated asynchronously.
645 Close() error
646
647 // RemoteAddr returns the remote network address.
648 RemoteAddr() net.Addr
649
650 // Drain notifies the client this ServerTransport stops accepting new RPCs.
651 Drain()
652
653 // IncrMsgSent increments the number of messages sent through this transport.
654 IncrMsgSent()
655
656 // IncrMsgRecv increments the number of messages received through this transport.
657 IncrMsgRecv()
658}
659
660// connectionErrorf creates a ConnectionError with the specified error description.
661func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
662 return ConnectionError{
663 Desc: fmt.Sprintf(format, a...),
664 temp: temp,
665 err: e,
666 }
667}
668
669// ConnectionError is an error that results in the termination of the
670// entire connection and the retry of all the active streams.
671type ConnectionError struct {
672 Desc string
673 temp bool
674 err error
675}
676
677func (e ConnectionError) Error() string {
678 return fmt.Sprintf("connection error: desc = %q", e.Desc)
679}
680
681// Temporary indicates if this connection error is temporary or fatal.
682func (e ConnectionError) Temporary() bool {
683 return e.temp
684}
685
686// Origin returns the original error of this connection error.
687func (e ConnectionError) Origin() error {
688 // Never return nil error here.
689 // If the original error is nil, return itself.
690 if e.err == nil {
691 return e
692 }
693 return e.err
694}
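// Illustrative sketch (not part of the vendored file): how a caller might
// inspect a transport error to decide whether a retry is reasonable. The
// shouldRetry helper is an assumption for illustration only.
func shouldRetry(err error) bool {
	if ce, ok := err.(ConnectionError); ok {
		// Temporary connection errors (e.g. ErrConnClosing) are retryable;
		// Origin exposes the underlying cause, which is useful for logging.
		return ce.Temporary()
	}
	return false
}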
695
696var (
697 // ErrConnClosing indicates that the transport is closing.
698 ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
699 // errStreamDrain indicates that the stream is rejected because the
700 // connection is draining. This could be caused by a GOAWAY frame or by the
701 // balancer removing the address.
702 errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
703 // errStreamDone is returned from write at the client side to indicate to the
704 // application layer that an error has occurred.
705 errStreamDone = errors.New("the stream is done")
706 // statusGoAway indicates that the server sent a GOAWAY that included this
707 // stream's ID in unprocessed RPCs.
708 statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
709)
710
711// GoAwayReason contains the reason for the GoAway frame received.
712type GoAwayReason uint8
713
714const (
715 // GoAwayInvalid indicates that no GoAway frame has been received.
716 GoAwayInvalid GoAwayReason = 0
717 // GoAwayNoReason is the default value when a GoAway frame is received.
718 GoAwayNoReason GoAwayReason = 1
719 // GoAwayTooManyPings indicates that a GoAway frame with
720 // ErrCodeEnhanceYourCalm was received and that the debug data said
721 // "too_many_pings".
722 GoAwayTooManyPings GoAwayReason = 2
723)
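// Illustrative sketch (not part of the vendored file): reacting to the reason
// reported by GetGoAwayReason after a GOAWAY is received. The onGoAway helper
// is an assumption for illustration only.
func onGoAway(ct ClientTransport) {
	switch ct.GetGoAwayReason() {
	case GoAwayTooManyPings:
		// The server sent ErrCodeEnhanceYourCalm with "too_many_pings";
		// a caller would typically back off its keepalive interval before
		// reconnecting.
	case GoAwayNoReason, GoAwayInvalid:
		// Nothing beyond the usual reconnect path.
	}
}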
724
725// channelzData is used to store channelz-related data for http2Client and http2Server.
726// These fields cannot be embedded in the original structs (e.g. http2Client), since performing
727// atomic operations on an int64 variable on a 32-bit machine requires the user to enforce 64-bit
728// memory alignment. Here, by grouping those int64 fields inside a struct, we enforce that alignment.
729type channelzData struct {
730 kpCount int64
731 // The number of streams that have started, including already finished ones.
732 streamsStarted int64
733 // Client side: The number of streams that have ended successfully by receiving
734 // EoS bit set frame from server.
735 // Server side: The number of streams that have ended successfully by sending
736 // frame with EoS bit set.
737 streamsSucceeded int64
738 streamsFailed int64
739 // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
740 // instead of time.Time since it's more costly to atomically update time.Time variable than int64
741 // variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
742 lastStreamCreatedTime int64
743 msgSent int64
744 msgRecv int64
745 lastMsgSentTime int64
746 lastMsgRecvTime int64
747}
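// Illustrative sketch (not part of the vendored file): because channelzData
// groups its int64 counters in one struct, they stay 64-bit aligned and can be
// updated atomically even on 32-bit platforms. The recordStreamStart helper is
// an assumption for illustration, and "sync/atomic" and "time" are assumed to
// be imported.
func recordStreamStart(czData *channelzData) {
	atomic.AddInt64(&czData.streamsStarted, 1)
	atomic.StoreInt64(&czData.lastStreamCreatedTime, time.Now().UnixNano())
}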
748
749// ContextErr converts an error from the context package into a status error.
750func ContextErr(err error) error {
751 switch err {
752 case context.DeadlineExceeded:
753 return status.Error(codes.DeadlineExceeded, err.Error())
754 case context.Canceled:
755 return status.Error(codes.Canceled, err.Error())
756 }
757 return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
758}
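// Illustrative sketch (not part of the vendored file): converting a context
// error into a gRPC status error once a deadline or cancellation fires. The
// waitForStream helper and the done channel are assumptions for illustration.
func waitForStream(ctx context.Context, done <-chan struct{}) error {
	select {
	case <-ctx.Done():
		// ContextErr maps context.DeadlineExceeded to codes.DeadlineExceeded
		// and context.Canceled to codes.Canceled.
		return ContextErr(ctx.Err())
	case <-done:
		return nil
	}
}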