diff options
author | appilon <apilon@hashicorp.com> | 2019-02-27 16:43:31 -0500 |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-02-27 16:43:31 -0500 |
commit | 844b5a68d8af4791755b8f0ad293cc99f5959183 (patch) | |
tree | 255c250a5c9d4801c74092d33b7337d8c14438ff /vendor/golang.org/x/net/http2 | |
parent | 303b299eeb6b06e939e35905e4b34cb410dd9dc3 (diff) | |
parent | 15c0b25d011f37e7c20aeca9eaf461f78285b8d9 (diff) | |
download | terraform-provider-statuscake-844b5a68d8af4791755b8f0ad293cc99f5959183.tar.gz terraform-provider-statuscake-844b5a68d8af4791755b8f0ad293cc99f5959183.tar.zst terraform-provider-statuscake-844b5a68d8af4791755b8f0ad293cc99f5959183.zip |
Merge pull request #27 from terraform-providers/go-modules-2019-02-22
[MODULES] Switch to Go Modules
Diffstat (limited to 'vendor/golang.org/x/net/http2')
34 files changed, 11835 insertions, 0 deletions
diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore new file mode 100644 index 0000000..190f122 --- /dev/null +++ b/vendor/golang.org/x/net/http2/.gitignore | |||
@@ -0,0 +1,2 @@ | |||
1 | *~ | ||
2 | h2i/h2i | ||
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile new file mode 100644 index 0000000..53fc525 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Dockerfile | |||
@@ -0,0 +1,51 @@ | |||
1 | # | ||
2 | # This Dockerfile builds a recent curl with HTTP/2 client support, using | ||
3 | # a recent nghttp2 build. | ||
4 | # | ||
5 | # See the Makefile for how to tag it. If Docker and that image is found, the | ||
6 | # Go tests use this curl binary for integration tests. | ||
7 | # | ||
8 | |||
9 | FROM ubuntu:trusty | ||
10 | |||
11 | RUN apt-get update && \ | ||
12 | apt-get upgrade -y && \ | ||
13 | apt-get install -y git-core build-essential wget | ||
14 | |||
15 | RUN apt-get install -y --no-install-recommends \ | ||
16 | autotools-dev libtool pkg-config zlib1g-dev \ | ||
17 | libcunit1-dev libssl-dev libxml2-dev libevent-dev \ | ||
18 | automake autoconf | ||
19 | |||
20 | # The list of packages nghttp2 recommends for h2load: | ||
21 | RUN apt-get install -y --no-install-recommends make binutils \ | ||
22 | autoconf automake autotools-dev \ | ||
23 | libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ | ||
24 | libev-dev libevent-dev libjansson-dev libjemalloc-dev \ | ||
25 | cython python3.4-dev python-setuptools | ||
26 | |||
27 | # Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: | ||
28 | ENV NGHTTP2_VER 895da9a | ||
29 | RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git | ||
30 | |||
31 | WORKDIR /root/nghttp2 | ||
32 | RUN git reset --hard $NGHTTP2_VER | ||
33 | RUN autoreconf -i | ||
34 | RUN automake | ||
35 | RUN autoconf | ||
36 | RUN ./configure | ||
37 | RUN make | ||
38 | RUN make install | ||
39 | |||
40 | WORKDIR /root | ||
41 | RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz | ||
42 | RUN tar -zxvf curl-7.45.0.tar.gz | ||
43 | WORKDIR /root/curl-7.45.0 | ||
44 | RUN ./configure --with-ssl --with-nghttp2=/usr/local | ||
45 | RUN make | ||
46 | RUN make install | ||
47 | RUN ldconfig | ||
48 | |||
49 | CMD ["-h"] | ||
50 | ENTRYPOINT ["/usr/local/bin/curl"] | ||
51 | |||
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile new file mode 100644 index 0000000..55fd826 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | curlimage: | ||
2 | docker build -t gohttp2/curl . | ||
3 | |||
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README new file mode 100644 index 0000000..360d5aa --- /dev/null +++ b/vendor/golang.org/x/net/http2/README | |||
@@ -0,0 +1,20 @@ | |||
1 | This is a work-in-progress HTTP/2 implementation for Go. | ||
2 | |||
3 | It will eventually live in the Go standard library and won't require | ||
4 | any changes to your code to use. It will just be automatic. | ||
5 | |||
6 | Status: | ||
7 | |||
8 | * The server support is pretty good. A few things are missing | ||
9 | but are being worked on. | ||
10 | * The client work has just started but shares a lot of code | ||
11 | is coming along much quicker. | ||
12 | |||
13 | Docs are at https://godoc.org/golang.org/x/net/http2 | ||
14 | |||
15 | Demo test server at https://http2.golang.org/ | ||
16 | |||
17 | Help & bug reports welcome! | ||
18 | |||
19 | Contributing: https://golang.org/doc/contribute.html | ||
20 | Bugs: https://golang.org/issue/new?title=x/net/http2:+ | ||
diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go new file mode 100644 index 0000000..698860b --- /dev/null +++ b/vendor/golang.org/x/net/http2/ciphers.go | |||
@@ -0,0 +1,641 @@ | |||
1 | // Copyright 2017 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | // A list of the possible cipher suite ids. Taken from | ||
8 | // http://www.iana.org/assignments/tls-parameters/tls-parameters.txt | ||
9 | |||
10 | const ( | ||
11 | cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 | ||
12 | cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 | ||
13 | cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 | ||
14 | cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 | ||
15 | cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 | ||
16 | cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 | ||
17 | cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 | ||
18 | cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 | ||
19 | cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 | ||
20 | cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 | ||
21 | cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A | ||
22 | cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B | ||
23 | cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C | ||
24 | cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D | ||
25 | cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E | ||
26 | cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F | ||
27 | cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 | ||
28 | cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 | ||
29 | cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 | ||
30 | cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 | ||
31 | cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 | ||
32 | cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 | ||
33 | cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 | ||
34 | cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 | ||
35 | cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 | ||
36 | cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 | ||
37 | cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A | ||
38 | cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B | ||
39 | // Reserved uint16 = 0x001C-1D | ||
40 | cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E | ||
41 | cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F | ||
42 | cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 | ||
43 | cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 | ||
44 | cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 | ||
45 | cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 | ||
46 | cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 | ||
47 | cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 | ||
48 | cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 | ||
49 | cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 | ||
50 | cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 | ||
51 | cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 | ||
52 | cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A | ||
53 | cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B | ||
54 | cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C | ||
55 | cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D | ||
56 | cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E | ||
57 | cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F | ||
58 | cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 | ||
59 | cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 | ||
60 | cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 | ||
61 | cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 | ||
62 | cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 | ||
63 | cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 | ||
64 | cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 | ||
65 | cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 | ||
66 | cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 | ||
67 | cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 | ||
68 | cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A | ||
69 | cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B | ||
70 | cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C | ||
71 | cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D | ||
72 | cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E | ||
73 | cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F | ||
74 | cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 | ||
75 | cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 | ||
76 | cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 | ||
77 | cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 | ||
78 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 | ||
79 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 | ||
80 | cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 | ||
81 | // Reserved uint16 = 0x0047-4F | ||
82 | // Reserved uint16 = 0x0050-58 | ||
83 | // Reserved uint16 = 0x0059-5C | ||
84 | // Unassigned uint16 = 0x005D-5F | ||
85 | // Reserved uint16 = 0x0060-66 | ||
86 | cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 | ||
87 | cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 | ||
88 | cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 | ||
89 | cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A | ||
90 | cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B | ||
91 | cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C | ||
92 | cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D | ||
93 | // Unassigned uint16 = 0x006E-83 | ||
94 | cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 | ||
95 | cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 | ||
96 | cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 | ||
97 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 | ||
98 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 | ||
99 | cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 | ||
100 | cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A | ||
101 | cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B | ||
102 | cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C | ||
103 | cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D | ||
104 | cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E | ||
105 | cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F | ||
106 | cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 | ||
107 | cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 | ||
108 | cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 | ||
109 | cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 | ||
110 | cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 | ||
111 | cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 | ||
112 | cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 | ||
113 | cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 | ||
114 | cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 | ||
115 | cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 | ||
116 | cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A | ||
117 | cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B | ||
118 | cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C | ||
119 | cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D | ||
120 | cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E | ||
121 | cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F | ||
122 | cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 | ||
123 | cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 | ||
124 | cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 | ||
125 | cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 | ||
126 | cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 | ||
127 | cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 | ||
128 | cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 | ||
129 | cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 | ||
130 | cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 | ||
131 | cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 | ||
132 | cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA | ||
133 | cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB | ||
134 | cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC | ||
135 | cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD | ||
136 | cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE | ||
137 | cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF | ||
138 | cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 | ||
139 | cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 | ||
140 | cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 | ||
141 | cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 | ||
142 | cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 | ||
143 | cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 | ||
144 | cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 | ||
145 | cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 | ||
146 | cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 | ||
147 | cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 | ||
148 | cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA | ||
149 | cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB | ||
150 | cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC | ||
151 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD | ||
152 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE | ||
153 | cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF | ||
154 | cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 | ||
155 | cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 | ||
156 | cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 | ||
157 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 | ||
158 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 | ||
159 | cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 | ||
160 | // Unassigned uint16 = 0x00C6-FE | ||
161 | cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF | ||
162 | // Unassigned uint16 = 0x01-55,* | ||
163 | cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 | ||
164 | // Unassigned uint16 = 0x5601 - 0xC000 | ||
165 | cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 | ||
166 | cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 | ||
167 | cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 | ||
168 | cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 | ||
169 | cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 | ||
170 | cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 | ||
171 | cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 | ||
172 | cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 | ||
173 | cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 | ||
174 | cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A | ||
175 | cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B | ||
176 | cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C | ||
177 | cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D | ||
178 | cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E | ||
179 | cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F | ||
180 | cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 | ||
181 | cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 | ||
182 | cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 | ||
183 | cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 | ||
184 | cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 | ||
185 | cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 | ||
186 | cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 | ||
187 | cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 | ||
188 | cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 | ||
189 | cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 | ||
190 | cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A | ||
191 | cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B | ||
192 | cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C | ||
193 | cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D | ||
194 | cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E | ||
195 | cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F | ||
196 | cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 | ||
197 | cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 | ||
198 | cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 | ||
199 | cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 | ||
200 | cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 | ||
201 | cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 | ||
202 | cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 | ||
203 | cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 | ||
204 | cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 | ||
205 | cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 | ||
206 | cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A | ||
207 | cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B | ||
208 | cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C | ||
209 | cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D | ||
210 | cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E | ||
211 | cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F | ||
212 | cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 | ||
213 | cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 | ||
214 | cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 | ||
215 | cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 | ||
216 | cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 | ||
217 | cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 | ||
218 | cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 | ||
219 | cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 | ||
220 | cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 | ||
221 | cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 | ||
222 | cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A | ||
223 | cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B | ||
224 | cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C | ||
225 | cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D | ||
226 | cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E | ||
227 | cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F | ||
228 | cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 | ||
229 | cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 | ||
230 | cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 | ||
231 | cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 | ||
232 | cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 | ||
233 | cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 | ||
234 | cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 | ||
235 | cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 | ||
236 | cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 | ||
237 | cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 | ||
238 | cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A | ||
239 | cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B | ||
240 | cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C | ||
241 | cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D | ||
242 | cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E | ||
243 | cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F | ||
244 | cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 | ||
245 | cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 | ||
246 | cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 | ||
247 | cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 | ||
248 | cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 | ||
249 | cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 | ||
250 | cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 | ||
251 | cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 | ||
252 | cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 | ||
253 | cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 | ||
254 | cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A | ||
255 | cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B | ||
256 | cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C | ||
257 | cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D | ||
258 | cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E | ||
259 | cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F | ||
260 | cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 | ||
261 | cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 | ||
262 | cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 | ||
263 | cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 | ||
264 | cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 | ||
265 | cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 | ||
266 | cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 | ||
267 | cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 | ||
268 | cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 | ||
269 | cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 | ||
270 | cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A | ||
271 | cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B | ||
272 | cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C | ||
273 | cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D | ||
274 | cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E | ||
275 | cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F | ||
276 | cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 | ||
277 | cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 | ||
278 | cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 | ||
279 | cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 | ||
280 | cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 | ||
281 | cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 | ||
282 | cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 | ||
283 | cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 | ||
284 | cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 | ||
285 | cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 | ||
286 | cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A | ||
287 | cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B | ||
288 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C | ||
289 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D | ||
290 | cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E | ||
291 | cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F | ||
292 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 | ||
293 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 | ||
294 | cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 | ||
295 | cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 | ||
296 | cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 | ||
297 | cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 | ||
298 | cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 | ||
299 | cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 | ||
300 | cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 | ||
301 | cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 | ||
302 | cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A | ||
303 | cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B | ||
304 | cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C | ||
305 | cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D | ||
306 | cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E | ||
307 | cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F | ||
308 | cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 | ||
309 | cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 | ||
310 | cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 | ||
311 | cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 | ||
312 | cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 | ||
313 | cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 | ||
314 | cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 | ||
315 | cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 | ||
316 | cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 | ||
317 | cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 | ||
318 | cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A | ||
319 | cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B | ||
320 | cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C | ||
321 | cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D | ||
322 | cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E | ||
323 | cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F | ||
324 | cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 | ||
325 | cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 | ||
326 | cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 | ||
327 | cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 | ||
328 | cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 | ||
329 | cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 | ||
330 | cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 | ||
331 | cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 | ||
332 | cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 | ||
333 | cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 | ||
334 | cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA | ||
335 | cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB | ||
336 | cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC | ||
337 | cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD | ||
338 | cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE | ||
339 | cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF | ||
340 | // Unassigned uint16 = 0xC0B0-FF | ||
341 | // Unassigned uint16 = 0xC1-CB,* | ||
342 | // Unassigned uint16 = 0xCC00-A7 | ||
343 | cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 | ||
344 | cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 | ||
345 | cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA | ||
346 | cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB | ||
347 | cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC | ||
348 | cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD | ||
349 | cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE | ||
350 | ) | ||
351 | |||
352 | // isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. | ||
353 | // References: | ||
354 | // https://tools.ietf.org/html/rfc7540#appendix-A | ||
355 | // Reject cipher suites from Appendix A. | ||
356 | // "This list includes those cipher suites that do not | ||
357 | // offer an ephemeral key exchange and those that are | ||
358 | // based on the TLS null, stream or block cipher type" | ||
359 | func isBadCipher(cipher uint16) bool { | ||
360 | switch cipher { | ||
361 | case cipher_TLS_NULL_WITH_NULL_NULL, | ||
362 | cipher_TLS_RSA_WITH_NULL_MD5, | ||
363 | cipher_TLS_RSA_WITH_NULL_SHA, | ||
364 | cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, | ||
365 | cipher_TLS_RSA_WITH_RC4_128_MD5, | ||
366 | cipher_TLS_RSA_WITH_RC4_128_SHA, | ||
367 | cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, | ||
368 | cipher_TLS_RSA_WITH_IDEA_CBC_SHA, | ||
369 | cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, | ||
370 | cipher_TLS_RSA_WITH_DES_CBC_SHA, | ||
371 | cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, | ||
372 | cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, | ||
373 | cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, | ||
374 | cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, | ||
375 | cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, | ||
376 | cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, | ||
377 | cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, | ||
378 | cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, | ||
379 | cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, | ||
380 | cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, | ||
381 | cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, | ||
382 | cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, | ||
383 | cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, | ||
384 | cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, | ||
385 | cipher_TLS_DH_anon_WITH_RC4_128_MD5, | ||
386 | cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, | ||
387 | cipher_TLS_DH_anon_WITH_DES_CBC_SHA, | ||
388 | cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, | ||
389 | cipher_TLS_KRB5_WITH_DES_CBC_SHA, | ||
390 | cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, | ||
391 | cipher_TLS_KRB5_WITH_RC4_128_SHA, | ||
392 | cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, | ||
393 | cipher_TLS_KRB5_WITH_DES_CBC_MD5, | ||
394 | cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, | ||
395 | cipher_TLS_KRB5_WITH_RC4_128_MD5, | ||
396 | cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, | ||
397 | cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, | ||
398 | cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, | ||
399 | cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, | ||
400 | cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, | ||
401 | cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, | ||
402 | cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, | ||
403 | cipher_TLS_PSK_WITH_NULL_SHA, | ||
404 | cipher_TLS_DHE_PSK_WITH_NULL_SHA, | ||
405 | cipher_TLS_RSA_PSK_WITH_NULL_SHA, | ||
406 | cipher_TLS_RSA_WITH_AES_128_CBC_SHA, | ||
407 | cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, | ||
408 | cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, | ||
409 | cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, | ||
410 | cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, | ||
411 | cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, | ||
412 | cipher_TLS_RSA_WITH_AES_256_CBC_SHA, | ||
413 | cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, | ||
414 | cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, | ||
415 | cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, | ||
416 | cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, | ||
417 | cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, | ||
418 | cipher_TLS_RSA_WITH_NULL_SHA256, | ||
419 | cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, | ||
420 | cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, | ||
421 | cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, | ||
422 | cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, | ||
423 | cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, | ||
424 | cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, | ||
425 | cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, | ||
426 | cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, | ||
427 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, | ||
428 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, | ||
429 | cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, | ||
430 | cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, | ||
431 | cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, | ||
432 | cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, | ||
433 | cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, | ||
434 | cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, | ||
435 | cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, | ||
436 | cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, | ||
437 | cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, | ||
438 | cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, | ||
439 | cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, | ||
440 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, | ||
441 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, | ||
442 | cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, | ||
443 | cipher_TLS_PSK_WITH_RC4_128_SHA, | ||
444 | cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, | ||
445 | cipher_TLS_PSK_WITH_AES_128_CBC_SHA, | ||
446 | cipher_TLS_PSK_WITH_AES_256_CBC_SHA, | ||
447 | cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, | ||
448 | cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, | ||
449 | cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, | ||
450 | cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, | ||
451 | cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, | ||
452 | cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, | ||
453 | cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, | ||
454 | cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, | ||
455 | cipher_TLS_RSA_WITH_SEED_CBC_SHA, | ||
456 | cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, | ||
457 | cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, | ||
458 | cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, | ||
459 | cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, | ||
460 | cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, | ||
461 | cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, | ||
462 | cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, | ||
463 | cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, | ||
464 | cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, | ||
465 | cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, | ||
466 | cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, | ||
467 | cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, | ||
468 | cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, | ||
469 | cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, | ||
470 | cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, | ||
471 | cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, | ||
472 | cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, | ||
473 | cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, | ||
474 | cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, | ||
475 | cipher_TLS_PSK_WITH_NULL_SHA256, | ||
476 | cipher_TLS_PSK_WITH_NULL_SHA384, | ||
477 | cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, | ||
478 | cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, | ||
479 | cipher_TLS_DHE_PSK_WITH_NULL_SHA256, | ||
480 | cipher_TLS_DHE_PSK_WITH_NULL_SHA384, | ||
481 | cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, | ||
482 | cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, | ||
483 | cipher_TLS_RSA_PSK_WITH_NULL_SHA256, | ||
484 | cipher_TLS_RSA_PSK_WITH_NULL_SHA384, | ||
485 | cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||
486 | cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, | ||
487 | cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||
488 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, | ||
489 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||
490 | cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, | ||
491 | cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, | ||
492 | cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, | ||
493 | cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, | ||
494 | cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, | ||
495 | cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, | ||
496 | cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, | ||
497 | cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, | ||
498 | cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, | ||
499 | cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, | ||
500 | cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, | ||
501 | cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, | ||
502 | cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, | ||
503 | cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, | ||
504 | cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, | ||
505 | cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, | ||
506 | cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, | ||
507 | cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, | ||
508 | cipher_TLS_ECDH_RSA_WITH_NULL_SHA, | ||
509 | cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, | ||
510 | cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, | ||
511 | cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, | ||
512 | cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, | ||
513 | cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, | ||
514 | cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, | ||
515 | cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, | ||
516 | cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, | ||
517 | cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, | ||
518 | cipher_TLS_ECDH_anon_WITH_NULL_SHA, | ||
519 | cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, | ||
520 | cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, | ||
521 | cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, | ||
522 | cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, | ||
523 | cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, | ||
524 | cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, | ||
525 | cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, | ||
526 | cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, | ||
527 | cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, | ||
528 | cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, | ||
529 | cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, | ||
530 | cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, | ||
531 | cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, | ||
532 | cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, | ||
533 | cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, | ||
534 | cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, | ||
535 | cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, | ||
536 | cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, | ||
537 | cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, | ||
538 | cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, | ||
539 | cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, | ||
540 | cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, | ||
541 | cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, | ||
542 | cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, | ||
543 | cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, | ||
544 | cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, | ||
545 | cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, | ||
546 | cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, | ||
547 | cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, | ||
548 | cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, | ||
549 | cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, | ||
550 | cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, | ||
551 | cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, | ||
552 | cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, | ||
553 | cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, | ||
554 | cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, | ||
555 | cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, | ||
556 | cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, | ||
557 | cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, | ||
558 | cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, | ||
559 | cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, | ||
560 | cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, | ||
561 | cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, | ||
562 | cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, | ||
563 | cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, | ||
564 | cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, | ||
565 | cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, | ||
566 | cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, | ||
567 | cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, | ||
568 | cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, | ||
569 | cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, | ||
570 | cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, | ||
571 | cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, | ||
572 | cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, | ||
573 | cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, | ||
574 | cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, | ||
575 | cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, | ||
576 | cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, | ||
577 | cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, | ||
578 | cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, | ||
579 | cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, | ||
580 | cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, | ||
581 | cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, | ||
582 | cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, | ||
583 | cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, | ||
584 | cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, | ||
585 | cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, | ||
586 | cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, | ||
587 | cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, | ||
588 | cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, | ||
589 | cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, | ||
590 | cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, | ||
591 | cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, | ||
592 | cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, | ||
593 | cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, | ||
594 | cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, | ||
595 | cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, | ||
596 | cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, | ||
597 | cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, | ||
598 | cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, | ||
599 | cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, | ||
600 | cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, | ||
601 | cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||
602 | cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, | ||
603 | cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, | ||
604 | cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, | ||
605 | cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, | ||
606 | cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, | ||
607 | cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, | ||
608 | cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, | ||
609 | cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, | ||
610 | cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, | ||
611 | cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, | ||
612 | cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, | ||
613 | cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, | ||
614 | cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, | ||
615 | cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, | ||
616 | cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, | ||
617 | cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, | ||
618 | cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, | ||
619 | cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, | ||
620 | cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, | ||
621 | cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, | ||
622 | cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, | ||
623 | cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, | ||
624 | cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, | ||
625 | cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, | ||
626 | cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, | ||
627 | cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, | ||
628 | cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, | ||
629 | cipher_TLS_RSA_WITH_AES_128_CCM, | ||
630 | cipher_TLS_RSA_WITH_AES_256_CCM, | ||
631 | cipher_TLS_RSA_WITH_AES_128_CCM_8, | ||
632 | cipher_TLS_RSA_WITH_AES_256_CCM_8, | ||
633 | cipher_TLS_PSK_WITH_AES_128_CCM, | ||
634 | cipher_TLS_PSK_WITH_AES_256_CCM, | ||
635 | cipher_TLS_PSK_WITH_AES_128_CCM_8, | ||
636 | cipher_TLS_PSK_WITH_AES_256_CCM_8: | ||
637 | return true | ||
638 | default: | ||
639 | return false | ||
640 | } | ||
641 | } | ||
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 0000000..bdf5652 --- /dev/null +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go | |||
@@ -0,0 +1,256 @@ | |||
1 | // Copyright 2015 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // Transport code's client connection pooling. | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "crypto/tls" | ||
11 | "net/http" | ||
12 | "sync" | ||
13 | ) | ||
14 | |||
// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
	// GetClientConn returns a connection to addr suitable for req.
	GetClientConn(req *http.Request, addr string) (*ClientConn, error)
	// MarkDead removes the connection from the pool.
	MarkDead(*ClientConn)
}

// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
// implementations which can close their idle connections.
type clientConnPoolIdleCloser interface {
	ClientConnPool
	closeIdleConnections()
}

// Compile-time checks that both pool implementations can close idle conns.
var (
	_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
	_ clientConnPoolIdleCloser = noDialClientConnPool{}
)

// clientConnPool is the default ClientConnPool, owned by a Transport.
// The zero value is usable; the maps are allocated lazily.
//
// TODO: use singleflight for dialing and addConnCalls?
type clientConnPool struct {
	t *Transport

	mu sync.Mutex // guards the maps below; TODO: maybe switch to RWMutex
	// TODO: add support for sharing conns based on cert names
	// (e.g. share conn for googleapis.com and appspot.com)
	conns        map[string][]*ClientConn // key is host:port
	dialing      map[string]*dialCall     // currently in-flight dials
	keys         map[*ClientConn][]string // reverse index: conn -> keys it is stored under
	addConnCalls map[string]*addConnCall  // in-flight addConnIfNeeded calls
}
45 | |||
// GetClientConn returns a connection to addr for req, dialing a new
// one if the pool has none that can take another request.
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, dialOnMiss)
}

// Values for getClientConn's dialOnMiss parameter.
const (
	dialOnMiss   = true  // start a dial on a pool miss
	noDialOnMiss = false // return ErrNoCachedConn on a pool miss
)
54 | |||
// getClientConn returns a connection to addr that can take a new
// request. When dialOnMiss is true it dials on a pool miss (sharing
// any dial already in flight for addr); otherwise a miss returns
// ErrNoCachedConn.
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	if isConnectionCloseRequest(req) && dialOnMiss {
		// It gets its own connection.
		const singleUse = true
		cc, err := p.t.dialClientConn(addr, singleUse)
		if err != nil {
			return nil, err
		}
		return cc, nil
	}
	p.mu.Lock()
	for _, cc := range p.conns[addr] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return cc, nil
		}
	}
	if !dialOnMiss {
		p.mu.Unlock()
		return nil, ErrNoCachedConn
	}
	// Start (or join) a dial under the lock, then wait for it to
	// finish with the lock released.
	call := p.getStartDialLocked(addr)
	p.mu.Unlock()
	<-call.done
	return call.res, call.err
}
81 | |||
// dialCall is an in-flight Transport dial call to a host.
// Concurrent getClientConn calls for the same addr share one dialCall
// (see getStartDialLocked).
type dialCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	res  *ClientConn   // valid after done is closed
	err  error         // valid after done is closed
}
89 | |||
90 | // requires p.mu is held. | ||
91 | func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { | ||
92 | if call, ok := p.dialing[addr]; ok { | ||
93 | // A dial is already in-flight. Don't start another. | ||
94 | return call | ||
95 | } | ||
96 | call := &dialCall{p: p, done: make(chan struct{})} | ||
97 | if p.dialing == nil { | ||
98 | p.dialing = make(map[string]*dialCall) | ||
99 | } | ||
100 | p.dialing[addr] = call | ||
101 | go call.dial(addr) | ||
102 | return call | ||
103 | } | ||
104 | |||
// dial runs in its own goroutine. It performs the dial, wakes waiters
// by closing c.done, then deregisters itself from p.dialing and, on
// success, records the new connection in the pool.
func (c *dialCall) dial(addr string) {
	const singleUse = false // shared conn
	c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
	close(c.done)

	c.p.mu.Lock()
	delete(c.p.dialing, addr)
	if c.err == nil {
		c.p.addConnLocked(addr, c.res)
	}
	c.p.mu.Unlock()
}
118 | |||
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
// already exist. It coalesces concurrent calls with the same key.
// This is used by the http1 Transport code when it creates a new connection. Because
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
// the protocol), it can get into a situation where it has multiple TLS connections.
// This code decides which ones live or die.
// The return value used is whether c was used.
// c is never closed.
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
	p.mu.Lock()
	// A usable conn already exists for key; c is not needed.
	for _, cc := range p.conns[key] {
		if cc.CanTakeNewRequest() {
			p.mu.Unlock()
			return false, nil
		}
	}
	// Join any in-flight setup call for the same key; only the first
	// caller (dup == false) starts a new one with its own c.
	call, dup := p.addConnCalls[key]
	if !dup {
		if p.addConnCalls == nil {
			p.addConnCalls = make(map[string]*addConnCall)
		}
		call = &addConnCall{
			p:    p,
			done: make(chan struct{}),
		}
		p.addConnCalls[key] = call
		go call.run(t, key, c)
	}
	p.mu.Unlock()

	<-call.done
	if call.err != nil {
		return false, call.err
	}
	// c was consumed only if this call started the setup.
	return !dup, nil
}
155 | |||
// addConnCall is an in-flight call to set up an HTTP/2 ClientConn on
// top of a TLS connection (see clientConnPool.addConnIfNeeded).
type addConnCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	err  error         // valid after done is closed
}
161 | |||
// run builds an HTTP/2 ClientConn over tc, records the result (pool
// entry on success, c.err on failure), removes itself from
// p.addConnCalls so a later call for the same key can run, and finally
// closes c.done to wake waiters.
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
	cc, err := t.NewClientConn(tc)

	p := c.p
	p.mu.Lock()
	if err != nil {
		c.err = err
	} else {
		p.addConnLocked(key, cc)
	}
	delete(p.addConnCalls, key)
	p.mu.Unlock()
	close(c.done)
}
176 | |||
177 | func (p *clientConnPool) addConn(key string, cc *ClientConn) { | ||
178 | p.mu.Lock() | ||
179 | p.addConnLocked(key, cc) | ||
180 | p.mu.Unlock() | ||
181 | } | ||
182 | |||
183 | // p.mu must be held | ||
184 | func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { | ||
185 | for _, v := range p.conns[key] { | ||
186 | if v == cc { | ||
187 | return | ||
188 | } | ||
189 | } | ||
190 | if p.conns == nil { | ||
191 | p.conns = make(map[string][]*ClientConn) | ||
192 | } | ||
193 | if p.keys == nil { | ||
194 | p.keys = make(map[*ClientConn][]string) | ||
195 | } | ||
196 | p.conns[key] = append(p.conns[key], cc) | ||
197 | p.keys[cc] = append(p.keys[cc], key) | ||
198 | } | ||
199 | |||
// MarkDead removes cc from the pool so it will not be returned by
// future GetClientConn calls.
func (p *clientConnPool) MarkDead(cc *ClientConn) {
	p.mu.Lock()
	defer p.mu.Unlock()
	// Remove cc from every per-key conn list it was registered under.
	for _, key := range p.keys[cc] {
		vv, ok := p.conns[key]
		if !ok {
			continue
		}
		newList := filterOutClientConn(vv, cc)
		if len(newList) > 0 {
			p.conns[key] = newList
		} else {
			// Last conn for this key; drop the map entry entirely.
			delete(p.conns, key)
		}
	}
	// Drop the reverse index entry.
	delete(p.keys, cc)
}
217 | |||
// closeIdleConnections asks every pooled connection to close itself
// if it is currently idle (via closeIfIdle).
func (p *clientConnPool) closeIdleConnections() {
	p.mu.Lock()
	defer p.mu.Unlock()
	// TODO: don't close a cc if it was just added to the pool
	// milliseconds ago and has never been used. There's currently
	// a small race window with the HTTP/1 Transport's integration
	// where it can add an idle conn just before using it, and
	// somebody else can concurrently call CloseIdleConns and
	// break some caller's RoundTrip.
	for _, vv := range p.conns {
		for _, cc := range vv {
			cc.closeIfIdle()
		}
	}
}
233 | |||
234 | func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { | ||
235 | out := in[:0] | ||
236 | for _, v := range in { | ||
237 | if v != exclude { | ||
238 | out = append(out, v) | ||
239 | } | ||
240 | } | ||
241 | // If we filtered it out, zero out the last item to prevent | ||
242 | // the GC from seeing it. | ||
243 | if len(in) != len(out) { | ||
244 | in[len(in)-1] = nil | ||
245 | } | ||
246 | return out | ||
247 | } | ||
248 | |||
// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }

// GetClientConn returns a cached connection to addr, or
// ErrNoCachedConn if none is available; it never dials.
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, noDialOnMiss)
}
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go new file mode 100644 index 0000000..b65fc6d --- /dev/null +++ b/vendor/golang.org/x/net/http2/configure_transport.go | |||
@@ -0,0 +1,80 @@ | |||
1 | // Copyright 2015 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build go1.6 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "crypto/tls" | ||
11 | "fmt" | ||
12 | "net/http" | ||
13 | ) | ||
14 | |||
// configureTransport wires HTTP/2 support into the HTTP/1 transport
// t1: it creates an HTTP/2 Transport sharing a no-dial conn pool,
// registers an "https" RoundTripper that only uses cached HTTP/2
// conns, advertises "h2" via ALPN, and installs a TLSNextProto hook
// that hands completed TLS connections over to the HTTP/2 side.
func configureTransport(t1 *http.Transport) (*Transport, error) {
	connPool := new(clientConnPool)
	t2 := &Transport{
		ConnPool: noDialClientConnPool{connPool},
		t1:       t1,
	}
	connPool.t = t2
	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
		return nil, err
	}
	if t1.TLSClientConfig == nil {
		t1.TLSClientConfig = new(tls.Config)
	}
	// Offer "h2" first in the ALPN list, keeping "http/1.1" as a
	// fallback. Existing entries are preserved.
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
		t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
	}
	// upgradeFn is invoked by the HTTP/1 transport when a TLS conn
	// negotiates "h2"; it donates c to the shared pool.
	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
		addr := authorityAddr("https", authority)
		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
			go c.Close()
			return erringRoundTripper{err}
		} else if !used {
			// Turns out we don't need this c.
			// For example, two goroutines made requests to the same host
			// at the same time, both kicking off TCP dials. (since protocol
			// was unknown)
			go c.Close()
		}
		return t2
	}
	if m := t1.TLSNextProto; len(m) == 0 {
		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
			"h2": upgradeFn,
		}
	} else {
		m["h2"] = upgradeFn
	}
	return t2, nil
}
57 | |||
58 | // registerHTTPSProtocol calls Transport.RegisterProtocol but | ||
59 | // converting panics into errors. | ||
60 | func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { | ||
61 | defer func() { | ||
62 | if e := recover(); e != nil { | ||
63 | err = fmt.Errorf("%v", e) | ||
64 | } | ||
65 | }() | ||
66 | t.RegisterProtocol("https", rt) | ||
67 | return nil | ||
68 | } | ||
69 | |||
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already a cached connection to the host.
type noDialH2RoundTripper struct{ t *Transport }

// RoundTrip attempts req on a cached HTTP/2 connection, translating a
// pool miss (ErrNoCachedConn) into http.ErrSkipAltProtocol so the
// HTTP/1 transport falls back to its own dial path.
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	res, err := rt.t.RoundTrip(req)
	if err == ErrNoCachedConn {
		return nil, http.ErrSkipAltProtocol
	}
	return res, err
}
diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 0000000..a3067f8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go | |||
@@ -0,0 +1,146 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import ( | ||
8 | "errors" | ||
9 | "fmt" | ||
10 | "sync" | ||
11 | ) | ||
12 | |||
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// dataChunkSizeClasses and dataChunkPools are parallel: pool i hands
// out chunks of exactly dataChunkSizeClasses[i] bytes.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
	dataChunkSizeClasses = []int{
		1 << 10,
		2 << 10,
		4 << 10,
		8 << 10,
		16 << 10,
	}
	dataChunkPools = [...]sync.Pool{
		{New: func() interface{} { return make([]byte, 1<<10) }},
		{New: func() interface{} { return make([]byte, 2<<10) }},
		{New: func() interface{} { return make([]byte, 4<<10) }},
		{New: func() interface{} { return make([]byte, 8<<10) }},
		{New: func() interface{} { return make([]byte, 16<<10) }},
	}
)
39 | |||
40 | func getDataBufferChunk(size int64) []byte { | ||
41 | i := 0 | ||
42 | for ; i < len(dataChunkSizeClasses)-1; i++ { | ||
43 | if size <= int64(dataChunkSizeClasses[i]) { | ||
44 | break | ||
45 | } | ||
46 | } | ||
47 | return dataChunkPools[i].Get().([]byte) | ||
48 | } | ||
49 | |||
50 | func putDataBufferChunk(p []byte) { | ||
51 | for i, n := range dataChunkSizeClasses { | ||
52 | if len(p) == n { | ||
53 | dataChunkPools[i].Put(p) | ||
54 | return | ||
55 | } | ||
56 | } | ||
57 | panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) | ||
58 | } | ||
59 | |||
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
	chunks   [][]byte
	r        int   // next byte to read is chunks[0][r]
	w        int   // next byte to write is chunks[len(chunks)-1][w]
	size     int   // total buffered bytes
	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}

// errReadEmpty is returned by Read when no buffered data is available.
var errReadEmpty = errors.New("read from empty dataBuffer")
74 | |||
// Read copies bytes from the buffer into p, returning the number of
// bytes copied. It is an error (errReadEmpty) to read when no data is
// available; Read never returns io.EOF.
func (b *dataBuffer) Read(p []byte) (int, error) {
	if b.size == 0 {
		return 0, errReadEmpty
	}
	var ntotal int
	for len(p) > 0 && b.size > 0 {
		readFrom := b.bytesFromFirstChunk()
		n := copy(p, readFrom)
		p = p[n:]
		ntotal += n
		b.r += n
		b.size -= n
		// If the first chunk has been consumed, advance to the next chunk.
		if b.r == len(b.chunks[0]) {
			// Recycle the spent chunk, shift the rest down one slot,
			// and nil the vacated tail so the GC can reclaim it.
			putDataBufferChunk(b.chunks[0])
			end := len(b.chunks) - 1
			copy(b.chunks[:end], b.chunks[1:])
			b.chunks[end] = nil
			b.chunks = b.chunks[:end]
			b.r = 0
		}
	}
	return ntotal, nil
}
101 | |||
102 | func (b *dataBuffer) bytesFromFirstChunk() []byte { | ||
103 | if len(b.chunks) == 1 { | ||
104 | return b.chunks[0][b.r:b.w] | ||
105 | } | ||
106 | return b.chunks[0][b.r:] | ||
107 | } | ||
108 | |||
// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
	return b.size
}
113 | |||
// Write appends p to the buffer, allocating pooled chunks as needed.
// It always returns len(p) with a nil error (io.Writer signature).
func (b *dataBuffer) Write(p []byte) (int, error) {
	ntotal := len(p)
	for len(p) > 0 {
		// If the last chunk is empty, allocate a new chunk. Try to allocate
		// enough to fully copy p plus any additional bytes we expect to
		// receive. However, this may allocate less than len(p).
		want := int64(len(p))
		if b.expected > want {
			want = b.expected
		}
		chunk := b.lastChunkOrAlloc(want)
		n := copy(chunk[b.w:], p)
		p = p[n:]
		b.w += n
		b.size += n
		b.expected -= int64(n)
	}
	return ntotal, nil
}
134 | |||
135 | func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { | ||
136 | if len(b.chunks) != 0 { | ||
137 | last := b.chunks[len(b.chunks)-1] | ||
138 | if b.w < len(last) { | ||
139 | return last | ||
140 | } | ||
141 | } | ||
142 | chunk := getDataBufferChunk(want) | ||
143 | b.chunks = append(b.chunks, chunk) | ||
144 | b.w = 0 | ||
145 | return chunk | ||
146 | } | ||
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 0000000..71f2c46 --- /dev/null +++ b/vendor/golang.org/x/net/http2/errors.go | |||
@@ -0,0 +1,133 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import ( | ||
8 | "errors" | ||
9 | "fmt" | ||
10 | ) | ||
11 | |||
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32

// Error codes defined by the HTTP/2 spec; their spec names are the
// ALL_CAPS strings in errCodeName below.
const (
	ErrCodeNo                 ErrCode = 0x0
	ErrCodeProtocol           ErrCode = 0x1
	ErrCodeInternal           ErrCode = 0x2
	ErrCodeFlowControl        ErrCode = 0x3
	ErrCodeSettingsTimeout    ErrCode = 0x4
	ErrCodeStreamClosed       ErrCode = 0x5
	ErrCodeFrameSize          ErrCode = 0x6
	ErrCodeRefusedStream      ErrCode = 0x7
	ErrCodeCancel             ErrCode = 0x8
	ErrCodeCompression        ErrCode = 0x9
	ErrCodeConnect            ErrCode = 0xa
	ErrCodeEnhanceYourCalm    ErrCode = 0xb
	ErrCodeInadequateSecurity ErrCode = 0xc
	ErrCodeHTTP11Required     ErrCode = 0xd
)

// errCodeName maps each known ErrCode to the spec's name for it;
// used by ErrCode.String.
var errCodeName = map[ErrCode]string{
	ErrCodeNo:                 "NO_ERROR",
	ErrCodeProtocol:           "PROTOCOL_ERROR",
	ErrCodeInternal:           "INTERNAL_ERROR",
	ErrCodeFlowControl:        "FLOW_CONTROL_ERROR",
	ErrCodeSettingsTimeout:    "SETTINGS_TIMEOUT",
	ErrCodeStreamClosed:       "STREAM_CLOSED",
	ErrCodeFrameSize:          "FRAME_SIZE_ERROR",
	ErrCodeRefusedStream:      "REFUSED_STREAM",
	ErrCodeCancel:             "CANCEL",
	ErrCodeCompression:        "COMPRESSION_ERROR",
	ErrCodeConnect:            "CONNECT_ERROR",
	ErrCodeEnhanceYourCalm:    "ENHANCE_YOUR_CALM",
	ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
	ErrCodeHTTP11Required:     "HTTP_1_1_REQUIRED",
}
48 | |||
49 | func (e ErrCode) String() string { | ||
50 | if s, ok := errCodeName[e]; ok { | ||
51 | return s | ||
52 | } | ||
53 | return fmt.Sprintf("unknown error code 0x%x", uint32(e)) | ||
54 | } | ||
55 | |||
// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode

// Error implements the error interface, naming the underlying ErrCode.
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
61 | |||
62 | // StreamError is an error that only affects one stream within an | ||
63 | // HTTP/2 connection. | ||
64 | type StreamError struct { | ||
65 | StreamID uint32 | ||
66 | Code ErrCode | ||
67 | Cause error // optional additional detail | ||
68 | } | ||
69 | |||
70 | func streamError(id uint32, code ErrCode) StreamError { | ||
71 | return StreamError{StreamID: id, Code: code} | ||
72 | } | ||
73 | |||
74 | func (e StreamError) Error() string { | ||
75 | if e.Cause != nil { | ||
76 | return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) | ||
77 | } | ||
78 | return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) | ||
79 | } | ||
80 | |||
// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type goAwayFlowError struct{}

// Error implements the error interface.
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }

// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type connError struct {
	Code   ErrCode // the ConnectionError error code
	Reason string  // additional reason
}

// Error formats the code and debugging reason.
func (e connError) Error() string {
	return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}
105 | |||
// pseudoHeaderError is an error for an unrecognized HTTP/2
// pseudo-header field name; the string is the offending name.
type pseudoHeaderError string

func (e pseudoHeaderError) Error() string {
	return fmt.Sprintf("invalid pseudo-header %q", string(e))
}

// duplicatePseudoHeaderError is an error for a pseudo-header field
// that appeared more than once; the string is the repeated name.
type duplicatePseudoHeaderError string

func (e duplicatePseudoHeaderError) Error() string {
	return fmt.Sprintf("duplicate pseudo-header %q", string(e))
}

// headerFieldNameError is an error for an invalid header field name;
// the string is the offending name.
type headerFieldNameError string

func (e headerFieldNameError) Error() string {
	return fmt.Sprintf("invalid header field name %q", string(e))
}

// headerFieldValueError is an error for an invalid header field value;
// the string is the offending value.
type headerFieldValueError string

func (e headerFieldValueError) Error() string {
	return fmt.Sprintf("invalid header field value %q", string(e))
}

// Pseudo-header ordering/mixing errors.
var (
	errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
	errPseudoAfterRegular   = errors.New("pseudo header field after regular")
)
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 0000000..957de25 --- /dev/null +++ b/vendor/golang.org/x/net/http2/flow.go | |||
@@ -0,0 +1,50 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // Flow control | ||
6 | |||
7 | package http2 | ||
8 | |||
// flow is the flow control window's size.
type flow struct {
	// n is the number of DATA bytes we're allowed to send.
	// A flow is kept both on a conn and a per-stream.
	n int32

	// conn points to the shared connection-level flow that is
	// shared by all streams on that conn. It is nil for the flow
	// that's on the conn directly.
	conn *flow
}

// setConnFlow links this (per-stream) flow to the conn-level flow cf.
func (f *flow) setConnFlow(cf *flow) { f.conn = cf }

// available reports how many DATA bytes may be sent now: the smaller
// of the stream window and, when linked, the conn window.
func (f *flow) available() int32 {
	if f.conn == nil || f.n <= f.conn.n {
		return f.n
	}
	return f.conn.n
}

// take deducts n bytes from the window (and from the conn window,
// when linked). It panics if n exceeds the available window.
func (f *flow) take(n int32) {
	if n > f.available() {
		panic("internal error: took too much")
	}
	f.n -= n
	if f.conn != nil {
		f.conn.n -= n
	}
}

// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
	const max = 1<<31 - 1
	if n > max-f.n {
		return false
	}
	f.n += n
	return true
}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 0000000..3b14890 --- /dev/null +++ b/vendor/golang.org/x/net/http2/frame.go | |||
@@ -0,0 +1,1579 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import ( | ||
8 | "bytes" | ||
9 | "encoding/binary" | ||
10 | "errors" | ||
11 | "fmt" | ||
12 | "io" | ||
13 | "log" | ||
14 | "strings" | ||
15 | "sync" | ||
16 | |||
17 | "golang.org/x/net/http2/hpack" | ||
18 | "golang.org/x/net/lex/httplex" | ||
19 | ) | ||
20 | |||
// frameHeaderLen is the fixed size, in bytes, of an HTTP/2 frame header:
// 3 bytes of length, 1 type, 1 flags, and 4 of stream ID.
const frameHeaderLen = 9

var padZeros = make([]byte, 255) // zeros for padding
24 | |||
// A FrameType is a registered frame type as defined in
// http://http2.github.io/http2-spec/#rfc.section.11.2
type FrameType uint8

const (
	FrameData         FrameType = 0x0
	FrameHeaders      FrameType = 0x1
	FramePriority     FrameType = 0x2
	FrameRSTStream    FrameType = 0x3
	FrameSettings     FrameType = 0x4
	FramePushPromise  FrameType = 0x5
	FramePing         FrameType = 0x6
	FrameGoAway       FrameType = 0x7
	FrameWindowUpdate FrameType = 0x8
	FrameContinuation FrameType = 0x9
)

// frameName maps each registered FrameType to its spec name.
var frameName = map[FrameType]string{
	FrameData:         "DATA",
	FrameHeaders:      "HEADERS",
	FramePriority:     "PRIORITY",
	FrameRSTStream:    "RST_STREAM",
	FrameSettings:     "SETTINGS",
	FramePushPromise:  "PUSH_PROMISE",
	FramePing:         "PING",
	FrameGoAway:       "GOAWAY",
	FrameWindowUpdate: "WINDOW_UPDATE",
	FrameContinuation: "CONTINUATION",
}

// String returns the frame type's registered name, or an
// UNKNOWN_FRAME_TYPE_n placeholder for extension types.
func (t FrameType) String() string {
	s, ok := frameName[t]
	if !ok {
		return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
	}
	return s
}
61 | |||
// Flags is a bitmask of HTTP/2 flags.
// The meaning of flags varies depending on the frame type.
type Flags uint8

// Has reports whether f contains all (0 or more) flags in v.
func (f Flags) Has(v Flags) bool {
	// v is contained in f exactly when no bit of v is missing from f.
	return v&^f == 0
}
70 | |||
// Frame-specific FrameHeader flag bits.
const (
	// Data Frame
	FlagDataEndStream Flags = 0x1
	FlagDataPadded    Flags = 0x8

	// Headers Frame
	FlagHeadersEndStream  Flags = 0x1
	FlagHeadersEndHeaders Flags = 0x4
	FlagHeadersPadded     Flags = 0x8
	FlagHeadersPriority   Flags = 0x20

	// Settings Frame
	FlagSettingsAck Flags = 0x1

	// Ping Frame
	FlagPingAck Flags = 0x1

	// Continuation Frame
	FlagContinuationEndHeaders Flags = 0x4

	// Push Promise Frame
	FlagPushPromiseEndHeaders Flags = 0x4
	FlagPushPromisePadded     Flags = 0x8
)

// flagName maps, per frame type, each defined flag bit to its spec
// name; used when rendering frames for debug logs.
var flagName = map[FrameType]map[Flags]string{
	FrameData: {
		FlagDataEndStream: "END_STREAM",
		FlagDataPadded:    "PADDED",
	},
	FrameHeaders: {
		FlagHeadersEndStream:  "END_STREAM",
		FlagHeadersEndHeaders: "END_HEADERS",
		FlagHeadersPadded:     "PADDED",
		FlagHeadersPriority:   "PRIORITY",
	},
	FrameSettings: {
		FlagSettingsAck: "ACK",
	},
	FramePing: {
		FlagPingAck: "ACK",
	},
	FrameContinuation: {
		FlagContinuationEndHeaders: "END_HEADERS",
	},
	FramePushPromise: {
		FlagPushPromiseEndHeaders: "END_HEADERS",
		FlagPushPromisePadded:     "PADDED",
	},
}
121 | |||
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)

// frameParsers maps each registered frame type to its parser.
var frameParsers = map[FrameType]frameParser{
	FrameData:         parseDataFrame,
	FrameHeaders:      parseHeadersFrame,
	FramePriority:     parsePriorityFrame,
	FrameRSTStream:    parseRSTStreamFrame,
	FrameSettings:     parseSettingsFrame,
	FramePushPromise:  parsePushPromise,
	FramePing:         parsePingFrame,
	FrameGoAway:       parseGoAwayFrame,
	FrameWindowUpdate: parseWindowUpdateFrame,
	FrameContinuation: parseContinuationFrame,
}

// typeFrameParser returns the parser for t, falling back to
// parseUnknownFrame for extension frame types.
func typeFrameParser(t FrameType) frameParser {
	if f := frameParsers[t]; f != nil {
		return f
	}
	return parseUnknownFrame
}
146 | |||
// A FrameHeader is the 9 byte header of all HTTP/2 frames.
//
// See http://http2.github.io/http2-spec/#FrameHeader
type FrameHeader struct {
	// valid is set while the caller owns the frame's buffers;
	// it is cleared by invalidate when ReadFrame reuses them.
	valid bool // caller can access []byte fields in the Frame

	// Type is the 1 byte frame type. There are ten standard frame
	// types, but extension frame types may be written by WriteRawFrame
	// and will be returned by ReadFrame (as UnknownFrame).
	Type FrameType

	// Flags are the 1 byte of 8 potential bit flags per frame.
	// They are specific to the frame type.
	Flags Flags

	// Length is the length of the frame, not including the 9 byte header.
	// The maximum size is one byte less than 16MB (uint24), but only
	// frames up to 16KB are allowed without peer agreement.
	Length uint32

	// StreamID is which stream this frame is for. Certain frames
	// are not stream-specific, in which case this field is 0.
	StreamID uint32
}

// Header returns h. It exists so FrameHeaders can be embedded in other
// specific frame types and implement the Frame interface.
func (h FrameHeader) Header() FrameHeader { return h }
175 | |||
176 | func (h FrameHeader) String() string { | ||
177 | var buf bytes.Buffer | ||
178 | buf.WriteString("[FrameHeader ") | ||
179 | h.writeDebug(&buf) | ||
180 | buf.WriteByte(']') | ||
181 | return buf.String() | ||
182 | } | ||
183 | |||
// writeDebug appends a human-readable summary of h to buf: the frame
// type, any set flag bits ('|'-separated, named where known, hex
// otherwise), the stream ID when nonzero, and the payload length.
func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
	buf.WriteString(h.Type.String())
	if h.Flags != 0 {
		buf.WriteString(" flags=")
		set := 0
		for i := uint8(0); i < 8; i++ {
			if h.Flags&(1<<i) == 0 {
				continue
			}
			set++
			if set > 1 {
				buf.WriteByte('|')
			}
			// Look up the per-type name for this bit; unknown bits
			// are printed as raw hex.
			name := flagName[h.Type][Flags(1<<i)]
			if name != "" {
				buf.WriteString(name)
			} else {
				fmt.Fprintf(buf, "0x%x", 1<<i)
			}
		}
	}
	if h.StreamID != 0 {
		fmt.Fprintf(buf, " stream=%d", h.StreamID)
	}
	fmt.Fprintf(buf, " len=%d", h.Length)
}

// checkValid panics if the frame's buffers have been invalidated by a
// subsequent ReadFrame; accessors call it before exposing []byte fields.
func (h *FrameHeader) checkValid() {
	if !h.valid {
		panic("Frame accessor called on non-owned Frame")
	}
}

// invalidate marks the frame's buffers as no longer owned by the caller.
func (h *FrameHeader) invalidate() { h.valid = false }
218 | |||
// fhBytes is a pool of 9-byte scratch buffers for frame headers.
// Used only by ReadFrameHeader.
var fhBytes = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, frameHeaderLen)
		return &buf
	},
}

// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
// Most users should use Framer.ReadFrame instead.
func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
	bufp := fhBytes.Get().(*[]byte)
	defer fhBytes.Put(bufp)
	return readFrameHeader(*bufp, r)
}
235 | |||
236 | func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) { | ||
237 | _, err := io.ReadFull(r, buf[:frameHeaderLen]) | ||
238 | if err != nil { | ||
239 | return FrameHeader{}, err | ||
240 | } | ||
241 | return FrameHeader{ | ||
242 | Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])), | ||
243 | Type: FrameType(buf[3]), | ||
244 | Flags: Flags(buf[4]), | ||
245 | StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1), | ||
246 | valid: true, | ||
247 | }, nil | ||
248 | } | ||
249 | |||
// A Frame is the base interface implemented by all frame types.
// Callers will generally type-assert the specific frame type:
// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
//
// Frames are only valid until the next call to Framer.ReadFrame.
type Frame interface {
	Header() FrameHeader

	// invalidate is called by Framer.ReadFrame to mark this
	// frame's buffers as invalid, since the subsequent
	// frame will reuse them.
	invalidate()
}
263 | |||
// A Framer reads and writes Frames.
type Framer struct {
	r         io.Reader
	lastFrame Frame // frame returned by the previous ReadFrame; invalidated on the next read
	errDetail error // detailed reason for the last ReadFrame error, if any

	// lastHeaderStream is non-zero if the last frame was an
	// unfinished HEADERS/CONTINUATION.
	lastHeaderStream uint32

	maxReadSize uint32               // limit on incoming frame payload length
	headerBuf   [frameHeaderLen]byte // scratch space for reading frame headers

	// TODO: let getReadBuf be configurable, and use a less memory-pinning
	// allocator in server.go to minimize memory pinned for many idle conns.
	// Will probably also need to make frame invalidation have a hook too.
	getReadBuf func(size uint32) []byte
	readBuf    []byte // cache for default getReadBuf

	maxWriteSize uint32 // zero means unlimited; TODO: implement

	w    io.Writer
	wbuf []byte // write buffer, reused across frames by startWrite/endWrite

	// AllowIllegalWrites permits the Framer's Write methods to
	// write frames that do not conform to the HTTP/2 spec. This
	// permits using the Framer to test other HTTP/2
	// implementations' conformance to the spec.
	// If false, the Write methods will prefer to return an error
	// rather than comply.
	AllowIllegalWrites bool

	// AllowIllegalReads permits the Framer's ReadFrame method
	// to return non-compliant frames or frame orders.
	// This is for testing and permits using the Framer to test
	// other HTTP/2 implementations' conformance to the spec.
	// It is not compatible with ReadMetaHeaders.
	AllowIllegalReads bool

	// ReadMetaHeaders if non-nil causes ReadFrame to merge
	// HEADERS and CONTINUATION frames together and return
	// MetaHeadersFrame instead.
	ReadMetaHeaders *hpack.Decoder

	// MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
	// It's used only if ReadMetaHeaders is set; 0 means a sane default
	// (currently 16MB)
	// If the limit is hit, MetaHeadersFrame.Truncated is set true.
	MaxHeaderListSize uint32

	// TODO: track which type of frame & with which flags was sent
	// last. Then return an error (unless AllowIllegalWrites) if
	// we're in the middle of a header block and a
	// non-Continuation or Continuation on a different stream is
	// attempted to be written.

	logReads, logWrites bool // enable debug logging of read/written frames

	debugFramer       *Framer // only use for logging written writes
	debugFramerBuf    *bytes.Buffer
	debugReadLoggerf  func(string, ...interface{})
	debugWriteLoggerf func(string, ...interface{})

	frameCache *frameCache // nil if frames aren't reused (default)
}
329 | |||
330 | func (fr *Framer) maxHeaderListSize() uint32 { | ||
331 | if fr.MaxHeaderListSize == 0 { | ||
332 | return 16 << 20 // sane default, per docs | ||
333 | } | ||
334 | return fr.MaxHeaderListSize | ||
335 | } | ||
336 | |||
337 | func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { | ||
338 | // Write the FrameHeader. | ||
339 | f.wbuf = append(f.wbuf[:0], | ||
340 | 0, // 3 bytes of length, filled in in endWrite | ||
341 | 0, | ||
342 | 0, | ||
343 | byte(ftype), | ||
344 | byte(flags), | ||
345 | byte(streamID>>24), | ||
346 | byte(streamID>>16), | ||
347 | byte(streamID>>8), | ||
348 | byte(streamID)) | ||
349 | } | ||
350 | |||
// endWrite backfills the frame length into the reserved header bytes
// and flushes the buffered frame to the underlying writer with a
// single Write call.
func (f *Framer) endWrite() error {
	// Now that we know the final size, fill in the FrameHeader in
	// the space previously reserved for it. Abuse append.
	length := len(f.wbuf) - frameHeaderLen
	if length >= (1 << 24) {
		return ErrFrameTooLarge
	}
	// append here rewrites wbuf[0:3] in place; the result is discarded
	// because the slice header doesn't change.
	_ = append(f.wbuf[:0],
		byte(length>>16),
		byte(length>>8),
		byte(length))
	if f.logWrites {
		f.logWrite()
	}

	n, err := f.w.Write(f.wbuf)
	if err == nil && n != len(f.wbuf) {
		err = io.ErrShortWrite
	}
	return err
}

// logWrite decodes the just-encoded frame with a private debug Framer
// and logs a summary of it via debugWriteLoggerf.
func (f *Framer) logWrite() {
	if f.debugFramer == nil {
		f.debugFramerBuf = new(bytes.Buffer)
		f.debugFramer = NewFramer(nil, f.debugFramerBuf)
		f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
		// Let us read anything, even if we accidentally wrote it
		// in the wrong order:
		f.debugFramer.AllowIllegalReads = true
	}
	f.debugFramerBuf.Write(f.wbuf)
	fr, err := f.debugFramer.ReadFrame()
	if err != nil {
		f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
		return
	}
	f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
}

// Append helpers for building frame payloads in f.wbuf.
// Multi-byte values are written big-endian, per the spec.
func (f *Framer) writeByte(v byte)     { f.wbuf = append(f.wbuf, v) }
func (f *Framer) writeBytes(v []byte)  { f.wbuf = append(f.wbuf, v...) }
func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
func (f *Framer) writeUint32(v uint32) {
	f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}

const (
	// minMaxFrameSize is the smallest max-frame-size allowed.
	minMaxFrameSize = 1 << 14
	// maxFrameSize is the largest length expressible in the
	// 24-bit frame-length field.
	maxFrameSize = 1<<24 - 1
)
402 | |||
403 | // SetReuseFrames allows the Framer to reuse Frames. | ||
404 | // If called on a Framer, Frames returned by calls to ReadFrame are only | ||
405 | // valid until the next call to ReadFrame. | ||
406 | func (fr *Framer) SetReuseFrames() { | ||
407 | if fr.frameCache != nil { | ||
408 | return | ||
409 | } | ||
410 | fr.frameCache = &frameCache{} | ||
411 | } | ||
412 | |||
413 | type frameCache struct { | ||
414 | dataFrame DataFrame | ||
415 | } | ||
416 | |||
417 | func (fc *frameCache) getDataFrame() *DataFrame { | ||
418 | if fc == nil { | ||
419 | return &DataFrame{} | ||
420 | } | ||
421 | return &fc.dataFrame | ||
422 | } | ||
423 | |||
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
	fr := &Framer{
		w:                 w,
		r:                 r,
		logReads:          logFrameReads,
		logWrites:         logFrameWrites,
		debugReadLoggerf:  log.Printf,
		debugWriteLoggerf: log.Printf,
	}
	// The default read buffer grows to the largest frame seen and is
	// then reused across ReadFrame calls.
	fr.getReadBuf = func(size uint32) []byte {
		if cap(fr.readBuf) >= int(size) {
			return fr.readBuf[:size]
		}
		fr.readBuf = make([]byte, size)
		return fr.readBuf
	}
	fr.SetMaxReadFrameSize(maxFrameSize)
	return fr
}
444 | |||
445 | // SetMaxReadFrameSize sets the maximum size of a frame | ||
446 | // that will be read by a subsequent call to ReadFrame. | ||
447 | // It is the caller's responsibility to advertise this | ||
448 | // limit with a SETTINGS frame. | ||
449 | func (fr *Framer) SetMaxReadFrameSize(v uint32) { | ||
450 | if v > maxFrameSize { | ||
451 | v = maxFrameSize | ||
452 | } | ||
453 | fr.maxReadSize = v | ||
454 | } | ||
455 | |||
// ErrorDetail returns a more detailed error of the last error
// returned by Framer.ReadFrame. For instance, if ReadFrame
// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
// will say exactly what was invalid. ErrorDetail is not guaranteed
// to return a non-nil value and like the rest of the http2 package,
// its return value is not protected by an API compatibility promise.
// ErrorDetail is reset after the next call to ReadFrame.
func (fr *Framer) ErrorDetail() error {
	return fr.errDetail
}

// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
// sends a frame that is larger than declared with SetMaxReadFrameSize.
var ErrFrameTooLarge = errors.New("http2: frame too large")
470 | |||
471 | // terminalReadFrameError reports whether err is an unrecoverable | ||
472 | // error from ReadFrame and no other frames should be read. | ||
473 | func terminalReadFrameError(err error) bool { | ||
474 | if _, ok := err.(StreamError); ok { | ||
475 | return false | ||
476 | } | ||
477 | return err != nil | ||
478 | } | ||
479 | |||
// ReadFrame reads a single frame. The returned Frame is only valid
// until the next call to ReadFrame.
//
// If the frame is larger than previously set with SetMaxReadFrameSize, the
// returned error is ErrFrameTooLarge. Other errors may be of type
// ConnectionError, StreamError, or anything else from the underlying
// reader.
func (fr *Framer) ReadFrame() (Frame, error) {
	fr.errDetail = nil
	// Reclaim the previous frame's buffers before reusing them.
	if fr.lastFrame != nil {
		fr.lastFrame.invalidate()
	}
	fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
	if err != nil {
		return nil, err
	}
	if fh.Length > fr.maxReadSize {
		return nil, ErrFrameTooLarge
	}
	payload := fr.getReadBuf(fh.Length)
	if _, err := io.ReadFull(fr.r, payload); err != nil {
		return nil, err
	}
	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
	if err != nil {
		// Parser-level connErrors carry a debug reason; stash it in
		// errDetail and surface a plain ConnectionError.
		if ce, ok := err.(connError); ok {
			return nil, fr.connError(ce.Code, ce.Reason)
		}
		return nil, err
	}
	if err := fr.checkFrameOrder(f); err != nil {
		return nil, err
	}
	if fr.logReads {
		fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
	}
	if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
		return fr.readMetaFrame(f.(*HeadersFrame))
	}
	return f, nil
}

// connError returns ConnectionError(code) but first
// stashes away a public reason so the caller can optionally relay it
// to the peer before hanging up on them. This might help others debug
// their implementations.
func (fr *Framer) connError(code ErrCode, reason string) error {
	fr.errDetail = errors.New(reason)
	return ConnectionError(code)
}
530 | |||
// checkFrameOrder reports an error if f is an invalid frame to return
// next from ReadFrame. Mostly it checks whether HEADERS and
// CONTINUATION frames are contiguous.
func (fr *Framer) checkFrameOrder(f Frame) error {
	last := fr.lastFrame
	fr.lastFrame = f
	if fr.AllowIllegalReads {
		return nil
	}

	fh := f.Header()
	if fr.lastHeaderStream != 0 {
		// A header block is open: only a CONTINUATION on the
		// same stream is legal.
		if fh.Type != FrameContinuation {
			return fr.connError(ErrCodeProtocol,
				fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
					fh.Type, fh.StreamID,
					last.Header().Type, fr.lastHeaderStream))
		}
		if fh.StreamID != fr.lastHeaderStream {
			return fr.connError(ErrCodeProtocol,
				fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
					fh.StreamID, fr.lastHeaderStream))
		}
	} else if fh.Type == FrameContinuation {
		// CONTINUATION without a preceding unfinished HEADERS.
		return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
	}

	switch fh.Type {
	case FrameHeaders, FrameContinuation:
		// Track whether this header block is still open. The
		// END_HEADERS bit (0x4) is shared by both frame types.
		if fh.Flags.Has(FlagHeadersEndHeaders) {
			fr.lastHeaderStream = 0
		} else {
			fr.lastHeaderStream = fh.StreamID
		}
	}

	return nil
}
569 | |||
// A DataFrame conveys arbitrary, variable-length sequences of octets
// associated with a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.1
type DataFrame struct {
	FrameHeader
	data []byte // payload minus padding; only valid while FrameHeader.valid
}

// StreamEnded reports whether the frame carries the END_STREAM flag.
func (f *DataFrame) StreamEnded() bool {
	return f.FrameHeader.Flags.Has(FlagDataEndStream)
}

// Data returns the frame's data octets, not including any padding
// size byte or padding suffix bytes.
// The caller must not retain the returned memory past the next
// call to ReadFrame.
func (f *DataFrame) Data() []byte {
	f.checkValid()
	return f.data
}
590 | |||
// parseDataFrame parses a DATA frame payload, stripping the optional
// pad-length byte and padding suffix so that f.data holds only the
// real data octets.
func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
	if fh.StreamID == 0 {
		// DATA frames MUST be associated with a stream. If a
		// DATA frame is received whose stream identifier
		// field is 0x0, the recipient MUST respond with a
		// connection error (Section 5.4.1) of type
		// PROTOCOL_ERROR.
		return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
	}
	f := fc.getDataFrame()
	f.FrameHeader = fh

	var padSize byte
	if fh.Flags.Has(FlagDataPadded) {
		var err error
		payload, padSize, err = readByte(payload)
		if err != nil {
			return nil, err
		}
	}
	if int(padSize) > len(payload) {
		// If the length of the padding is greater than the
		// length of the frame payload, the recipient MUST
		// treat this as a connection error.
		// Filed: https://github.com/http2/http2-spec/issues/610
		return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
	}
	f.data = payload[:len(payload)-int(padSize)]
	return f, nil
}

// Errors returned by the Write* methods for illegal arguments when
// AllowIllegalWrites is not set.
var (
	errStreamID    = errors.New("invalid stream ID")
	errDepStreamID = errors.New("invalid dependent stream ID")
	errPadLength   = errors.New("pad length too large")
	errPadBytes    = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)
628 | |||
// validStreamIDOrZero reports whether streamID fits in 31 bits
// (the high bit is reserved); zero is permitted.
func validStreamIDOrZero(streamID uint32) bool {
	const reservedBit = 1 << 31
	return streamID&reservedBit == 0
}

// validStreamID reports whether streamID is a legal nonzero
// 31-bit stream identifier.
func validStreamID(streamID uint32) bool {
	return streamID != 0 && validStreamIDOrZero(streamID)
}
636 | |||
// WriteData writes a DATA frame.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
	// Delegates to WriteDataPadded with no padding.
	return f.WriteDataPadded(streamID, endStream, data, nil)
}
645 | |||
// WriteDataPadded writes a DATA frame with optional padding.
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
	if !validStreamID(streamID) && !f.AllowIllegalWrites {
		return errStreamID
	}
	if len(pad) > 0 {
		// Pad length must fit in the 1-byte pad-length field.
		if len(pad) > 255 {
			return errPadLength
		}
		if !f.AllowIllegalWrites {
			for _, b := range pad {
				if b != 0 {
					// "Padding octets MUST be set to zero when sending."
					return errPadBytes
				}
			}
		}
	}
	var flags Flags
	if endStream {
		flags |= FlagDataEndStream
	}
	// Note: a non-nil zero-length pad still sets PADDED and emits a
	// zero pad-length byte.
	if pad != nil {
		flags |= FlagDataPadded
	}
	f.startWrite(FrameData, flags, streamID)
	if pad != nil {
		f.wbuf = append(f.wbuf, byte(len(pad)))
	}
	f.wbuf = append(f.wbuf, data...)
	f.wbuf = append(f.wbuf, pad...)
	return f.endWrite()
}
687 | |||
// A SettingsFrame conveys configuration parameters that affect how
// endpoints communicate, such as preferences and constraints on peer
// behavior.
//
// See http://http2.github.io/http2-spec/#SETTINGS
type SettingsFrame struct {
	FrameHeader
	p []byte // raw payload: a sequence of 6-byte (ID, value) settings
}
697 | |||
698 | func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||
699 | if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { | ||
700 | // When this (ACK 0x1) bit is set, the payload of the | ||
701 | // SETTINGS frame MUST be empty. Receipt of a | ||
702 | // SETTINGS frame with the ACK flag set and a length | ||
703 | // field value other than 0 MUST be treated as a | ||
704 | // connection error (Section 5.4.1) of type | ||
705 | // FRAME_SIZE_ERROR. | ||
706 | return nil, ConnectionError(ErrCodeFrameSize) | ||
707 | } | ||
708 | if fh.StreamID != 0 { | ||
709 | // SETTINGS frames always apply to a connection, | ||
710 | // never a single stream. The stream identifier for a | ||
711 | // SETTINGS frame MUST be zero (0x0). If an endpoint | ||
712 | // receives a SETTINGS frame whose stream identifier | ||
713 | // field is anything other than 0x0, the endpoint MUST | ||
714 | // respond with a connection error (Section 5.4.1) of | ||
715 | // type PROTOCOL_ERROR. | ||
716 | return nil, ConnectionError(ErrCodeProtocol) | ||
717 | } | ||
718 | if len(p)%6 != 0 { | ||
719 | // Expecting even number of 6 byte settings. | ||
720 | return nil, ConnectionError(ErrCodeFrameSize) | ||
721 | } | ||
722 | f := &SettingsFrame{FrameHeader: fh, p: p} | ||
723 | if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { | ||
724 | // Values above the maximum flow control window size of 2^31 - 1 MUST | ||
725 | // be treated as a connection error (Section 5.4.1) of type | ||
726 | // FLOW_CONTROL_ERROR. | ||
727 | return nil, ConnectionError(ErrCodeFlowControl) | ||
728 | } | ||
729 | return f, nil | ||
730 | } | ||
731 | |||
732 | func (f *SettingsFrame) IsAck() bool { | ||
733 | return f.FrameHeader.Flags.Has(FlagSettingsAck) | ||
734 | } | ||
735 | |||
736 | func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { | ||
737 | f.checkValid() | ||
738 | buf := f.p | ||
739 | for len(buf) > 0 { | ||
740 | settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) | ||
741 | if settingID == s { | ||
742 | return binary.BigEndian.Uint32(buf[2:6]), true | ||
743 | } | ||
744 | buf = buf[6:] | ||
745 | } | ||
746 | return 0, false | ||
747 | } | ||
748 | |||
749 | // ForeachSetting runs fn for each setting. | ||
750 | // It stops and returns the first error. | ||
751 | func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { | ||
752 | f.checkValid() | ||
753 | buf := f.p | ||
754 | for len(buf) > 0 { | ||
755 | if err := fn(Setting{ | ||
756 | SettingID(binary.BigEndian.Uint16(buf[:2])), | ||
757 | binary.BigEndian.Uint32(buf[2:6]), | ||
758 | }); err != nil { | ||
759 | return err | ||
760 | } | ||
761 | buf = buf[6:] | ||
762 | } | ||
763 | return nil | ||
764 | } | ||
765 | |||
766 | // WriteSettings writes a SETTINGS frame with zero or more settings | ||
767 | // specified and the ACK bit not set. | ||
768 | // | ||
769 | // It will perform exactly one Write to the underlying Writer. | ||
770 | // It is the caller's responsibility to not call other Write methods concurrently. | ||
771 | func (f *Framer) WriteSettings(settings ...Setting) error { | ||
772 | f.startWrite(FrameSettings, 0, 0) | ||
773 | for _, s := range settings { | ||
774 | f.writeUint16(uint16(s.ID)) | ||
775 | f.writeUint32(s.Val) | ||
776 | } | ||
777 | return f.endWrite() | ||
778 | } | ||
779 | |||
780 | // WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. | ||
781 | // | ||
782 | // It will perform exactly one Write to the underlying Writer. | ||
783 | // It is the caller's responsibility to not call other Write methods concurrently. | ||
784 | func (f *Framer) WriteSettingsAck() error { | ||
785 | f.startWrite(FrameSettings, FlagSettingsAck, 0) | ||
786 | return f.endWrite() | ||
787 | } | ||
788 | |||
// A PingFrame is a mechanism for measuring a minimal round trip time
// from the sender, as well as determining whether an idle connection
// is still functional.
// See http://http2.github.io/http2-spec/#rfc.section.6.7
type PingFrame struct {
	FrameHeader
	Data [8]byte // opaque data echoed back by the peer in the PING ACK
}
797 | |||
798 | func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } | ||
799 | |||
800 | func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { | ||
801 | if len(payload) != 8 { | ||
802 | return nil, ConnectionError(ErrCodeFrameSize) | ||
803 | } | ||
804 | if fh.StreamID != 0 { | ||
805 | return nil, ConnectionError(ErrCodeProtocol) | ||
806 | } | ||
807 | f := &PingFrame{FrameHeader: fh} | ||
808 | copy(f.Data[:], payload) | ||
809 | return f, nil | ||
810 | } | ||
811 | |||
812 | func (f *Framer) WritePing(ack bool, data [8]byte) error { | ||
813 | var flags Flags | ||
814 | if ack { | ||
815 | flags = FlagPingAck | ||
816 | } | ||
817 | f.startWrite(FramePing, flags, 0) | ||
818 | f.writeBytes(data[:]) | ||
819 | return f.endWrite() | ||
820 | } | ||
821 | |||
// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
// See http://http2.github.io/http2-spec/#rfc.section.6.8
type GoAwayFrame struct {
	FrameHeader
	LastStreamID uint32  // highest-numbered stream the sender may have processed (31-bit)
	ErrCode      ErrCode // reason for closing the connection
	debugData    []byte  // opaque diagnostic data; exposed via DebugData
}
830 | |||
831 | // DebugData returns any debug data in the GOAWAY frame. Its contents | ||
832 | // are not defined. | ||
833 | // The caller must not retain the returned memory past the next | ||
834 | // call to ReadFrame. | ||
835 | func (f *GoAwayFrame) DebugData() []byte { | ||
836 | f.checkValid() | ||
837 | return f.debugData | ||
838 | } | ||
839 | |||
840 | func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||
841 | if fh.StreamID != 0 { | ||
842 | return nil, ConnectionError(ErrCodeProtocol) | ||
843 | } | ||
844 | if len(p) < 8 { | ||
845 | return nil, ConnectionError(ErrCodeFrameSize) | ||
846 | } | ||
847 | return &GoAwayFrame{ | ||
848 | FrameHeader: fh, | ||
849 | LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), | ||
850 | ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), | ||
851 | debugData: p[8:], | ||
852 | }, nil | ||
853 | } | ||
854 | |||
855 | func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { | ||
856 | f.startWrite(FrameGoAway, 0, 0) | ||
857 | f.writeUint32(maxStreamID & (1<<31 - 1)) | ||
858 | f.writeUint32(uint32(code)) | ||
859 | f.writeBytes(debugData) | ||
860 | return f.endWrite() | ||
861 | } | ||
862 | |||
// An UnknownFrame is the frame type returned when the frame type is unknown
// or no specific frame type parser exists.
type UnknownFrame struct {
	FrameHeader
	p []byte // raw payload; exposed (unowned) via Payload
}
869 | |||
870 | // Payload returns the frame's payload (after the header). It is not | ||
871 | // valid to call this method after a subsequent call to | ||
872 | // Framer.ReadFrame, nor is it valid to retain the returned slice. | ||
873 | // The memory is owned by the Framer and is invalidated when the next | ||
874 | // frame is read. | ||
875 | func (f *UnknownFrame) Payload() []byte { | ||
876 | f.checkValid() | ||
877 | return f.p | ||
878 | } | ||
879 | |||
880 | func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||
881 | return &UnknownFrame{fh, p}, nil | ||
882 | } | ||
883 | |||
// A WindowUpdateFrame is used to implement flow control.
// See http://http2.github.io/http2-spec/#rfc.section.6.9
type WindowUpdateFrame struct {
	FrameHeader
	Increment uint32 // 31-bit window-size increment; never read with high bit set
}
890 | |||
891 | func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||
892 | if len(p) != 4 { | ||
893 | return nil, ConnectionError(ErrCodeFrameSize) | ||
894 | } | ||
895 | inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit | ||
896 | if inc == 0 { | ||
897 | // A receiver MUST treat the receipt of a | ||
898 | // WINDOW_UPDATE frame with an flow control window | ||
899 | // increment of 0 as a stream error (Section 5.4.2) of | ||
900 | // type PROTOCOL_ERROR; errors on the connection flow | ||
901 | // control window MUST be treated as a connection | ||
902 | // error (Section 5.4.1). | ||
903 | if fh.StreamID == 0 { | ||
904 | return nil, ConnectionError(ErrCodeProtocol) | ||
905 | } | ||
906 | return nil, streamError(fh.StreamID, ErrCodeProtocol) | ||
907 | } | ||
908 | return &WindowUpdateFrame{ | ||
909 | FrameHeader: fh, | ||
910 | Increment: inc, | ||
911 | }, nil | ||
912 | } | ||
913 | |||
914 | // WriteWindowUpdate writes a WINDOW_UPDATE frame. | ||
915 | // The increment value must be between 1 and 2,147,483,647, inclusive. | ||
916 | // If the Stream ID is zero, the window update applies to the | ||
917 | // connection as a whole. | ||
918 | func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { | ||
919 | // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." | ||
920 | if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { | ||
921 | return errors.New("illegal window increment value") | ||
922 | } | ||
923 | f.startWrite(FrameWindowUpdate, 0, streamID) | ||
924 | f.writeUint32(incr) | ||
925 | return f.endWrite() | ||
926 | } | ||
927 | |||
// A HeadersFrame is used to open a stream and additionally carries a
// header block fragment.
type HeadersFrame struct {
	FrameHeader

	// Priority is set if FlagHeadersPriority is set in the FrameHeader.
	Priority PriorityParam

	headerFragBuf []byte // not owned; invalidated by the next ReadFrame
}
938 | |||
939 | func (f *HeadersFrame) HeaderBlockFragment() []byte { | ||
940 | f.checkValid() | ||
941 | return f.headerFragBuf | ||
942 | } | ||
943 | |||
944 | func (f *HeadersFrame) HeadersEnded() bool { | ||
945 | return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) | ||
946 | } | ||
947 | |||
948 | func (f *HeadersFrame) StreamEnded() bool { | ||
949 | return f.FrameHeader.Flags.Has(FlagHeadersEndStream) | ||
950 | } | ||
951 | |||
952 | func (f *HeadersFrame) HasPriority() bool { | ||
953 | return f.FrameHeader.Flags.Has(FlagHeadersPriority) | ||
954 | } | ||
955 | |||
// parseHeadersFrame parses a HEADERS frame payload: an optional Pad
// Length byte (PADDED flag), optional priority fields (PRIORITY flag:
// exclusive bit + 31-bit stream dependency + weight byte), then the
// header block fragment, minus trailing padding.
func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
	hf := &HeadersFrame{
		FrameHeader: fh,
	}
	if fh.StreamID == 0 {
		// HEADERS frames MUST be associated with a stream. If a HEADERS frame
		// is received whose stream identifier field is 0x0, the recipient MUST
		// respond with a connection error (Section 5.4.1) of type
		// PROTOCOL_ERROR.
		return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
	}
	var padLength uint8
	if fh.Flags.Has(FlagHeadersPadded) {
		// PADDED: the first payload byte is the pad length.
		if p, padLength, err = readByte(p); err != nil {
			return
		}
	}
	if fh.Flags.Has(FlagHeadersPriority) {
		var v uint32
		p, v, err = readUint32(p)
		if err != nil {
			return nil, err
		}
		hf.Priority.StreamDep = v & 0x7fffffff
		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
		p, hf.Priority.Weight, err = readByte(p)
		if err != nil {
			return nil, err
		}
	}
	if len(p)-int(padLength) <= 0 {
		// Padding that consumes the entire remaining payload (or more)
		// leaves no header block fragment: a stream error.
		return nil, streamError(fh.StreamID, ErrCodeProtocol)
	}
	hf.headerFragBuf = p[:len(p)-int(padLength)]
	return hf, nil
}
992 | |||
// HeadersFrameParam are the parameters for writing a HEADERS frame.
type HeadersFrameParam struct {
	// StreamID is the required Stream ID to initiate.
	StreamID uint32
	// BlockFragment is part (or all) of a Header Block.
	BlockFragment []byte

	// EndStream indicates that the header block is the last that
	// the endpoint will send for the identified stream. Setting
	// this flag causes the stream to enter one of "half closed"
	// states.
	EndStream bool

	// EndHeaders indicates that this frame contains an entire
	// header block and is not followed by any
	// CONTINUATION frames.
	EndHeaders bool

	// PadLength is the optional number of bytes of zeros to add
	// to this frame. Zero means no padding (the PADDED flag is not set).
	PadLength uint8

	// Priority, if non-zero, includes stream priority information
	// in the HEADERS frame.
	Priority PriorityParam
}
1019 | |||
// WriteHeaders writes a single HEADERS frame.
//
// This is a low-level header writing method. Encoding headers and
// splitting them into any necessary CONTINUATION frames is handled
// elsewhere.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
		return errStreamID
	}
	// Derive the frame flags from the requested options.
	var flags Flags
	if p.PadLength != 0 {
		flags |= FlagHeadersPadded
	}
	if p.EndStream {
		flags |= FlagHeadersEndStream
	}
	if p.EndHeaders {
		flags |= FlagHeadersEndHeaders
	}
	if !p.Priority.IsZero() {
		flags |= FlagHeadersPriority
	}
	f.startWrite(FrameHeaders, flags, p.StreamID)
	if p.PadLength != 0 {
		// Pad Length byte comes first when PADDED is set.
		f.writeByte(p.PadLength)
	}
	if !p.Priority.IsZero() {
		v := p.Priority.StreamDep
		if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
			return errDepStreamID
		}
		if p.Priority.Exclusive {
			// High bit of the dependency word marks the dependency exclusive.
			v |= 1 << 31
		}
		f.writeUint32(v)
		f.writeByte(p.Priority.Weight)
	}
	f.wbuf = append(f.wbuf, p.BlockFragment...)
	// Trailing padding is all zero bytes.
	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
	return f.endWrite()
}
1064 | |||
// A PriorityFrame specifies the sender-advised priority of a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.3
type PriorityFrame struct {
	FrameHeader
	PriorityParam // the decoded dependency, exclusivity, and weight
}
1071 | |||
// PriorityParam are the stream prioritization parameters.
type PriorityParam struct {
	// StreamDep is a 31-bit stream identifier for the
	// stream that this stream depends on. Zero means no
	// dependency.
	StreamDep uint32

	// Exclusive is whether the dependency is exclusive.
	Exclusive bool

	// Weight is the stream's zero-indexed weight. It should be
	// set together with StreamDep, or neither should be set. Per
	// the spec, "Add one to the value to obtain a weight between
	// 1 and 256."
	Weight uint8
}
1088 | |||
1089 | func (p PriorityParam) IsZero() bool { | ||
1090 | return p == PriorityParam{} | ||
1091 | } | ||
1092 | |||
1093 | func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { | ||
1094 | if fh.StreamID == 0 { | ||
1095 | return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} | ||
1096 | } | ||
1097 | if len(payload) != 5 { | ||
1098 | return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} | ||
1099 | } | ||
1100 | v := binary.BigEndian.Uint32(payload[:4]) | ||
1101 | streamID := v & 0x7fffffff // mask off high bit | ||
1102 | return &PriorityFrame{ | ||
1103 | FrameHeader: fh, | ||
1104 | PriorityParam: PriorityParam{ | ||
1105 | Weight: payload[4], | ||
1106 | StreamDep: streamID, | ||
1107 | Exclusive: streamID != v, // was high bit set? | ||
1108 | }, | ||
1109 | }, nil | ||
1110 | } | ||
1111 | |||
1112 | // WritePriority writes a PRIORITY frame. | ||
1113 | // | ||
1114 | // It will perform exactly one Write to the underlying Writer. | ||
1115 | // It is the caller's responsibility to not call other Write methods concurrently. | ||
1116 | func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { | ||
1117 | if !validStreamID(streamID) && !f.AllowIllegalWrites { | ||
1118 | return errStreamID | ||
1119 | } | ||
1120 | if !validStreamIDOrZero(p.StreamDep) { | ||
1121 | return errDepStreamID | ||
1122 | } | ||
1123 | f.startWrite(FramePriority, 0, streamID) | ||
1124 | v := p.StreamDep | ||
1125 | if p.Exclusive { | ||
1126 | v |= 1 << 31 | ||
1127 | } | ||
1128 | f.writeUint32(v) | ||
1129 | f.writeByte(p.Weight) | ||
1130 | return f.endWrite() | ||
1131 | } | ||
1132 | |||
// A RSTStreamFrame allows for abnormal termination of a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.4
type RSTStreamFrame struct {
	FrameHeader
	ErrCode ErrCode // reason the stream is being terminated
}
1139 | |||
1140 | func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||
1141 | if len(p) != 4 { | ||
1142 | return nil, ConnectionError(ErrCodeFrameSize) | ||
1143 | } | ||
1144 | if fh.StreamID == 0 { | ||
1145 | return nil, ConnectionError(ErrCodeProtocol) | ||
1146 | } | ||
1147 | return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil | ||
1148 | } | ||
1149 | |||
1150 | // WriteRSTStream writes a RST_STREAM frame. | ||
1151 | // | ||
1152 | // It will perform exactly one Write to the underlying Writer. | ||
1153 | // It is the caller's responsibility to not call other Write methods concurrently. | ||
1154 | func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { | ||
1155 | if !validStreamID(streamID) && !f.AllowIllegalWrites { | ||
1156 | return errStreamID | ||
1157 | } | ||
1158 | f.startWrite(FrameRSTStream, 0, streamID) | ||
1159 | f.writeUint32(uint32(code)) | ||
1160 | return f.endWrite() | ||
1161 | } | ||
1162 | |||
// A ContinuationFrame is used to continue a sequence of header block fragments.
// See http://http2.github.io/http2-spec/#rfc.section.6.10
type ContinuationFrame struct {
	FrameHeader
	headerFragBuf []byte // not owned; invalidated by the next ReadFrame
}
1169 | |||
1170 | func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { | ||
1171 | if fh.StreamID == 0 { | ||
1172 | return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} | ||
1173 | } | ||
1174 | return &ContinuationFrame{fh, p}, nil | ||
1175 | } | ||
1176 | |||
1177 | func (f *ContinuationFrame) HeaderBlockFragment() []byte { | ||
1178 | f.checkValid() | ||
1179 | return f.headerFragBuf | ||
1180 | } | ||
1181 | |||
1182 | func (f *ContinuationFrame) HeadersEnded() bool { | ||
1183 | return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) | ||
1184 | } | ||
1185 | |||
1186 | // WriteContinuation writes a CONTINUATION frame. | ||
1187 | // | ||
1188 | // It will perform exactly one Write to the underlying Writer. | ||
1189 | // It is the caller's responsibility to not call other Write methods concurrently. | ||
1190 | func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { | ||
1191 | if !validStreamID(streamID) && !f.AllowIllegalWrites { | ||
1192 | return errStreamID | ||
1193 | } | ||
1194 | var flags Flags | ||
1195 | if endHeaders { | ||
1196 | flags |= FlagContinuationEndHeaders | ||
1197 | } | ||
1198 | f.startWrite(FrameContinuation, flags, streamID) | ||
1199 | f.wbuf = append(f.wbuf, headerBlockFragment...) | ||
1200 | return f.endWrite() | ||
1201 | } | ||
1202 | |||
// A PushPromiseFrame is used to initiate a server stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.6
type PushPromiseFrame struct {
	FrameHeader
	PromiseID     uint32 // 31-bit promised stream ID (high bit masked off at parse time)
	headerFragBuf []byte // not owned
}
1210 | |||
1211 | func (f *PushPromiseFrame) HeaderBlockFragment() []byte { | ||
1212 | f.checkValid() | ||
1213 | return f.headerFragBuf | ||
1214 | } | ||
1215 | |||
1216 | func (f *PushPromiseFrame) HeadersEnded() bool { | ||
1217 | return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) | ||
1218 | } | ||
1219 | |||
// parsePushPromise parses a PUSH_PROMISE frame payload: an optional
// Pad Length byte (PADDED flag), the 4-byte promised stream ID (high
// bit masked off), then the header block fragment minus padding.
func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
	pp := &PushPromiseFrame{
		FrameHeader: fh,
	}
	if pp.StreamID == 0 {
		// PUSH_PROMISE frames MUST be associated with an existing,
		// peer-initiated stream. The stream identifier of a
		// PUSH_PROMISE frame indicates the stream it is associated
		// with. If the stream identifier field specifies the value
		// 0x0, a recipient MUST respond with a connection error
		// (Section 5.4.1) of type PROTOCOL_ERROR.
		return nil, ConnectionError(ErrCodeProtocol)
	}
	// The PUSH_PROMISE frame includes optional padding.
	// Padding fields and flags are identical to those defined for DATA frames.
	var padLength uint8
	if fh.Flags.Has(FlagPushPromisePadded) {
		if p, padLength, err = readByte(p); err != nil {
			return
		}
	}

	p, pp.PromiseID, err = readUint32(p)
	if err != nil {
		return
	}
	// Mask off the reserved high bit of the promised stream ID.
	pp.PromiseID = pp.PromiseID & (1<<31 - 1)

	if int(padLength) > len(p) {
		// like the DATA frame, error out if padding is longer than the body.
		return nil, ConnectionError(ErrCodeProtocol)
	}
	pp.headerFragBuf = p[:len(p)-int(padLength)]
	return pp, nil
}
1255 | |||
// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
type PushPromiseParam struct {
	// StreamID is the required Stream ID to initiate.
	StreamID uint32

	// PromiseID is the required 31-bit Promised Stream ID that this
	// PUSH_PROMISE frame carries.
	PromiseID uint32

	// BlockFragment is part (or all) of a Header Block.
	BlockFragment []byte

	// EndHeaders indicates that this frame contains an entire
	// header block and is not followed by any
	// CONTINUATION frames.
	EndHeaders bool

	// PadLength is the optional number of bytes of zeros to add
	// to this frame. Zero means no padding (the PADDED flag is not set).
	PadLength uint8
}
1277 | |||
// WritePushPromise writes a single PushPromise Frame.
//
// As with Header Frames, This is the low level call for writing
// individual frames. Continuation frames are handled elsewhere.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
func (f *Framer) WritePushPromise(p PushPromiseParam) error {
	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
		return errStreamID
	}
	var flags Flags
	if p.PadLength != 0 {
		flags |= FlagPushPromisePadded
	}
	if p.EndHeaders {
		flags |= FlagPushPromiseEndHeaders
	}
	f.startWrite(FramePushPromise, flags, p.StreamID)
	if p.PadLength != 0 {
		// Pad Length byte comes first when PADDED is set.
		f.writeByte(p.PadLength)
	}
	// NOTE(review): PromiseID is validated after startWrite; on error no
	// endWrite occurs, and the buffered partial frame is discarded by
	// the next startWrite.
	if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
		return errStreamID
	}
	f.writeUint32(p.PromiseID)
	f.wbuf = append(f.wbuf, p.BlockFragment...)
	// Trailing padding is all zero bytes.
	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
	return f.endWrite()
}
1308 | |||
1309 | // WriteRawFrame writes a raw frame. This can be used to write | ||
1310 | // extension frames unknown to this package. | ||
1311 | func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { | ||
1312 | f.startWrite(t, flags, streamID) | ||
1313 | f.writeBytes(payload) | ||
1314 | return f.endWrite() | ||
1315 | } | ||
1316 | |||
// readByte consumes and returns the first byte of p along with the
// remainder. It returns io.ErrUnexpectedEOF if p is empty.
func readByte(p []byte) (remain []byte, b byte, err error) {
	if len(p) < 1 {
		return nil, 0, io.ErrUnexpectedEOF
	}
	b = p[0]
	remain = p[1:]
	return remain, b, nil
}
1323 | |||
// readUint32 consumes the first four bytes of p as a big-endian
// uint32, returning the remainder. It returns io.ErrUnexpectedEOF if
// fewer than four bytes are available.
func readUint32(p []byte) (remain []byte, v uint32, err error) {
	if len(p) < 4 {
		return nil, 0, io.ErrUnexpectedEOF
	}
	v = binary.BigEndian.Uint32(p[:4])
	return p[4:], v, nil
}
1330 | |||
// streamEnder is implemented by frame types that can carry the
// END_STREAM flag.
type streamEnder interface {
	StreamEnded() bool
}

// headersEnder is implemented by frame types that can carry the
// END_HEADERS flag.
type headersEnder interface {
	HeadersEnded() bool
}

// headersOrContinuation is the common interface of HEADERS and
// CONTINUATION frames: a header block fragment carrier that knows
// whether the block is complete.
type headersOrContinuation interface {
	headersEnder
	HeaderBlockFragment() []byte
}
1343 | |||
// A MetaHeadersFrame is the representation of one HEADERS frame and
// zero or more contiguous CONTINUATION frames and the decoding of
// their HPACK-encoded contents.
//
// This type of frame does not appear on the wire and is only returned
// by the Framer when Framer.ReadMetaHeaders is set.
type MetaHeadersFrame struct {
	*HeadersFrame

	// Fields are the fields contained in the HEADERS and
	// CONTINUATION frames. The underlying slice is owned by the
	// Framer and must not be retained after the next call to
	// ReadFrame.
	//
	// Fields are guaranteed to be in the correct http2 order and
	// not have unknown pseudo header fields or invalid header
	// field names or values. Required pseudo header fields may be
	// missing, however. Use the MetaHeadersFrame.Pseudo accessor
	// method to access pseudo headers.
	Fields []hpack.HeaderField

	// Truncated is whether the max header list size limit was hit
	// and Fields is incomplete. The hpack decoder state is still
	// valid, however.
	Truncated bool
}
1370 | |||
1371 | // PseudoValue returns the given pseudo header field's value. | ||
1372 | // The provided pseudo field should not contain the leading colon. | ||
1373 | func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { | ||
1374 | for _, hf := range mh.Fields { | ||
1375 | if !hf.IsPseudo() { | ||
1376 | return "" | ||
1377 | } | ||
1378 | if hf.Name[1:] == pseudo { | ||
1379 | return hf.Value | ||
1380 | } | ||
1381 | } | ||
1382 | return "" | ||
1383 | } | ||
1384 | |||
1385 | // RegularFields returns the regular (non-pseudo) header fields of mh. | ||
1386 | // The caller does not own the returned slice. | ||
1387 | func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { | ||
1388 | for i, hf := range mh.Fields { | ||
1389 | if !hf.IsPseudo() { | ||
1390 | return mh.Fields[i:] | ||
1391 | } | ||
1392 | } | ||
1393 | return nil | ||
1394 | } | ||
1395 | |||
1396 | // PseudoFields returns the pseudo header fields of mh. | ||
1397 | // The caller does not own the returned slice. | ||
1398 | func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { | ||
1399 | for i, hf := range mh.Fields { | ||
1400 | if !hf.IsPseudo() { | ||
1401 | return mh.Fields[:i] | ||
1402 | } | ||
1403 | } | ||
1404 | return mh.Fields | ||
1405 | } | ||
1406 | |||
1407 | func (mh *MetaHeadersFrame) checkPseudos() error { | ||
1408 | var isRequest, isResponse bool | ||
1409 | pf := mh.PseudoFields() | ||
1410 | for i, hf := range pf { | ||
1411 | switch hf.Name { | ||
1412 | case ":method", ":path", ":scheme", ":authority": | ||
1413 | isRequest = true | ||
1414 | case ":status": | ||
1415 | isResponse = true | ||
1416 | default: | ||
1417 | return pseudoHeaderError(hf.Name) | ||
1418 | } | ||
1419 | // Check for duplicates. | ||
1420 | // This would be a bad algorithm, but N is 4. | ||
1421 | // And this doesn't allocate. | ||
1422 | for _, hf2 := range pf[:i] { | ||
1423 | if hf.Name == hf2.Name { | ||
1424 | return duplicatePseudoHeaderError(hf.Name) | ||
1425 | } | ||
1426 | } | ||
1427 | } | ||
1428 | if isRequest && isResponse { | ||
1429 | return errMixPseudoHeaderTypes | ||
1430 | } | ||
1431 | return nil | ||
1432 | } | ||
1433 | |||
1434 | func (fr *Framer) maxHeaderStringLen() int { | ||
1435 | v := fr.maxHeaderListSize() | ||
1436 | if uint32(int(v)) == v { | ||
1437 | return int(v) | ||
1438 | } | ||
1439 | // They had a crazy big number for MaxHeaderBytes anyway, | ||
1440 | // so give them unlimited header lengths: | ||
1441 | return 0 | ||
1442 | } | ||
1443 | |||
1444 | // readMetaFrame returns 0 or more CONTINUATION frames from fr and | ||
1445 | // merges them into the provided hf and returns a MetaHeadersFrame | ||
1446 | // with the decoded hpack values. | ||
1447 | func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { | ||
1448 | if fr.AllowIllegalReads { | ||
1449 | return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") | ||
1450 | } | ||
1451 | mh := &MetaHeadersFrame{ | ||
1452 | HeadersFrame: hf, | ||
1453 | } | ||
1454 | var remainSize = fr.maxHeaderListSize() | ||
1455 | var sawRegular bool | ||
1456 | |||
1457 | var invalid error // pseudo header field errors | ||
1458 | hdec := fr.ReadMetaHeaders | ||
1459 | hdec.SetEmitEnabled(true) | ||
1460 | hdec.SetMaxStringLength(fr.maxHeaderStringLen()) | ||
1461 | hdec.SetEmitFunc(func(hf hpack.HeaderField) { | ||
1462 | if VerboseLogs && fr.logReads { | ||
1463 | fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) | ||
1464 | } | ||
1465 | if !httplex.ValidHeaderFieldValue(hf.Value) { | ||
1466 | invalid = headerFieldValueError(hf.Value) | ||
1467 | } | ||
1468 | isPseudo := strings.HasPrefix(hf.Name, ":") | ||
1469 | if isPseudo { | ||
1470 | if sawRegular { | ||
1471 | invalid = errPseudoAfterRegular | ||
1472 | } | ||
1473 | } else { | ||
1474 | sawRegular = true | ||
1475 | if !validWireHeaderFieldName(hf.Name) { | ||
1476 | invalid = headerFieldNameError(hf.Name) | ||
1477 | } | ||
1478 | } | ||
1479 | |||
1480 | if invalid != nil { | ||
1481 | hdec.SetEmitEnabled(false) | ||
1482 | return | ||
1483 | } | ||
1484 | |||
1485 | size := hf.Size() | ||
1486 | if size > remainSize { | ||
1487 | hdec.SetEmitEnabled(false) | ||
1488 | mh.Truncated = true | ||
1489 | return | ||
1490 | } | ||
1491 | remainSize -= size | ||
1492 | |||
1493 | mh.Fields = append(mh.Fields, hf) | ||
1494 | }) | ||
1495 | // Lose reference to MetaHeadersFrame: | ||
1496 | defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) | ||
1497 | |||
1498 | var hc headersOrContinuation = hf | ||
1499 | for { | ||
1500 | frag := hc.HeaderBlockFragment() | ||
1501 | if _, err := hdec.Write(frag); err != nil { | ||
1502 | return nil, ConnectionError(ErrCodeCompression) | ||
1503 | } | ||
1504 | |||
1505 | if hc.HeadersEnded() { | ||
1506 | break | ||
1507 | } | ||
1508 | if f, err := fr.ReadFrame(); err != nil { | ||
1509 | return nil, err | ||
1510 | } else { | ||
1511 | hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder | ||
1512 | } | ||
1513 | } | ||
1514 | |||
1515 | mh.HeadersFrame.headerFragBuf = nil | ||
1516 | mh.HeadersFrame.invalidate() | ||
1517 | |||
1518 | if err := hdec.Close(); err != nil { | ||
1519 | return nil, ConnectionError(ErrCodeCompression) | ||
1520 | } | ||
1521 | if invalid != nil { | ||
1522 | fr.errDetail = invalid | ||
1523 | if VerboseLogs { | ||
1524 | log.Printf("http2: invalid header: %v", invalid) | ||
1525 | } | ||
1526 | return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} | ||
1527 | } | ||
1528 | if err := mh.checkPseudos(); err != nil { | ||
1529 | fr.errDetail = err | ||
1530 | if VerboseLogs { | ||
1531 | log.Printf("http2: invalid pseudo headers: %v", err) | ||
1532 | } | ||
1533 | return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} | ||
1534 | } | ||
1535 | return mh, nil | ||
1536 | } | ||
1537 | |||
// summarizeFrame returns a short, human-readable, single-line
// description of f for debug logging.
func summarizeFrame(f Frame) string {
	var buf bytes.Buffer
	f.Header().writeDebug(&buf)
	switch f := f.(type) {
	case *SettingsFrame:
		n := 0
		f.ForeachSetting(func(s Setting) error {
			n++
			if n == 1 {
				buf.WriteString(", settings:")
			}
			fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
			return nil
		})
		if n > 0 {
			buf.Truncate(buf.Len() - 1) // remove trailing comma
		}
	case *DataFrame:
		// DATA payloads are truncated to 256 bytes in the summary.
		data := f.Data()
		const max = 256
		if len(data) > max {
			data = data[:max]
		}
		fmt.Fprintf(&buf, " data=%q", data)
		if len(f.Data()) > max {
			fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
		}
	case *WindowUpdateFrame:
		if f.StreamID == 0 {
			buf.WriteString(" (conn)")
		}
		fmt.Fprintf(&buf, " incr=%v", f.Increment)
	case *PingFrame:
		fmt.Fprintf(&buf, " ping=%q", f.Data[:])
	case *GoAwayFrame:
		fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
			f.LastStreamID, f.ErrCode, f.debugData)
	case *RSTStreamFrame:
		fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
	}
	return buf.String()
}
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go new file mode 100644 index 0000000..00b2e9e --- /dev/null +++ b/vendor/golang.org/x/net/http2/go16.go | |||
@@ -0,0 +1,16 @@ | |||
1 | // Copyright 2016 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build go1.6 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "net/http" | ||
11 | "time" | ||
12 | ) | ||
13 | |||
// transportExpectContinueTimeout returns t1's ExpectContinueTimeout,
// a net/http.Transport field that exists as of Go 1.6 (hence this
// build-tagged file).
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
	return t1.ExpectContinueTimeout
}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go new file mode 100644 index 0000000..47b7fae --- /dev/null +++ b/vendor/golang.org/x/net/http2/go17.go | |||
@@ -0,0 +1,106 @@ | |||
1 | // Copyright 2016 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build go1.7 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "context" | ||
11 | "net" | ||
12 | "net/http" | ||
13 | "net/http/httptrace" | ||
14 | "time" | ||
15 | ) | ||
16 | |||
// contextContext is a local name for context.Context.
// NOTE(review): presumably mirrored by a stand-in type in the
// pre-go1.7 build variant of this package — confirm there.
type contextContext interface {
	context.Context
}
20 | |||
21 | func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { | ||
22 | ctx, cancel = context.WithCancel(context.Background()) | ||
23 | ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) | ||
24 | if hs := opts.baseConfig(); hs != nil { | ||
25 | ctx = context.WithValue(ctx, http.ServerContextKey, hs) | ||
26 | } | ||
27 | return | ||
28 | } | ||
29 | |||
// contextWithCancel wraps context.WithCancel for the contextContext
// indirection used across Go-version build variants.
func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
	return context.WithCancel(ctx)
}
33 | |||
// requestWithContext returns a shallow copy of req with its context
// changed to ctx (Go 1.7+ Request.WithContext).
func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
	return req.WithContext(ctx)
}
37 | |||
// clientTrace is a local name for httptrace.ClientTrace (Go 1.7+),
// letting shared code refer to it independently of the Go version.
type clientTrace httptrace.ClientTrace
39 | |||
// reqContext returns r's context (Go 1.7+ Request.Context).
func reqContext(r *http.Request) context.Context { return r.Context() }
41 | |||
// idleConnTimeout returns the underlying http1 Transport's
// IdleConnTimeout, or 0 if there is no underlying Transport.
func (t *Transport) idleConnTimeout() time.Duration {
	if t.t1 != nil {
		return t.t1.IdleConnTimeout
	}
	return 0
}
48 | |||
// setResponseUncompressed marks res as transparently decompressed
// (Response.Uncompressed is a Go 1.7+ field).
func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
50 | |||
51 | func traceGotConn(req *http.Request, cc *ClientConn) { | ||
52 | trace := httptrace.ContextClientTrace(req.Context()) | ||
53 | if trace == nil || trace.GotConn == nil { | ||
54 | return | ||
55 | } | ||
56 | ci := httptrace.GotConnInfo{Conn: cc.tconn} | ||
57 | cc.mu.Lock() | ||
58 | ci.Reused = cc.nextStreamID > 1 | ||
59 | ci.WasIdle = len(cc.streams) == 0 && ci.Reused | ||
60 | if ci.WasIdle && !cc.lastActive.IsZero() { | ||
61 | ci.IdleTime = time.Now().Sub(cc.lastActive) | ||
62 | } | ||
63 | cc.mu.Unlock() | ||
64 | |||
65 | trace.GotConn(ci) | ||
66 | } | ||
67 | |||
// traceWroteHeaders invokes the WroteHeaders trace hook, if set.
func traceWroteHeaders(trace *clientTrace) {
	if trace != nil && trace.WroteHeaders != nil {
		trace.WroteHeaders()
	}
}
73 | |||
// traceGot100Continue invokes the Got100Continue trace hook, if set.
func traceGot100Continue(trace *clientTrace) {
	if trace != nil && trace.Got100Continue != nil {
		trace.Got100Continue()
	}
}
79 | |||
// traceWait100Continue invokes the Wait100Continue trace hook, if set.
func traceWait100Continue(trace *clientTrace) {
	if trace != nil && trace.Wait100Continue != nil {
		trace.Wait100Continue()
	}
}
85 | |||
// traceWroteRequest invokes the WroteRequest trace hook, if set,
// reporting err as the outcome of writing the request.
func traceWroteRequest(trace *clientTrace, err error) {
	if trace != nil && trace.WroteRequest != nil {
		trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
	}
}
91 | |||
// traceFirstResponseByte invokes the GotFirstResponseByte trace hook,
// if set.
func traceFirstResponseByte(trace *clientTrace) {
	if trace != nil && trace.GotFirstResponseByte != nil {
		trace.GotFirstResponseByte()
	}
}
97 | |||
// requestTrace extracts the httptrace.ClientTrace from req's context
// (nil if none) and converts it to the local clientTrace type.
func requestTrace(req *http.Request) *clientTrace {
	trace := httptrace.ContextClientTrace(req.Context())
	return (*clientTrace)(trace)
}
102 | |||
// Ping sends a PING frame to the server and waits for the ack.
// It is the Go 1.7+ (context-based) public wrapper around cc.ping.
func (cc *ClientConn) Ping(ctx context.Context) error {
	return cc.ping(ctx)
}
diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go new file mode 100644 index 0000000..b4c52ec --- /dev/null +++ b/vendor/golang.org/x/net/http2/go17_not18.go | |||
@@ -0,0 +1,36 @@ | |||
1 | // Copyright 2016 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build go1.7,!go1.8 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import "crypto/tls" | ||
10 | |||
// temporary copy of Go 1.7's private tls.Config.clone:
// cloneTLSConfig returns a shallow, field-by-field copy of c.
// The field list is deliberately frozen to Go 1.7's tls.Config
// (this file is built only for go1.7 && !go1.8), so do not add
// fields introduced in later Go versions here.
func cloneTLSConfig(c *tls.Config) *tls.Config {
	return &tls.Config{
		Rand:                        c.Rand,
		Time:                        c.Time,
		Certificates:                c.Certificates,
		NameToCertificate:           c.NameToCertificate,
		GetCertificate:              c.GetCertificate,
		RootCAs:                     c.RootCAs,
		NextProtos:                  c.NextProtos,
		ServerName:                  c.ServerName,
		ClientAuth:                  c.ClientAuth,
		ClientCAs:                   c.ClientCAs,
		InsecureSkipVerify:          c.InsecureSkipVerify,
		CipherSuites:                c.CipherSuites,
		PreferServerCipherSuites:    c.PreferServerCipherSuites,
		SessionTicketsDisabled:      c.SessionTicketsDisabled,
		SessionTicketKey:            c.SessionTicketKey,
		ClientSessionCache:          c.ClientSessionCache,
		MinVersion:                  c.MinVersion,
		MaxVersion:                  c.MaxVersion,
		CurvePreferences:            c.CurvePreferences,
		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
		Renegotiation:               c.Renegotiation,
	}
}
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go new file mode 100644 index 0000000..4f30d22 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go18.go | |||
@@ -0,0 +1,56 @@ | |||
1 | // Copyright 2015 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build go1.8 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "crypto/tls" | ||
11 | "io" | ||
12 | "net/http" | ||
13 | ) | ||
14 | |||
// cloneTLSConfig returns a clone of c using Go 1.8's tls.Config.Clone,
// additionally copying GetClientCertificate, which Clone missed in
// early Go 1.8 releases (golang.org/issue/19264).
func cloneTLSConfig(c *tls.Config) *tls.Config {
	c2 := c.Clone()
	c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
	return c2
}
20 | |||
21 | var _ http.Pusher = (*responseWriter)(nil) | ||
22 | |||
23 | // Push implements http.Pusher. | ||
24 | func (w *responseWriter) Push(target string, opts *http.PushOptions) error { | ||
25 | internalOpts := pushOptions{} | ||
26 | if opts != nil { | ||
27 | internalOpts.Method = opts.Method | ||
28 | internalOpts.Header = opts.Header | ||
29 | } | ||
30 | return w.push(target, internalOpts) | ||
31 | } | ||
32 | |||
33 | func configureServer18(h1 *http.Server, h2 *Server) error { | ||
34 | if h2.IdleTimeout == 0 { | ||
35 | if h1.IdleTimeout != 0 { | ||
36 | h2.IdleTimeout = h1.IdleTimeout | ||
37 | } else { | ||
38 | h2.IdleTimeout = h1.ReadTimeout | ||
39 | } | ||
40 | } | ||
41 | return nil | ||
42 | } | ||
43 | |||
44 | func shouldLogPanic(panicValue interface{}) bool { | ||
45 | return panicValue != nil && panicValue != http.ErrAbortHandler | ||
46 | } | ||
47 | |||
// reqGetBody returns req.GetBody (Go 1.8+ field), the factory for a
// fresh copy of the request body, or nil if unset.
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
	return req.GetBody
}
51 | |||
// reqBodyIsNoBody reports whether body is the http.NoBody sentinel
// (Go 1.8+).
func reqBodyIsNoBody(body io.ReadCloser) bool {
	return body == http.NoBody
}
55 | |||
// go18httpNoBody returns the http.NoBody sentinel. For tests only.
func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only
diff --git a/vendor/golang.org/x/net/http2/go19.go b/vendor/golang.org/x/net/http2/go19.go new file mode 100644 index 0000000..38124ba --- /dev/null +++ b/vendor/golang.org/x/net/http2/go19.go | |||
@@ -0,0 +1,16 @@ | |||
1 | // Copyright 2015 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build go1.9 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "net/http" | ||
11 | ) | ||
12 | |||
// configureServer19 hooks the http2 Server's graceful shutdown into
// s via RegisterOnShutdown (Go 1.9+). Always returns nil.
func configureServer19(s *http.Server, conf *Server) error {
	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
	return nil
}
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 0000000..9933c9f --- /dev/null +++ b/vendor/golang.org/x/net/http2/gotrack.go | |||
@@ -0,0 +1,170 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // Defensive debug-only utility to track that functions run on the | ||
6 | // goroutine that they're supposed to. | ||
7 | |||
8 | package http2 | ||
9 | |||
10 | import ( | ||
11 | "bytes" | ||
12 | "errors" | ||
13 | "fmt" | ||
14 | "os" | ||
15 | "runtime" | ||
16 | "strconv" | ||
17 | "sync" | ||
18 | ) | ||
19 | |||
// DebugGoroutines enables the debug-only goroutine-ownership checks
// in this file; set the DEBUG_HTTP2_GOROUTINES=1 environment variable
// to turn them on.
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
21 | |||
// goroutineLock holds the ID of the goroutine expected to run the
// checked code, or 0 when DebugGoroutines is off.
type goroutineLock uint64
23 | |||
// newGoroutineLock captures the current goroutine's ID, or returns 0
// when the debug checks are disabled.
func newGoroutineLock() goroutineLock {
	if !DebugGoroutines {
		return 0
	}
	return goroutineLock(curGoroutineID())
}
30 | |||
// check panics if the caller is not running on the goroutine recorded
// in g. No-op when DebugGoroutines is off.
func (g goroutineLock) check() {
	if !DebugGoroutines {
		return
	}
	if curGoroutineID() != uint64(g) {
		panic("running on the wrong goroutine")
	}
}
39 | |||
// checkNotOn panics if the caller IS running on the goroutine
// recorded in g (the inverse of check). No-op when DebugGoroutines
// is off.
func (g goroutineLock) checkNotOn() {
	if !DebugGoroutines {
		return
	}
	if curGoroutineID() == uint64(g) {
		panic("running on the wrong goroutine")
	}
}
48 | |||
// goroutineSpace is the prefix of runtime.Stack output that precedes
// the goroutine ID; curGoroutineID trims it off.
var goroutineSpace = []byte("goroutine ")
50 | |||
// curGoroutineID parses the current goroutine's ID out of the first
// line of runtime.Stack output. It panics if the output cannot be
// parsed; debug-only code.
func curGoroutineID() uint64 {
	bp := littleBuf.Get().(*[]byte)
	defer littleBuf.Put(bp)
	b := *bp
	b = b[:runtime.Stack(b, false)]
	// Parse the 4707 out of "goroutine 4707 ["
	b = bytes.TrimPrefix(b, goroutineSpace)
	i := bytes.IndexByte(b, ' ')
	if i < 0 {
		panic(fmt.Sprintf("No space found in %q", b))
	}
	b = b[:i]
	n, err := parseUintBytes(b, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
	}
	return n
}
69 | |||
// littleBuf pools small scratch buffers for curGoroutineID; 64 bytes
// is enough to hold the first line of runtime.Stack output.
var littleBuf = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 64)
		return &buf
	},
}
76 | |||
// parseUintBytes is like strconv.ParseUint, but using a []byte,
// avoiding the string conversion when parsing the goroutine ID.
// base 0 auto-detects octal/hex prefixes; bitSize 0 means the
// platform int size. Errors are reported via *strconv.NumError,
// matching strconv.ParseUint.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
	var cutoff, maxVal uint64

	if bitSize == 0 {
		bitSize = int(strconv.IntSize)
	}

	s0 := s
	switch {
	case len(s) < 1:
		err = strconv.ErrSyntax
		goto Error

	case 2 <= base && base <= 36:
		// valid base; nothing to do

	case base == 0:
		// Look for octal, hex prefix.
		switch {
		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
			base = 16
			s = s[2:]
			if len(s) < 1 {
				err = strconv.ErrSyntax
				goto Error
			}
		case s[0] == '0':
			base = 8
		default:
			base = 10
		}

	default:
		err = errors.New("invalid base " + strconv.Itoa(base))
		goto Error
	}

	n = 0
	cutoff = cutoff64(base)
	maxVal = 1<<uint(bitSize) - 1

	for i := 0; i < len(s); i++ {
		var v byte
		d := s[i]
		switch {
		case '0' <= d && d <= '9':
			v = d - '0'
		case 'a' <= d && d <= 'z':
			v = d - 'a' + 10
		case 'A' <= d && d <= 'Z':
			v = d - 'A' + 10
		default:
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}
		if int(v) >= base {
			// Digit out of range for this base.
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}

		if n >= cutoff {
			// n*base overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n *= uint64(base)

		n1 := n + uint64(v)
		if n1 < n || n1 > maxVal {
			// n+v overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n = n1
	}

	return n, nil

Error:
	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}
163 | |||
// cutoff64 returns the first number n such that n*base >= 1<<64,
// i.e. the smallest accumulator value at which another multiply by
// base would overflow uint64. Bases below 2 yield 0.
func cutoff64(base int) uint64 {
	if base < 2 {
		return 0
	}
	const maxUint64 = 1<<64 - 1
	return maxUint64/uint64(base) + 1
}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 0000000..c2805f6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/headermap.go | |||
@@ -0,0 +1,78 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import ( | ||
8 | "net/http" | ||
9 | "strings" | ||
10 | ) | ||
11 | |||
// Bidirectional lookup tables between Go's canonical header casing
// and the lowercase wire form HTTP/2 requires; populated in init.
var (
	commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
	commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
)
16 | |||
// init populates commonLowerHeader and commonCanonHeader with the
// common HTTP header names, so the hot path of lowerHeader is a map
// lookup instead of a strings.ToLower allocation.
func init() {
	for _, v := range []string{
		"accept",
		"accept-charset",
		"accept-encoding",
		"accept-language",
		"accept-ranges",
		"age",
		"access-control-allow-origin",
		"allow",
		"authorization",
		"cache-control",
		"content-disposition",
		"content-encoding",
		"content-language",
		"content-length",
		"content-location",
		"content-range",
		"content-type",
		"cookie",
		"date",
		"etag",
		"expect",
		"expires",
		"from",
		"host",
		"if-match",
		"if-modified-since",
		"if-none-match",
		"if-unmodified-since",
		"last-modified",
		"link",
		"location",
		"max-forwards",
		"proxy-authenticate",
		"proxy-authorization",
		"range",
		"referer",
		"refresh",
		"retry-after",
		"server",
		"set-cookie",
		"strict-transport-security",
		"trailer",
		"transfer-encoding",
		"user-agent",
		"vary",
		"via",
		"www-authenticate",
	} {
		chk := http.CanonicalHeaderKey(v)
		commonLowerHeader[chk] = v
		commonCanonHeader[v] = chk
	}
}
72 | |||
// lowerHeader returns the lowercase (HTTP/2 wire) form of canonical
// header name v, using the precomputed table for common headers and
// falling back to strings.ToLower otherwise.
func lowerHeader(v string) string {
	if s, ok := commonLowerHeader[v]; ok {
		return s
	}
	return strings.ToLower(v)
}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 0000000..54726c2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go | |||
@@ -0,0 +1,240 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package hpack | ||
6 | |||
7 | import ( | ||
8 | "io" | ||
9 | ) | ||
10 | |||
const (
	uint32Max = ^uint32(0)
	// initialHeaderTableSize is the default dynamic header table size
	// described in the HPACK specification.
	initialHeaderTableSize = 4096
)
15 | |||
// An Encoder performs HPACK encoding of header fields, writing the
// encoded bytes to an io.Writer via WriteField.
type Encoder struct {
	dynTab dynamicTable
	// minSize is the minimum table size set by
	// SetMaxDynamicTableSize after the previous Header Table Size
	// Update.
	minSize uint32
	// maxSizeLimit is the maximum table size this encoder
	// supports. This will protect the encoder from too large
	// size.
	maxSizeLimit uint32
	// tableSizeUpdate indicates whether "Header Table Size
	// Update" is required.
	tableSizeUpdate bool
	// w is the destination for encoded output; buf is the scratch
	// buffer reused across WriteField calls.
	w   io.Writer
	buf []byte
}
32 | |||
// NewEncoder returns a new Encoder which performs HPACK encoding.
// Encoded data is written to w.
func NewEncoder(w io.Writer) *Encoder {
	e := &Encoder{
		minSize:         uint32Max,
		maxSizeLimit:    initialHeaderTableSize,
		tableSizeUpdate: false,
		w:               w,
	}
	e.dynTab.table.init()
	e.dynTab.setMaxSize(initialHeaderTableSize)
	return e
}
46 | |||
// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
	e.buf = e.buf[:0]

	if e.tableSizeUpdate {
		e.tableSizeUpdate = false
		if e.minSize < e.dynTab.maxSize {
			// The size dipped below the current max since the last
			// update; signal the minimum first per RFC 7541.
			e.buf = appendTableSize(e.buf, e.minSize)
		}
		e.minSize = uint32Max
		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
	}

	idx, nameValueMatch := e.searchTable(f)
	if nameValueMatch {
		// Exact name+value hit: "Indexed Header Field".
		e.buf = appendIndexed(e.buf, idx)
	} else {
		indexing := e.shouldIndex(f)
		if indexing {
			e.dynTab.add(f)
		}

		if idx == 0 {
			e.buf = appendNewName(e.buf, f, indexing)
		} else {
			e.buf = appendIndexedName(e.buf, f, idx, indexing)
		}
	}
	n, err := e.w.Write(e.buf)
	if err == nil && n != len(e.buf) {
		err = io.ErrShortWrite
	}
	return err
}
83 | |||
84 | // searchTable searches f in both stable and dynamic header tables. | ||
85 | // The static header table is searched first. Only when there is no | ||
86 | // exact match for both name and value, the dynamic header table is | ||
87 | // then searched. If there is no match, i is 0. If both name and value | ||
88 | // match, i is the matched index and nameValueMatch becomes true. If | ||
89 | // only name matches, i points to that index and nameValueMatch | ||
90 | // becomes false. | ||
91 | func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { | ||
92 | i, nameValueMatch = staticTable.search(f) | ||
93 | if nameValueMatch { | ||
94 | return i, true | ||
95 | } | ||
96 | |||
97 | j, nameValueMatch := e.dynTab.table.search(f) | ||
98 | if nameValueMatch || (i == 0 && j != 0) { | ||
99 | return j + uint64(staticTable.len()), nameValueMatch | ||
100 | } | ||
101 | |||
102 | return i, false | ||
103 | } | ||
104 | |||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
// The actual size is bounded by the value passed to
// SetMaxDynamicTableSizeLimit.
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
	if v > e.maxSizeLimit {
		v = e.maxSizeLimit
	}
	if v < e.minSize {
		// Track the low-water mark so WriteField can emit it first.
		e.minSize = v
	}
	e.tableSizeUpdate = true
	e.dynTab.setMaxSize(v)
}
118 | |||
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, which is the same as the default dynamic header table size
// described in the HPACK specification. If the current maximum
// dynamic header table size is strictly greater than v, "Header Table
// Size Update" will be done in the next WriteField call and the
// maximum dynamic header table size is truncated to v.
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
	e.maxSizeLimit = v
	if e.dynTab.maxSize > v {
		e.tableSizeUpdate = true
		e.dynTab.setMaxSize(v)
	}
}
133 | |||
// shouldIndex reports whether f should be indexed: never for
// sensitive fields, and only when f fits in the dynamic table.
func (e *Encoder) shouldIndex(f HeaderField) bool {
	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
}
138 | |||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
// representation, to dst and returns the extended buffer.
func appendIndexed(dst []byte, i uint64) []byte {
	first := len(dst)
	dst = appendVarInt(dst, 7, i)
	dst[first] |= 0x80 // set the Indexed representation's leading 1 bit
	return dst
}
147 | |||
148 | // appendNewName appends f, as encoded in one of "Literal Header field | ||
149 | // - New Name" representation variants, to dst and returns the | ||
150 | // extended buffer. | ||
151 | // | ||
152 | // If f.Sensitive is true, "Never Indexed" representation is used. If | ||
153 | // f.Sensitive is false and indexing is true, "Inremental Indexing" | ||
154 | // representation is used. | ||
155 | func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { | ||
156 | dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) | ||
157 | dst = appendHpackString(dst, f.Name) | ||
158 | return appendHpackString(dst, f.Value) | ||
159 | } | ||
160 | |||
// appendIndexedName appends f and index i referring indexed name
// entry, as encoded in one of "Literal Header field - Indexed Name"
// representation variants, to dst and returns the extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
	first := len(dst)
	var n byte
	// Prefix length differs by representation: 6 bits for
	// Incremental Indexing, 4 bits for Without/Never Indexed.
	if indexing {
		n = 6
	} else {
		n = 4
	}
	dst = appendVarInt(dst, n, i)
	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
	return appendHpackString(dst, f.Value)
}
180 | |||
// appendTableSize appends v, as encoded in "Header Table Size Update"
// representation, to dst and returns the extended buffer.
func appendTableSize(dst []byte, v uint32) []byte {
	first := len(dst)
	dst = appendVarInt(dst, 5, uint64(v))
	dst[first] |= 0x20 // set the Size Update representation's 001 pattern
	return dst
}
189 | |||
// appendVarInt appends i, encoded as an HPACK variable-length integer
// with an n-bit prefix, to dst and returns the extended buffer.
//
// See
// http://http2.github.io/http2-spec/compression.html#integer.representation
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	prefixMax := uint64((1 << n) - 1)
	if i < prefixMax {
		// Fits entirely in the prefix bits.
		return append(dst, byte(i))
	}
	// Emit the all-ones prefix, then the remainder in 7-bit groups,
	// least significant group first, continuation bit 0x80 set on all
	// but the last byte.
	dst = append(dst, byte(prefixMax))
	rem := i - prefixMax
	for rem >= 128 {
		dst = append(dst, byte(0x80|(rem&0x7f)))
		rem >>= 7
	}
	return append(dst, byte(rem))
}
207 | |||
208 | // appendHpackString appends s, as encoded in "String Literal" | ||
209 | // representation, to dst and returns the the extended buffer. | ||
210 | // | ||
211 | // s will be encoded in Huffman codes only when it produces strictly | ||
212 | // shorter byte string. | ||
213 | func appendHpackString(dst []byte, s string) []byte { | ||
214 | huffmanLength := HuffmanEncodeLength(s) | ||
215 | if huffmanLength < uint64(len(s)) { | ||
216 | first := len(dst) | ||
217 | dst = appendVarInt(dst, 7, huffmanLength) | ||
218 | dst = AppendHuffmanString(dst, s) | ||
219 | dst[first] |= 0x80 | ||
220 | } else { | ||
221 | dst = appendVarInt(dst, 7, uint64(len(s))) | ||
222 | dst = append(dst, s...) | ||
223 | } | ||
224 | return dst | ||
225 | } | ||
226 | |||
// encodeTypeByte returns the representation type byte: 0x10 ("Never
// Indexed") when sensitive is true, 0x40 ("Incremental Indexing")
// when indexing is true, otherwise 0 ("Without Indexing").
func encodeTypeByte(indexing, sensitive bool) byte {
	switch {
	case sensitive:
		return 0x10
	case indexing:
		return 0x40
	default:
		return 0
	}
}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 0000000..176644a --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go | |||
@@ -0,0 +1,490 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // Package hpack implements HPACK, a compression format for | ||
6 | // efficiently representing HTTP header fields in the context of HTTP/2. | ||
7 | // | ||
8 | // See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 | ||
9 | package hpack | ||
10 | |||
11 | import ( | ||
12 | "bytes" | ||
13 | "errors" | ||
14 | "fmt" | ||
15 | ) | ||
16 | |||
// A DecodingError is something the spec defines as a decoding error.
// It wraps the underlying cause in Err.
type DecodingError struct {
	Err error
}

// Error implements the error interface, prefixing the wrapped error.
func (de DecodingError) Error() string {
	return fmt.Sprintf("decoding error: %v", de.Err)
}

// An InvalidIndexError is returned when an encoder references a table
// entry before the static table or after the end of the dynamic table.
// Its value is the offending index.
type InvalidIndexError int

// Error implements the error interface.
func (e InvalidIndexError) Error() string {
	return fmt.Sprintf("invalid indexed representation index %d", int(e))
}
33 | |||
// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
	Name, Value string

	// Sensitive means that this header field should never be
	// indexed.
	Sensitive bool
}

// IsPseudo reports whether the header field is an http2 pseudo header.
// That is, it reports whether it starts with a colon.
// It is not otherwise guaranteed to be a valid pseudo header field,
// though.
func (hf HeaderField) IsPseudo() bool {
	if len(hf.Name) == 0 {
		return false
	}
	return hf.Name[0] == ':'
}

// String renders the field for debugging, marking sensitive fields.
func (hf HeaderField) String() string {
	suffix := ""
	if hf.Sensitive {
		suffix = " (sensitive)"
	}
	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
}

// Size returns the size of an entry per RFC 7541 section 4.1:
// the name length plus the value length plus 32 octets of overhead,
// computed on the un-Huffman-coded strings.
//
// This can overflow if somebody makes a large HeaderField Name and/or
// Value by hand, but we don't care, because that won't happen on the
// wire because the encoding doesn't allow it.
func (hf HeaderField) Size() uint32 {
	return uint32(len(hf.Name)) + uint32(len(hf.Value)) + 32
}
76 | |||
// A Decoder is the decoding context for incremental processing of
// header blocks. It is not safe for concurrent use.
type Decoder struct {
	dynTab dynamicTable        // HPACK dynamic table state
	emit   func(f HeaderField) // callback invoked for each decoded field

	emitEnabled bool // whether calls to emit are enabled
	maxStrLen   int  // 0 means unlimited

	// buf is the unparsed buffer. It's only written to
	// saveBuf if it was truncated in the middle of a header
	// block. Because it's usually not owned, we can only
	// process it under Write.
	buf []byte // not owned; only valid during Write

	// saveBuf is previous data passed to Write which we weren't able
	// to fully parse before. Unlike buf, we own this data.
	saveBuf bytes.Buffer
}
96 | |||
// NewDecoder returns a new decoder with the provided maximum dynamic
// table size. The emitFunc will be called for each valid field
// parsed, in the same goroutine as calls to Write, before Write returns.
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
	d := &Decoder{
		emit:        emitFunc,
		emitEnabled: true,
	}
	d.dynTab.table.init()
	// Both the current maximum and the ceiling the encoded stream may
	// raise it to start at the provided value.
	d.dynTab.allowedMaxSize = maxDynamicTableSize
	d.dynTab.setMaxSize(maxDynamicTableSize)
	return d
}
110 | |||
// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")

// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string. If a string exceeds this length (even after any
// decompression), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
	d.maxStrLen = n
}

// SetEmitFunc changes the callback used when new header fields
// are decoded.
// It must be non-nil. It does not affect EmitEnabled.
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
	d.emit = emitFunc
}

// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
// while still decoding and keeping in-sync with decoder state, but
// without doing unnecessary decompression or generating unnecessary
// garbage for header fields past the limit.
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }

// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
// are currently enabled. The default is true.
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }

// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
// underlying buffers for garbage reasons.

// SetMaxDynamicTableSize changes the decoder's dynamic header table
// size limit to v, evicting entries as needed to fit under it.
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
	d.dynTab.setMaxSize(v)
}

// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
	d.dynTab.allowedMaxSize = v
}
156 | |||
// dynamicTable is the HPACK dynamic table: a size-bounded FIFO of
// header fields.
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
type dynamicTable struct {
	table          headerFieldTable
	size           uint32 // in bytes
	maxSize        uint32 // current maxSize
	allowedMaxSize uint32 // maxSize may go up to this, inclusive
}

// setMaxSize changes the current maximum size, evicting entries as
// needed to fit under the new limit.
func (dt *dynamicTable) setMaxSize(v uint32) {
	dt.maxSize = v
	dt.evict()
}

// add appends f to the table and evicts old entries if the table now
// exceeds maxSize.
func (dt *dynamicTable) add(f HeaderField) {
	dt.table.addEntry(f)
	dt.size += f.Size()
	dt.evict()
}

// If we're too big, evict old stuff. Entries are evicted oldest-first
// (from the front of ents).
func (dt *dynamicTable) evict() {
	var n int
	for dt.size > dt.maxSize && n < dt.table.len() {
		dt.size -= dt.table.ents[n].Size()
		n++
	}
	dt.table.evictOldest(n)
}
185 | |||
// maxTableIndex returns the highest valid HPACK index: the static
// table length plus the current dynamic table length.
func (d *Decoder) maxTableIndex() int {
	// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
	// the dynamic table to 2^32 bytes, where each entry will occupy more than
	// one byte. Further, the staticTable has a fixed, small length.
	return d.dynTab.table.len() + staticTable.len()
}

// at returns the header field at 1-based HPACK index i. Indices
// 1..staticTable.len() address the static table; larger indices
// address the dynamic table, newest entry first. ok is false for
// index 0 or an index past the end of the dynamic table.
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
	// See Section 2.3.3.
	if i == 0 {
		return
	}
	if i <= uint64(staticTable.len()) {
		return staticTable.ents[i-1], true
	}
	if i > uint64(d.maxTableIndex()) {
		return
	}
	// In the dynamic table, newer entries have lower indices.
	// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
	// the reversed dynamic table.
	dt := d.dynTab.table
	return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}
210 | |||
211 | // Decode decodes an entire block. | ||
212 | // | ||
213 | // TODO: remove this method and make it incremental later? This is | ||
214 | // easier for debugging now. | ||
215 | func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { | ||
216 | var hf []HeaderField | ||
217 | saveFunc := d.emit | ||
218 | defer func() { d.emit = saveFunc }() | ||
219 | d.emit = func(f HeaderField) { hf = append(hf, f) } | ||
220 | if _, err := d.Write(p); err != nil { | ||
221 | return nil, err | ||
222 | } | ||
223 | if err := d.Close(); err != nil { | ||
224 | return nil, err | ||
225 | } | ||
226 | return hf, nil | ||
227 | } | ||
228 | |||
229 | func (d *Decoder) Close() error { | ||
230 | if d.saveBuf.Len() > 0 { | ||
231 | d.saveBuf.Reset() | ||
232 | return DecodingError{errors.New("truncated headers")} | ||
233 | } | ||
234 | return nil | ||
235 | } | ||
236 | |||
// Write feeds an encoded header block fragment to the decoder.
// Decoded fields are delivered to the emit callback before Write
// returns; trailing bytes that do not yet form a complete field are
// buffered internally for the next Write (or rejected by Close).
func (d *Decoder) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		// Prevent state machine CPU attacks (making us redo
		// work up to the point of finding out we don't have
		// enough data)
		return
	}
	// Only copy the data if we have to. Optimistically assume
	// that p will contain a complete header block.
	if d.saveBuf.Len() == 0 {
		d.buf = p
	} else {
		// Prepend the leftover from the previous Write, then work
		// from our owned copy.
		d.saveBuf.Write(p)
		d.buf = d.saveBuf.Bytes()
		d.saveBuf.Reset()
	}

	for len(d.buf) > 0 {
		err = d.parseHeaderFieldRepr()
		if err == errNeedMore {
			// Extra paranoia, making sure saveBuf won't
			// get too large. All the varint and string
			// reading code earlier should already catch
			// overlong things and return ErrStringLength,
			// but keep this as a last resort.
			const varIntOverhead = 8 // conservative
			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
				return 0, ErrStringLength
			}
			// Stash the unparsed remainder for the next Write.
			d.saveBuf.Write(d.buf)
			return len(p), nil
		}
		if err != nil {
			break
		}
	}
	return len(p), err
}
275 | |||
// errNeedMore is an internal sentinel error value that means the
// buffer is truncated and we need to read more data before we can
// continue parsing.
var errNeedMore = errors.New("need more data")

// indexType describes how a literal header field representation
// interacts with the dynamic table (RFC 7541 section 6.2).
type indexType int

const (
	indexedTrue  indexType = iota // add the field to the dynamic table
	indexedFalse                  // do not index the field
	indexedNever                  // never index the field (sensitive)
)

// indexed reports whether the field should be added to the dynamic table.
func (v indexType) indexed() bool { return v == indexedTrue }

// sensitive reports whether the field must never be indexed.
func (v indexType) sensitive() bool { return v == indexedNever }
291 | |||
292 | // returns errNeedMore if there isn't enough data available. | ||
293 | // any other error is fatal. | ||
294 | // consumes d.buf iff it returns nil. | ||
295 | // precondition: must be called with len(d.buf) > 0 | ||
296 | func (d *Decoder) parseHeaderFieldRepr() error { | ||
297 | b := d.buf[0] | ||
298 | switch { | ||
299 | case b&128 != 0: | ||
300 | // Indexed representation. | ||
301 | // High bit set? | ||
302 | // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 | ||
303 | return d.parseFieldIndexed() | ||
304 | case b&192 == 64: | ||
305 | // 6.2.1 Literal Header Field with Incremental Indexing | ||
306 | // 0b10xxxxxx: top two bits are 10 | ||
307 | // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 | ||
308 | return d.parseFieldLiteral(6, indexedTrue) | ||
309 | case b&240 == 0: | ||
310 | // 6.2.2 Literal Header Field without Indexing | ||
311 | // 0b0000xxxx: top four bits are 0000 | ||
312 | // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 | ||
313 | return d.parseFieldLiteral(4, indexedFalse) | ||
314 | case b&240 == 16: | ||
315 | // 6.2.3 Literal Header Field never Indexed | ||
316 | // 0b0001xxxx: top four bits are 0001 | ||
317 | // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 | ||
318 | return d.parseFieldLiteral(4, indexedNever) | ||
319 | case b&224 == 32: | ||
320 | // 6.3 Dynamic Table Size Update | ||
321 | // Top three bits are '001'. | ||
322 | // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 | ||
323 | return d.parseDynamicTableSizeUpdate() | ||
324 | } | ||
325 | |||
326 | return DecodingError{errors.New("invalid encoding")} | ||
327 | } | ||
328 | |||
329 | // (same invariants and behavior as parseHeaderFieldRepr) | ||
330 | func (d *Decoder) parseFieldIndexed() error { | ||
331 | buf := d.buf | ||
332 | idx, buf, err := readVarInt(7, buf) | ||
333 | if err != nil { | ||
334 | return err | ||
335 | } | ||
336 | hf, ok := d.at(idx) | ||
337 | if !ok { | ||
338 | return DecodingError{InvalidIndexError(idx)} | ||
339 | } | ||
340 | d.buf = buf | ||
341 | return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) | ||
342 | } | ||
343 | |||
// parseFieldLiteral handles a literal header field representation
// with an n-bit name-index prefix (same invariants and behavior as
// parseHeaderFieldRepr).
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
	buf := d.buf
	nameIdx, buf, err := readVarInt(n, buf)
	if err != nil {
		return err
	}

	var hf HeaderField
	// Skip string allocation/decompression when emission is disabled,
	// unless the field must be stored in the dynamic table anyway.
	wantStr := d.emitEnabled || it.indexed()
	if nameIdx > 0 {
		// Name given by table reference.
		ihf, ok := d.at(nameIdx)
		if !ok {
			return DecodingError{InvalidIndexError(nameIdx)}
		}
		hf.Name = ihf.Name
	} else {
		// Name given as a string literal.
		hf.Name, buf, err = d.readString(buf, wantStr)
		if err != nil {
			return err
		}
	}
	hf.Value, buf, err = d.readString(buf, wantStr)
	if err != nil {
		return err
	}
	// Only consume the input once the whole field parsed cleanly.
	d.buf = buf
	if it.indexed() {
		d.dynTab.add(hf)
	}
	hf.Sensitive = it.sensitive()
	return d.callEmit(hf)
}
377 | |||
378 | func (d *Decoder) callEmit(hf HeaderField) error { | ||
379 | if d.maxStrLen != 0 { | ||
380 | if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { | ||
381 | return ErrStringLength | ||
382 | } | ||
383 | } | ||
384 | if d.emitEnabled { | ||
385 | d.emit(hf) | ||
386 | } | ||
387 | return nil | ||
388 | } | ||
389 | |||
390 | // (same invariants and behavior as parseHeaderFieldRepr) | ||
391 | func (d *Decoder) parseDynamicTableSizeUpdate() error { | ||
392 | buf := d.buf | ||
393 | size, buf, err := readVarInt(5, buf) | ||
394 | if err != nil { | ||
395 | return err | ||
396 | } | ||
397 | if size > uint64(d.dynTab.allowedMaxSize) { | ||
398 | return DecodingError{errors.New("dynamic table size update too large")} | ||
399 | } | ||
400 | d.dynTab.setMaxSize(uint32(size)) | ||
401 | d.buf = buf | ||
402 | return nil | ||
403 | } | ||
404 | |||
405 | var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} | ||
406 | |||
407 | // readVarInt reads an unsigned variable length integer off the | ||
408 | // beginning of p. n is the parameter as described in | ||
409 | // http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. | ||
410 | // | ||
411 | // n must always be between 1 and 8. | ||
412 | // | ||
413 | // The returned remain buffer is either a smaller suffix of p, or err != nil. | ||
414 | // The error is errNeedMore if p doesn't contain a complete integer. | ||
415 | func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { | ||
416 | if n < 1 || n > 8 { | ||
417 | panic("bad n") | ||
418 | } | ||
419 | if len(p) == 0 { | ||
420 | return 0, p, errNeedMore | ||
421 | } | ||
422 | i = uint64(p[0]) | ||
423 | if n < 8 { | ||
424 | i &= (1 << uint64(n)) - 1 | ||
425 | } | ||
426 | if i < (1<<uint64(n))-1 { | ||
427 | return i, p[1:], nil | ||
428 | } | ||
429 | |||
430 | origP := p | ||
431 | p = p[1:] | ||
432 | var m uint64 | ||
433 | for len(p) > 0 { | ||
434 | b := p[0] | ||
435 | p = p[1:] | ||
436 | i += uint64(b&127) << m | ||
437 | if b&128 == 0 { | ||
438 | return i, p, nil | ||
439 | } | ||
440 | m += 7 | ||
441 | if m >= 63 { // TODO: proper overflow check. making this up. | ||
442 | return 0, origP, errVarintOverflow | ||
443 | } | ||
444 | } | ||
445 | return 0, origP, errNeedMore | ||
446 | } | ||
447 | |||
// readString decodes an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
	if len(p) == 0 {
		return "", p, errNeedMore
	}
	// High bit of the first byte is the Huffman flag (RFC 7541 5.2).
	isHuff := p[0]&128 != 0
	strLen, p, err := readVarInt(7, p)
	if err != nil {
		return "", p, err
	}
	// Note: for Huffman strings strLen is the *compressed* length;
	// the decoded length is separately bounded by huffmanDecode below.
	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
		return "", nil, ErrStringLength
	}
	if uint64(len(p)) < strLen {
		return "", p, errNeedMore
	}
	if !isHuff {
		if wantStr {
			s = string(p[:strLen])
		}
		return s, p[strLen:], nil
	}

	if wantStr {
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset() // don't trust others
		defer bufPool.Put(buf)
		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
			buf.Reset()
			return "", nil, err
		}
		s = buf.String()
		buf.Reset() // be nice to GC
	}
	return s, p[strLen:], nil
}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 0000000..8850e39 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go | |||
@@ -0,0 +1,212 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package hpack | ||
6 | |||
7 | import ( | ||
8 | "bytes" | ||
9 | "errors" | ||
10 | "io" | ||
11 | "sync" | ||
12 | ) | ||
13 | |||
// bufPool pools scratch bytes.Buffers used during Huffman decoding,
// avoiding a fresh allocation per decoded string.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// HuffmanDecode decodes the string in v and writes the expanded
// result to w, returning the number of bytes written to w and the
// Write call's return value. At most one Write call is made.
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // pooled buffer may hold stale data
	defer bufPool.Put(buf)
	if err := huffmanDecode(buf, 0, v); err != nil { // maxLen 0 = unlimited
		return 0, err
	}
	return w.Write(buf.Bytes())
}
30 | |||
// HuffmanDecodeToString decodes the string in v, returning the
// expanded result.
func HuffmanDecodeToString(v []byte) (string, error) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // pooled buffer may hold stale data
	defer bufPool.Put(buf)
	if err := huffmanDecode(buf, 0, v); err != nil { // maxLen 0 = unlimited
		return "", err
	}
	return buf.String(), nil
}

// ErrInvalidHuffman is returned for errors found decoding
// Huffman-encoded strings.
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
45 | |||
// huffmanDecode decodes v to buf by walking the 256-way decoding tree
// rooted at rootHuffmanNode, eight input bits per step; leaf nodes
// record how many of those bits the matched symbol actually consumed.
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
	n := rootHuffmanNode
	// cur is the bit buffer that has not been fed into n.
	// cbits is the number of low order bits in cur that are valid.
	// sbits is the number of bits of the symbol prefix being decoded.
	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
	for _, b := range v {
		cur = cur<<8 | uint(b)
		cbits += 8
		sbits += 8
		for cbits >= 8 {
			// Feed the top 8 unconsumed bits to the current node.
			idx := byte(cur >> (cbits - 8))
			n = n.children[idx]
			if n == nil {
				return ErrInvalidHuffman
			}
			if n.children == nil {
				// Leaf: emit the symbol, consuming only the bits
				// its code actually used, then restart at the root.
				if maxLen != 0 && buf.Len() == maxLen {
					return ErrStringLength
				}
				buf.WriteByte(n.sym)
				cbits -= n.codeLen
				n = rootHuffmanNode
				sbits = cbits
			} else {
				// Internal node: all 8 bits were consumed.
				cbits -= 8
			}
		}
	}
	// Drain any symbols fully contained in the remaining <8 bits.
	for cbits > 0 {
		n = n.children[byte(cur<<(8-cbits))]
		if n == nil {
			return ErrInvalidHuffman
		}
		if n.children != nil || n.codeLen > cbits {
			break
		}
		if maxLen != 0 && buf.Len() == maxLen {
			return ErrStringLength
		}
		buf.WriteByte(n.sym)
		cbits -= n.codeLen
		n = rootHuffmanNode
		sbits = cbits
	}
	if sbits > 7 {
		// Either there was an incomplete symbol, or overlong padding.
		// Both are decoding errors per RFC 7541 section 5.2.
		return ErrInvalidHuffman
	}
	if mask := uint(1<<cbits - 1); cur&mask != mask {
		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
		return ErrInvalidHuffman
	}

	return nil
}
106 | |||
// node is one node of the Huffman decoding tree. Each level of the
// tree fans out on 8 input bits at a time.
type node struct {
	// children is non-nil for internal nodes
	children []*node

	// The following are only valid if children is nil:
	codeLen uint8 // number of bits that led to the output of sym
	sym     byte  // output symbol
}

// newInternalNode returns an internal node with a full 256-way
// child fanout (one slot per possible next input byte).
func newInternalNode() *node {
	return &node{children: make([]*node, 256)}
}

// rootHuffmanNode is the root of the decoding tree, populated by init.
var rootHuffmanNode = newInternalNode()
121 | |||
// init builds the Huffman decoding tree from the code tables.
func init() {
	if len(huffmanCodes) != 256 {
		panic("unexpected size")
	}
	for i, code := range huffmanCodes {
		addDecoderNode(byte(i), code, huffmanCodeLen[i])
	}
}

// addDecoderNode inserts sym, whose Huffman code is the low codeLen
// bits of code, into the decoding tree.
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
	cur := rootHuffmanNode
	// Descend one internal node per full 8 bits of code.
	for codeLen > 8 {
		codeLen -= 8
		i := uint8(code >> codeLen)
		if cur.children[i] == nil {
			cur.children[i] = newInternalNode()
		}
		cur = cur.children[i]
	}
	// Fewer than 8 bits remain: the symbol occupies every child slot
	// whose top bits match the remaining code (2^(8-codeLen) slots).
	shift := 8 - codeLen
	start, end := int(uint8(code<<shift)), int(1<<shift)
	for i := start; i < start+end; i++ {
		cur.children[i] = &node{sym: sym, codeLen: codeLen}
	}
}
147 | |||
148 | // AppendHuffmanString appends s, as encoded in Huffman codes, to dst | ||
149 | // and returns the extended buffer. | ||
150 | func AppendHuffmanString(dst []byte, s string) []byte { | ||
151 | rembits := uint8(8) | ||
152 | |||
153 | for i := 0; i < len(s); i++ { | ||
154 | if rembits == 8 { | ||
155 | dst = append(dst, 0) | ||
156 | } | ||
157 | dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) | ||
158 | } | ||
159 | |||
160 | if rembits < 8 { | ||
161 | // special EOS symbol | ||
162 | code := uint32(0x3fffffff) | ||
163 | nbits := uint8(30) | ||
164 | |||
165 | t := uint8(code >> (nbits - rembits)) | ||
166 | dst[len(dst)-1] |= t | ||
167 | } | ||
168 | |||
169 | return dst | ||
170 | } | ||
171 | |||
172 | // HuffmanEncodeLength returns the number of bytes required to encode | ||
173 | // s in Huffman codes. The result is round up to byte boundary. | ||
174 | func HuffmanEncodeLength(s string) uint64 { | ||
175 | n := uint64(0) | ||
176 | for i := 0; i < len(s); i++ { | ||
177 | n += uint64(huffmanCodeLen[s[i]]) | ||
178 | } | ||
179 | return (n + 7) / 8 | ||
180 | } | ||
181 | |||
// appendByteToHuffmanCode appends Huffman code for c to dst and
// returns the extended buffer and the remaining bits in the last
// element. The appending is not byte aligned and the remaining bits
// in the last element of dst is given in rembits.
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
	code := huffmanCodes[c]
	nbits := huffmanCodeLen[c]

	for {
		if rembits > nbits {
			// The whole remaining code fits in the current byte,
			// with room to spare.
			t := uint8(code << (rembits - nbits))
			dst[len(dst)-1] |= t
			rembits -= nbits
			break
		}

		// Fill the current byte with the top rembits bits of the code.
		t := uint8(code >> (nbits - rembits))
		dst[len(dst)-1] |= t

		nbits -= rembits
		rembits = 8

		if nbits == 0 {
			break
		}

		// More code bits remain: start a new output byte.
		dst = append(dst, 0)
	}

	return dst, rembits
}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go new file mode 100644 index 0000000..a66cfbe --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables.go | |||
@@ -0,0 +1,479 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package hpack | ||
6 | |||
7 | import ( | ||
8 | "fmt" | ||
9 | ) | ||
10 | |||
// headerFieldTable implements a list of HeaderFields.
// This is used to implement the static and dynamic tables.
type headerFieldTable struct {
	// For static tables, entries are never evicted.
	//
	// For dynamic tables, entries are evicted from ents[0] and added to the end.
	// Each entry has a unique id that starts at one and increments for each
	// entry that is added. This unique id is stable across evictions, meaning
	// it can be used as a pointer to a specific entry. As in hpack, unique ids
	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
	//
	// Zero is not a valid unique id.
	//
	// evictCount should not overflow in any remotely practical situation. In
	// practice, we will have one dynamic table per HTTP/2 connection. If we
	// assume a very powerful server that handles 1M QPS per connection and each
	// request adds (then evicts) 100 entries from the table, it would still take
	// 2M years for evictCount to overflow.
	ents       []HeaderField
	evictCount uint64

	// byName maps a HeaderField name to the unique id of the newest entry with
	// the same name. See above for a definition of "unique id".
	byName map[string]uint64

	// byNameValue maps a HeaderField name/value pair to the unique id of the newest
	// entry with the same name and value. See above for a definition of "unique id".
	byNameValue map[pairNameValue]uint64
}

// pairNameValue is the composite key type for byNameValue lookups.
type pairNameValue struct {
	name, value string
}

// init allocates the lookup maps. It must be called before any
// entries are added.
func (t *headerFieldTable) init() {
	t.byName = make(map[string]uint64)
	t.byNameValue = make(map[pairNameValue]uint64)
}

// len reports the number of entries in the table.
func (t *headerFieldTable) len() int {
	return len(t.ents)
}
54 | |||
55 | // addEntry adds a new entry. | ||
56 | func (t *headerFieldTable) addEntry(f HeaderField) { | ||
57 | id := uint64(t.len()) + t.evictCount + 1 | ||
58 | t.byName[f.Name] = id | ||
59 | t.byNameValue[pairNameValue{f.Name, f.Value}] = id | ||
60 | t.ents = append(t.ents, f) | ||
61 | } | ||
62 | |||
// evictOldest evicts the n oldest entries in the table.
func (t *headerFieldTable) evictOldest(n int) {
	if n > t.len() {
		panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
	}
	for k := 0; k < n; k++ {
		f := t.ents[k]
		// The unique id of ents[k] (see the headerFieldTable comment).
		id := t.evictCount + uint64(k) + 1
		// Only delete the map entries if they still point at this
		// (oldest) occurrence; a newer entry with the same name (or
		// name/value) may have overwritten them.
		if t.byName[f.Name] == id {
			delete(t.byName, f.Name)
		}
		if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
			delete(t.byNameValue, p)
		}
	}
	// Slide the survivors to the front, then zero the vacated tail.
	copy(t.ents, t.ents[n:])
	for k := t.len() - n; k < t.len(); k++ {
		t.ents[k] = HeaderField{} // so strings can be garbage collected
	}
	t.ents = t.ents[:t.len()-n]
	if t.evictCount+uint64(n) < t.evictCount {
		panic("evictCount overflow")
	}
	t.evictCount += uint64(n)
}
88 | |||
// search finds f in the table. If there is no match, i is 0.
// If both name and value match, i is the matched index and nameValueMatch
// becomes true. If only name matches, i points to that index and
// nameValueMatch becomes false.
//
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
// table, the return value i actually refers to the entry t.ents[t.len()-i].
//
// All tables are assumed to be a dynamic tables except for the global
// staticTable pointer.
//
// See Section 2.3.3.
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
	// Sensitive fields skip the name/value match and can only get a
	// name-only match below.
	if !f.Sensitive {
		if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
			return t.idToIndex(id), true
		}
	}
	if id := t.byName[f.Name]; id != 0 {
		return t.idToIndex(id), false
	}
	return 0, false
}

// idToIndex converts a unique id to an HPACK index.
// See Section 2.3.3.
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
	if id <= t.evictCount {
		panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
	}
	k := id - t.evictCount - 1 // convert id to an index t.ents[k]
	if t != staticTable {
		return uint64(t.len()) - k // dynamic table
	}
	return k + 1
}
127 | |||
// staticTable is the immutable HPACK static table, shared by all
// encoders and decoders.
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
var staticTable = newStaticTable()

// staticTableEntries lists the static table contents in order; the
// HPACK index of an entry is its position here plus one.
var staticTableEntries = [...]HeaderField{
	{Name: ":authority"},
	{Name: ":method", Value: "GET"},
	{Name: ":method", Value: "POST"},
	{Name: ":path", Value: "/"},
	{Name: ":path", Value: "/index.html"},
	{Name: ":scheme", Value: "http"},
	{Name: ":scheme", Value: "https"},
	{Name: ":status", Value: "200"},
	{Name: ":status", Value: "204"},
	{Name: ":status", Value: "206"},
	{Name: ":status", Value: "304"},
	{Name: ":status", Value: "400"},
	{Name: ":status", Value: "404"},
	{Name: ":status", Value: "500"},
	{Name: "accept-charset"},
	{Name: "accept-encoding", Value: "gzip, deflate"},
	{Name: "accept-language"},
	{Name: "accept-ranges"},
	{Name: "accept"},
	{Name: "access-control-allow-origin"},
	{Name: "age"},
	{Name: "allow"},
	{Name: "authorization"},
	{Name: "cache-control"},
	{Name: "content-disposition"},
	{Name: "content-encoding"},
	{Name: "content-language"},
	{Name: "content-length"},
	{Name: "content-location"},
	{Name: "content-range"},
	{Name: "content-type"},
	{Name: "cookie"},
	{Name: "date"},
	{Name: "etag"},
	{Name: "expect"},
	{Name: "expires"},
	{Name: "from"},
	{Name: "host"},
	{Name: "if-match"},
	{Name: "if-modified-since"},
	{Name: "if-none-match"},
	{Name: "if-range"},
	{Name: "if-unmodified-since"},
	{Name: "last-modified"},
	{Name: "link"},
	{Name: "location"},
	{Name: "max-forwards"},
	{Name: "proxy-authenticate"},
	{Name: "proxy-authorization"},
	{Name: "range"},
	{Name: "referer"},
	{Name: "refresh"},
	{Name: "retry-after"},
	{Name: "server"},
	{Name: "set-cookie"},
	{Name: "strict-transport-security"},
	{Name: "transfer-encoding"},
	{Name: "user-agent"},
	{Name: "vary"},
	{Name: "via"},
	{Name: "www-authenticate"},
}

// newStaticTable builds the headerFieldTable holding staticTableEntries.
func newStaticTable() *headerFieldTable {
	t := &headerFieldTable{}
	t.init()
	for _, e := range staticTableEntries[:] {
		t.addEntry(e)
	}
	return t
}
202 | |||
// huffmanCodes maps each byte value 0-255 to its HPACK static Huffman
// code (RFC 7541, Appendix B), right-aligned within the uint32.
// huffmanCodeLen below gives the number of significant bits.
var huffmanCodes = [256]uint32{
	// 0x00 - 0x1f
	0x1ff8,
	0x7fffd8,
	0xfffffe2,
	0xfffffe3,
	0xfffffe4,
	0xfffffe5,
	0xfffffe6,
	0xfffffe7,
	0xfffffe8,
	0xffffea,
	0x3ffffffc,
	0xfffffe9,
	0xfffffea,
	0x3ffffffd,
	0xfffffeb,
	0xfffffec,
	0xfffffed,
	0xfffffee,
	0xfffffef,
	0xffffff0,
	0xffffff1,
	0xffffff2,
	0x3ffffffe,
	0xffffff3,
	0xffffff4,
	0xffffff5,
	0xffffff6,
	0xffffff7,
	0xffffff8,
	0xffffff9,
	0xffffffa,
	0xffffffb,
	// 0x20 - 0x3f
	0x14,
	0x3f8,
	0x3f9,
	0xffa,
	0x1ff9,
	0x15,
	0xf8,
	0x7fa,
	0x3fa,
	0x3fb,
	0xf9,
	0x7fb,
	0xfa,
	0x16,
	0x17,
	0x18,
	0x0,
	0x1,
	0x2,
	0x19,
	0x1a,
	0x1b,
	0x1c,
	0x1d,
	0x1e,
	0x1f,
	0x5c,
	0xfb,
	0x7ffc,
	0x20,
	0xffb,
	0x3fc,
	// 0x40 - 0x5f
	0x1ffa,
	0x21,
	0x5d,
	0x5e,
	0x5f,
	0x60,
	0x61,
	0x62,
	0x63,
	0x64,
	0x65,
	0x66,
	0x67,
	0x68,
	0x69,
	0x6a,
	0x6b,
	0x6c,
	0x6d,
	0x6e,
	0x6f,
	0x70,
	0x71,
	0x72,
	0xfc,
	0x73,
	0xfd,
	0x1ffb,
	0x7fff0,
	0x1ffc,
	0x3ffc,
	0x22,
	// 0x60 - 0x7f
	0x7ffd,
	0x3,
	0x23,
	0x4,
	0x24,
	0x5,
	0x25,
	0x26,
	0x27,
	0x6,
	0x74,
	0x75,
	0x28,
	0x29,
	0x2a,
	0x7,
	0x2b,
	0x76,
	0x2c,
	0x8,
	0x9,
	0x2d,
	0x77,
	0x78,
	0x79,
	0x7a,
	0x7b,
	0x7ffe,
	0x7fc,
	0x3ffd,
	0x1ffd,
	0xffffffc,
	// 0x80 - 0x9f
	0xfffe6,
	0x3fffd2,
	0xfffe7,
	0xfffe8,
	0x3fffd3,
	0x3fffd4,
	0x3fffd5,
	0x7fffd9,
	0x3fffd6,
	0x7fffda,
	0x7fffdb,
	0x7fffdc,
	0x7fffdd,
	0x7fffde,
	0xffffeb,
	0x7fffdf,
	0xffffec,
	0xffffed,
	0x3fffd7,
	0x7fffe0,
	0xffffee,
	0x7fffe1,
	0x7fffe2,
	0x7fffe3,
	0x7fffe4,
	0x1fffdc,
	0x3fffd8,
	0x7fffe5,
	0x3fffd9,
	0x7fffe6,
	0x7fffe7,
	0xffffef,
	// 0xa0 - 0xbf
	0x3fffda,
	0x1fffdd,
	0xfffe9,
	0x3fffdb,
	0x3fffdc,
	0x7fffe8,
	0x7fffe9,
	0x1fffde,
	0x7fffea,
	0x3fffdd,
	0x3fffde,
	0xfffff0,
	0x1fffdf,
	0x3fffdf,
	0x7fffeb,
	0x7fffec,
	0x1fffe0,
	0x1fffe1,
	0x3fffe0,
	0x1fffe2,
	0x7fffed,
	0x3fffe1,
	0x7fffee,
	0x7fffef,
	0xfffea,
	0x3fffe2,
	0x3fffe3,
	0x3fffe4,
	0x7ffff0,
	0x3fffe5,
	0x3fffe6,
	0x7ffff1,
	// 0xc0 - 0xdf
	0x3ffffe0,
	0x3ffffe1,
	0xfffeb,
	0x7fff1,
	0x3fffe7,
	0x7ffff2,
	0x3fffe8,
	0x1ffffec,
	0x3ffffe2,
	0x3ffffe3,
	0x3ffffe4,
	0x7ffffde,
	0x7ffffdf,
	0x3ffffe5,
	0xfffff1,
	0x1ffffed,
	0x7fff2,
	0x1fffe3,
	0x3ffffe6,
	0x7ffffe0,
	0x7ffffe1,
	0x3ffffe7,
	0x7ffffe2,
	0xfffff2,
	0x1fffe4,
	0x1fffe5,
	0x3ffffe8,
	0x3ffffe9,
	0xffffffd,
	0x7ffffe3,
	0x7ffffe4,
	0x7ffffe5,
	// 0xe0 - 0xff
	0xfffec,
	0xfffff3,
	0xfffed,
	0x1fffe6,
	0x3fffe9,
	0x1fffe7,
	0x1fffe8,
	0x7ffff3,
	0x3fffea,
	0x3fffeb,
	0x1ffffee,
	0x1ffffef,
	0xfffff4,
	0xfffff5,
	0x3ffffea,
	0x7ffff4,
	0x3ffffeb,
	0x7ffffe6,
	0x3ffffec,
	0x3ffffed,
	0x7ffffe7,
	0x7ffffe8,
	0x7ffffe9,
	0x7ffffea,
	0x7ffffeb,
	0xffffffe,
	0x7ffffec,
	0x7ffffed,
	0x7ffffee,
	0x7ffffef,
	0x7fffff0,
	0x3ffffee,
}

// huffmanCodeLen holds the bit length (number of significant bits) of
// the corresponding entry in huffmanCodes, 16 byte values per row.
var huffmanCodeLen = [256]uint8{
	13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
	28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
	5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
	13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
	15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
	6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
	20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
	24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
	22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
	21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
	26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
	19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
	20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
	26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 0000000..d565f40 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go | |||
@@ -0,0 +1,391 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // Package http2 implements the HTTP/2 protocol. | ||
6 | // | ||
7 | // This package is low-level and intended to be used directly by very | ||
8 | // few people. Most users will use it indirectly through the automatic | ||
9 | // use by the net/http package (from Go 1.6 and later). | ||
10 | // For use in earlier Go versions see ConfigureServer. (Transport support | ||
11 | // requires Go 1.6 or later) | ||
12 | // | ||
13 | // See https://http2.github.io/ for more information on HTTP/2. | ||
14 | // | ||
15 | // See https://http2.golang.org/ for a test server running this code. | ||
16 | // | ||
17 | package http2 // import "golang.org/x/net/http2" | ||
18 | |||
19 | import ( | ||
20 | "bufio" | ||
21 | "crypto/tls" | ||
22 | "errors" | ||
23 | "fmt" | ||
24 | "io" | ||
25 | "net/http" | ||
26 | "os" | ||
27 | "sort" | ||
28 | "strconv" | ||
29 | "strings" | ||
30 | "sync" | ||
31 | |||
32 | "golang.org/x/net/lex/httplex" | ||
33 | ) | ||
34 | |||
// Debug knobs, wired to the GODEBUG environment variable in init below.
var (
	VerboseLogs    bool // enabled by GODEBUG containing http2debug=1 or =2
	logFrameWrites bool // enabled by http2debug=2
	logFrameReads  bool // enabled by http2debug=2
	inTests        bool
)

// init enables verbose/frame-level logging when GODEBUG requests it.
func init() {
	e := os.Getenv("GODEBUG")
	if strings.Contains(e, "http2debug=1") {
		VerboseLogs = true
	}
	if strings.Contains(e, "http2debug=2") {
		VerboseLogs = true
		logFrameWrites = true
		logFrameReads = true
	}
}

const (
	// ClientPreface is the string that must be sent by new
	// connections from clients.
	ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

	// SETTINGS_MAX_FRAME_SIZE default
	// http://http2.github.io/http2-spec/#rfc.section.6.5.2
	initialMaxFrameSize = 16384

	// NextProtoTLS is the NPN/ALPN protocol negotiated during
	// HTTP/2's TLS setup.
	NextProtoTLS = "h2"

	// http://http2.github.io/http2-spec/#SettingValues
	initialHeaderTableSize = 4096

	initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size

	defaultMaxReadFrameSize = 1 << 20 // 1 MB
)

var (
	// clientPreface is ClientPreface as a byte slice, for direct
	// writes and comparisons.
	clientPreface = []byte(ClientPreface)
)
78 | |||
// streamState is the position of one stream in the HTTP/2 stream
// state machine.
type streamState int

// HTTP/2 stream states.
//
// See http://tools.ietf.org/html/rfc7540#section-5.1.
//
// For simplicity, the server code merges "reserved (local)" into
// "half-closed (remote)". This is one less state transition to track.
// The only downside is that we send PUSH_PROMISEs slightly less
// liberally than allowable. More discussion here:
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
//
// "reserved (remote)" is omitted since the client code does not
// support server push.
const (
	stateIdle streamState = iota
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
	stateClosed
)

// stateName maps each streamState to a human-readable name.
var stateName = [...]string{
	stateIdle:             "Idle",
	stateOpen:             "Open",
	stateHalfClosedLocal:  "HalfClosedLocal",
	stateHalfClosedRemote: "HalfClosedRemote",
	stateClosed:           "Closed",
}

// String implements fmt.Stringer.
func (st streamState) String() string {
	return stateName[st]
}
112 | |||
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
	// ID is which setting is being set.
	// See http://http2.github.io/http2-spec/#SettingValues
	ID SettingID

	// Val is the value.
	Val uint32
}

// String implements fmt.Stringer.
func (s Setting) String() string {
	return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}

// Valid reports whether the setting is valid.
func (s Setting) Valid() error {
	// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
	switch s.ID {
	case SettingEnablePush:
		// ENABLE_PUSH must be a boolean (0 or 1).
		if s.Val != 1 && s.Val != 0 {
			return ConnectionError(ErrCodeProtocol)
		}
	case SettingInitialWindowSize:
		// The window size is a 31-bit value.
		if s.Val > 1<<31-1 {
			return ConnectionError(ErrCodeFlowControl)
		}
	case SettingMaxFrameSize:
		// Frame sizes must be between 2^14 and 2^24-1 octets.
		if s.Val < 16384 || s.Val > 1<<24-1 {
			return ConnectionError(ErrCodeProtocol)
		}
	}
	// Settings with unknown or unconstrained IDs are always valid.
	return nil
}
146 | |||
// A SettingID is an HTTP/2 setting as defined in
// http://http2.github.io/http2-spec/#iana-settings
type SettingID uint16

const (
	SettingHeaderTableSize      SettingID = 0x1
	SettingEnablePush           SettingID = 0x2
	SettingMaxConcurrentStreams SettingID = 0x3
	SettingInitialWindowSize    SettingID = 0x4
	SettingMaxFrameSize         SettingID = 0x5
	SettingMaxHeaderListSize    SettingID = 0x6
)

// settingName maps each registered SettingID to its IANA name.
var settingName = map[SettingID]string{
	SettingHeaderTableSize:      "HEADER_TABLE_SIZE",
	SettingEnablePush:           "ENABLE_PUSH",
	SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
	SettingInitialWindowSize:    "INITIAL_WINDOW_SIZE",
	SettingMaxFrameSize:         "MAX_FRAME_SIZE",
	SettingMaxHeaderListSize:    "MAX_HEADER_LIST_SIZE",
}

// String returns the registered name for s, or an
// UNKNOWN_SETTING_<n> placeholder for unregistered IDs.
func (s SettingID) String() string {
	name, known := settingName[s]
	if !known {
		return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
	}
	return name
}
175 | |||
// Sentinel errors for malformed header fields.
var (
	errInvalidHeaderFieldName  = errors.New("http2: invalid header field name")
	errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
)

// validWireHeaderFieldName reports whether v is a valid header field
// name (key). See httplex.ValidHeaderName for the base rules.
//
// Further, http2 says:
// "Just as in HTTP/1.x, header field names are strings of ASCII
// characters that are compared in a case-insensitive
// fashion. However, header field names MUST be converted to
// lowercase prior to their encoding in HTTP/2. "
func validWireHeaderFieldName(v string) bool {
	if len(v) == 0 {
		return false
	}
	for _, r := range v {
		if !httplex.IsTokenRune(r) {
			return false
		}
		// Uppercase ASCII is forbidden on the wire in HTTP/2.
		if 'A' <= r && r <= 'Z' {
			return false
		}
	}
	return true
}
203 | |||
// httpCodeStringCommon caches the decimal string for every status
// code net/http has a status text for, so httpCodeString avoids a
// strconv.Itoa call for common codes. n -> strconv.Itoa(n).
var httpCodeStringCommon = map[int]string{}

func init() {
	for code := 100; code <= 999; code++ {
		if http.StatusText(code) == "" {
			continue
		}
		httpCodeStringCommon[code] = strconv.Itoa(code)
	}
}

// httpCodeString formats an HTTP status code as a decimal string,
// using the precomputed cache when possible.
func httpCodeString(code int) string {
	if cached, ok := httpCodeStringCommon[code]; ok {
		return cached
	}
	return strconv.Itoa(code)
}
220 | |||
// from pkg io
type stringWriter interface {
	WriteString(s string) (n int, err error)
}

// A gate lets two goroutines coordinate their activities.
// Done signals the peer; Wait blocks until signaled.
type gate chan struct{}

func (g gate) Done() { g <- struct{}{} }
func (g gate) Wait() { <-g }

// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}

// Init makes a closeWaiter usable.
// It exists so a closeWaiter value can be placed inside a larger
// struct and initialized in place.
func (cw *closeWaiter) Init() {
	*cw = make(chan struct{})
}

// Close marks the closeWaiter as closed and unblocks any waiters.
func (cw closeWaiter) Close() {
	close(cw)
}

// Wait waits for the closeWaiter to become closed.
func (cw closeWaiter) Wait() {
	<-cw
}
252 | |||
253 | // bufferedWriter is a buffered writer that writes to w. | ||
254 | // Its buffered writer is lazily allocated as needed, to minimize | ||
255 | // idle memory usage with many connections. | ||
256 | type bufferedWriter struct { | ||
257 | w io.Writer // immutable | ||
258 | bw *bufio.Writer // non-nil when data is buffered | ||
259 | } | ||
260 | |||
261 | func newBufferedWriter(w io.Writer) *bufferedWriter { | ||
262 | return &bufferedWriter{w: w} | ||
263 | } | ||
264 | |||
265 | // bufWriterPoolBufferSize is the size of bufio.Writer's | ||
266 | // buffers created using bufWriterPool. | ||
267 | // | ||
268 | // TODO: pick a less arbitrary value? this is a bit under | ||
269 | // (3 x typical 1500 byte MTU) at least. Other than that, | ||
270 | // not much thought went into it. | ||
271 | const bufWriterPoolBufferSize = 4 << 10 | ||
272 | |||
273 | var bufWriterPool = sync.Pool{ | ||
274 | New: func() interface{} { | ||
275 | return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) | ||
276 | }, | ||
277 | } | ||
278 | |||
279 | func (w *bufferedWriter) Available() int { | ||
280 | if w.bw == nil { | ||
281 | return bufWriterPoolBufferSize | ||
282 | } | ||
283 | return w.bw.Available() | ||
284 | } | ||
285 | |||
286 | func (w *bufferedWriter) Write(p []byte) (n int, err error) { | ||
287 | if w.bw == nil { | ||
288 | bw := bufWriterPool.Get().(*bufio.Writer) | ||
289 | bw.Reset(w.w) | ||
290 | w.bw = bw | ||
291 | } | ||
292 | return w.bw.Write(p) | ||
293 | } | ||
294 | |||
295 | func (w *bufferedWriter) Flush() error { | ||
296 | bw := w.bw | ||
297 | if bw == nil { | ||
298 | return nil | ||
299 | } | ||
300 | err := bw.Flush() | ||
301 | bw.Reset(nil) | ||
302 | bufWriterPool.Put(bw) | ||
303 | w.bw = nil | ||
304 | return err | ||
305 | } | ||
306 | |||
// mustUint31 converts v to a uint32 in the HTTP/2 31-bit range
// [0, 2^31-1], panicking if v is negative.
//
// No upper-bound check is needed: the largest int32 value is exactly
// 2^31-1, so the original `v > 2147483647` comparison could never be
// true and has been removed.
func mustUint31(v int32) uint32 {
	if v < 0 {
		panic("out of range")
	}
	return uint32(v)
}
313 | |||
// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 2616, section 4.4.
func bodyAllowedForStatus(status int) bool {
	switch status {
	case 204, 304: // No Content, Not Modified
		return false
	}
	// 1xx informational responses never carry a body.
	return status < 100 || status > 199
}
327 | |||
// httpError is an error that also reports timeout and temporary
// status, matching the net.Error method set.
type httpError struct {
	msg     string
	timeout bool
}

func (e *httpError) Error() string   { return e.msg }
func (e *httpError) Timeout() bool   { return e.timeout }
func (e *httpError) Temporary() bool { return true }

// errTimeout is returned when response headers do not arrive in time.
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}

// connectionStater is implemented by connections that can report
// their TLS state, such as *tls.Conn.
type connectionStater interface {
	ConnectionState() tls.ConnectionState
}
342 | |||
343 | var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} | ||
344 | |||
345 | type sorter struct { | ||
346 | v []string // owned by sorter | ||
347 | } | ||
348 | |||
349 | func (s *sorter) Len() int { return len(s.v) } | ||
350 | func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } | ||
351 | func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } | ||
352 | |||
353 | // Keys returns the sorted keys of h. | ||
354 | // | ||
355 | // The returned slice is only valid until s used again or returned to | ||
356 | // its pool. | ||
357 | func (s *sorter) Keys(h http.Header) []string { | ||
358 | keys := s.v[:0] | ||
359 | for k := range h { | ||
360 | keys = append(keys, k) | ||
361 | } | ||
362 | s.v = keys | ||
363 | sort.Sort(s) | ||
364 | return keys | ||
365 | } | ||
366 | |||
367 | func (s *sorter) SortStrings(ss []string) { | ||
368 | // Our sorter works on s.v, which sorter owns, so | ||
369 | // stash it away while we sort the user's buffer. | ||
370 | save := s.v | ||
371 | s.v = ss | ||
372 | sort.Sort(s) | ||
373 | s.v = save | ||
374 | } | ||
375 | |||
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
//	*) a non-empty string starting with '/'
//	*) the string '*', for OPTIONS requests.
//
// For now this is only used a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
	if v == "*" {
		return true
	}
	return len(v) > 0 && v[0] == '/'
}
diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 0000000..508cebc --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go | |||
@@ -0,0 +1,21 @@ | |||
1 | // Copyright 2015 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build !go1.6 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "net/http" | ||
11 | "time" | ||
12 | ) | ||
13 | |||
// configureTransport is the pre-Go-1.6 stub: Transport integration
// is unavailable, so it always reports errTransportVersion.
func configureTransport(t1 *http.Transport) (*Transport, error) {
	return nil, errTransportVersion
}

// transportExpectContinueTimeout reports 0;
// http.Transport.ExpectContinueTimeout does not exist before Go 1.6.
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
	return 0

}
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 0000000..140434a --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go | |||
@@ -0,0 +1,87 @@ | |||
1 | // Copyright 2016 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build !go1.7 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "crypto/tls" | ||
11 | "net" | ||
12 | "net/http" | ||
13 | "time" | ||
14 | ) | ||
15 | |||
// contextContext is a minimal stand-in for context.Context, which is
// not in the standard library before Go 1.7.
type contextContext interface {
	Done() <-chan struct{}
	Err() error
}

// fakeContext is a contextContext that is never canceled.
type fakeContext struct{}

func (fakeContext) Done() <-chan struct{} { return nil }
func (fakeContext) Err() error            { panic("should not be called") }

// reqContext returns a never-canceled context; http.Request has no
// Context method before Go 1.7.
func reqContext(r *http.Request) fakeContext {
	return fakeContext{}
}

// setResponseUncompressed is a no-op; http.Response.Uncompressed was
// added in Go 1.7.
func setResponseUncompressed(res *http.Response) {
	// Nothing.
}

// clientTrace stands in for net/http/httptrace.ClientTrace (Go 1.7+);
// the trace hooks below are therefore all no-ops.
type clientTrace struct{}

func requestTrace(*http.Request) *clientTrace { return nil }
func traceGotConn(*http.Request, *ClientConn) {}
func traceFirstResponseByte(*clientTrace)     {}
func traceWroteHeaders(*clientTrace)          {}
func traceWroteRequest(*clientTrace, error)   {}
func traceGot100Continue(trace *clientTrace)  {}
func traceWait100Continue(trace *clientTrace) {}

// nop is a no-op cancel function.
func nop() {}

// serverConnBaseContext returns no context and a no-op cancel func.
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
	return nil, nop
}

// contextWithCancel returns ctx unchanged with a no-op cancel func.
func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
	return ctx, nop
}

// requestWithContext returns req unchanged; http.Request.WithContext
// was added in Go 1.7.
func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
	return req
}

// temporary copy of Go 1.6's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
	return &tls.Config{
		Rand:                     c.Rand,
		Time:                     c.Time,
		Certificates:             c.Certificates,
		NameToCertificate:        c.NameToCertificate,
		GetCertificate:           c.GetCertificate,
		RootCAs:                  c.RootCAs,
		NextProtos:               c.NextProtos,
		ServerName:               c.ServerName,
		ClientAuth:               c.ClientAuth,
		ClientCAs:                c.ClientCAs,
		InsecureSkipVerify:       c.InsecureSkipVerify,
		CipherSuites:             c.CipherSuites,
		PreferServerCipherSuites: c.PreferServerCipherSuites,
		SessionTicketsDisabled:   c.SessionTicketsDisabled,
		SessionTicketKey:         c.SessionTicketKey,
		ClientSessionCache:       c.ClientSessionCache,
		MinVersion:               c.MinVersion,
		MaxVersion:               c.MaxVersion,
		CurvePreferences:         c.CurvePreferences,
	}
}

// Ping delegates to the internal ping implementation.
func (cc *ClientConn) Ping(ctx contextContext) error {
	return cc.ping(ctx)
}

// idleConnTimeout reports 0; Transport.IdleConnTimeout was added in Go 1.7.
func (t *Transport) idleConnTimeout() time.Duration { return 0 }
diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 0000000..6f8d3f8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go18.go | |||
@@ -0,0 +1,29 @@ | |||
1 | // Copyright 2016 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build !go1.8 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "io" | ||
11 | "net/http" | ||
12 | ) | ||
13 | |||
// configureServer18 is a no-op before Go 1.8: http.Server has no
// IdleTimeout field to mirror into the http2 Server.
func configureServer18(h1 *http.Server, h2 *Server) error {
	// No IdleTimeout to sync prior to Go 1.8.
	return nil
}
18 | |||
// shouldLogPanic reports whether a panic value recovered from a
// handler should be logged. Before Go 1.8 there is no
// http.ErrAbortHandler sentinel, so every non-nil panic is logged.
func shouldLogPanic(panicValue interface{}) bool {
	if panicValue == nil {
		return false
	}
	return true
}
22 | |||
// reqGetBody returns nil: http.Request gained the GetBody field only
// in Go 1.8, so the body cannot be replayed here.
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
	return nil
}

// reqBodyIsNoBody reports false: http.NoBody does not exist before Go 1.8.
func reqBodyIsNoBody(io.ReadCloser) bool { return false }

// go18httpNoBody returns nil before Go 1.8 (there is no http.NoBody).
func go18httpNoBody() io.ReadCloser { return nil } // for tests only
diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 0000000..5ae0772 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go19.go | |||
@@ -0,0 +1,16 @@ | |||
1 | // Copyright 2016 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // +build !go1.9 | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "net/http" | ||
11 | ) | ||
12 | |||
// configureServer19 is a no-op before Go 1.9: the graceful-shutdown
// hook it would register does not exist on older http.Server.
func configureServer19(s *http.Server, conf *Server) error {
	// not supported prior to go1.9
	return nil
}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 0000000..a614009 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go | |||
@@ -0,0 +1,163 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import ( | ||
8 | "errors" | ||
9 | "io" | ||
10 | "sync" | ||
11 | ) | ||
12 | |||
13 | // pipe is a goroutine-safe io.Reader/io.Writer pair. It's like | ||
14 | // io.Pipe except there are no PipeReader/PipeWriter halves, and the | ||
15 | // underlying buffer is an interface. (io.Pipe is always unbuffered) | ||
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {
	mu       sync.Mutex
	c        sync.Cond     // c.L lazily initialized to &p.mu
	b        pipeBuffer    // nil when done reading
	err      error         // read error once empty. non-nil means closed.
	breakErr error         // immediate read error (caller doesn't see rest of b)
	donec    chan struct{} // closed on error
	readFn   func()        // optional code to run in Read before error
}

// pipeBuffer is the storage backing a pipe: anything that can report
// its length and be both read from and written to.
type pipeBuffer interface {
	Len() int
	io.Writer
	io.Reader
}
31 | |||
32 | func (p *pipe) Len() int { | ||
33 | p.mu.Lock() | ||
34 | defer p.mu.Unlock() | ||
35 | if p.b == nil { | ||
36 | return 0 | ||
37 | } | ||
38 | return p.b.Len() | ||
39 | } | ||
40 | |||
// Read waits until data is available and copies bytes
// from the buffer into p.
//
// A breakErr takes priority and is returned immediately; a plain
// close err is only returned once the buffer has been drained.
func (p *pipe) Read(d []byte) (n int, err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		// Lazily bind the condition variable to the pipe's mutex.
		p.c.L = &p.mu
	}
	for {
		if p.breakErr != nil {
			return 0, p.breakErr
		}
		if p.b != nil && p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil {
			if p.readFn != nil {
				p.readFn()     // e.g. copy trailers
				p.readFn = nil // not sticky like p.err
			}
			p.b = nil
			return 0, p.err
		}
		p.c.Wait()
	}
}
67 | |||
// errClosedPipeWrite is returned by Write after the pipe has been
// closed for reading with CloseWithError.
var errClosedPipeWrite = errors.New("write on closed buffer")

// Write copies bytes from p into the buffer and wakes a reader.
// It is an error to write more data than the buffer can hold.
func (p *pipe) Write(d []byte) (n int, err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
	defer p.c.Signal() // wake a blocked Read even on the error paths
	if p.err != nil {
		return 0, errClosedPipeWrite
	}
	if p.breakErr != nil {
		return len(d), nil // discard when there is no reader
	}
	return p.b.Write(d)
}
87 | |||
// CloseWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err after all data has been
// read.
//
// The error must be non-nil.
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }

// BreakWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err immediately, without
// waiting for unread data.
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }

// closeWithErrorAndCode is like CloseWithError but also sets some code to run
// in the caller's goroutine before returning the error.
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }

// closeWithError stores err into *dst (which is either &p.err or
// &p.breakErr), records fn to run before the reader observes the
// error, and wakes any blocked Read. Only the first call per
// destination has any effect.
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
	if err == nil {
		panic("err must be non-nil")
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu
	}
	defer p.c.Signal()
	if *dst != nil {
		// Already been done.
		return
	}
	p.readFn = fn
	if dst == &p.breakErr {
		// A break discards buffered data immediately.
		p.b = nil
	}
	*dst = err
	p.closeDoneLocked()
}
125 | |||
126 | // requires p.mu be held. | ||
127 | func (p *pipe) closeDoneLocked() { | ||
128 | if p.donec == nil { | ||
129 | return | ||
130 | } | ||
131 | // Close if unclosed. This isn't racy since we always | ||
132 | // hold p.mu while closing. | ||
133 | select { | ||
134 | case <-p.donec: | ||
135 | default: | ||
136 | close(p.donec) | ||
137 | } | ||
138 | } | ||
139 | |||
140 | // Err returns the error (if any) first set by BreakWithError or CloseWithError. | ||
141 | func (p *pipe) Err() error { | ||
142 | p.mu.Lock() | ||
143 | defer p.mu.Unlock() | ||
144 | if p.breakErr != nil { | ||
145 | return p.breakErr | ||
146 | } | ||
147 | return p.err | ||
148 | } | ||
149 | |||
// Done returns a channel which is closed if and when this pipe is closed
// with CloseWithError.
//
// The channel is created lazily; if the pipe already hit an error
// before the first Done call, the channel is closed immediately.
func (p *pipe) Done() <-chan struct{} {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.donec == nil {
		p.donec = make(chan struct{})
		if p.err != nil || p.breakErr != nil {
			// Already hit an error.
			p.closeDoneLocked()
		}
	}
	return p.donec
}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 0000000..eae143d --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go | |||
@@ -0,0 +1,2857 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // TODO: turn off the serve goroutine when idle, so | ||
6 | // an idle conn only has the readFrames goroutine active. (which could | ||
7 | // also be optimized probably to pin less memory in crypto/tls). This | ||
8 | // would involve tracking when the serve goroutine is active (atomic | ||
9 | // int32 read/CAS probably?) and starting it up when frames arrive, | ||
10 | // and shutting it down when all handlers exit. the occasional PING | ||
11 | // packets could use time.AfterFunc to call sc.wakeStartServeLoop() | ||
12 | // (which is a no-op if already running) and then queue the PING write | ||
13 | // as normal. The serve loop would then exit in most cases (if no | ||
14 | // Handlers running) and not be woken up again until the PING packet | ||
15 | // returns. | ||
16 | |||
17 | // TODO (maybe): add a mechanism for Handlers to going into | ||
18 | // half-closed-local mode (rw.(io.Closer) test?) but not exit their | ||
19 | // handler, and continue to be able to read from the | ||
20 | // Request.Body. This would be a somewhat semantic change from HTTP/1 | ||
21 | // (or at least what we expose in net/http), so I'd probably want to | ||
22 | // add it there too. For now, this package says that returning from | ||
23 | // the Handler ServeHTTP function means you're both done reading and | ||
24 | // done writing, without a way to stop just one or the other. | ||
25 | |||
26 | package http2 | ||
27 | |||
28 | import ( | ||
29 | "bufio" | ||
30 | "bytes" | ||
31 | "crypto/tls" | ||
32 | "errors" | ||
33 | "fmt" | ||
34 | "io" | ||
35 | "log" | ||
36 | "math" | ||
37 | "net" | ||
38 | "net/http" | ||
39 | "net/textproto" | ||
40 | "net/url" | ||
41 | "os" | ||
42 | "reflect" | ||
43 | "runtime" | ||
44 | "strconv" | ||
45 | "strings" | ||
46 | "sync" | ||
47 | "time" | ||
48 | |||
49 | "golang.org/x/net/http2/hpack" | ||
50 | ) | ||
51 | |||
const (
	prefaceTimeout        = 10 * time.Second
	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
	handlerChunkWriteSize = 4 << 10
	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
)

var (
	errClientDisconnected = errors.New("client disconnected")
	errClosedBody         = errors.New("body closed by handler")
	errHandlerComplete    = errors.New("http2: request body closed due to handler exiting")
	errStreamClosed       = errors.New("http2: stream closed")
)

// responseWriterStatePool recycles responseWriterState values (and
// their chunked buffered writers) across handler invocations.
var responseWriterStatePool = sync.Pool{
	New: func() interface{} {
		rws := &responseWriterState{}
		rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
		return rws
	},
}

// Test hooks.
var (
	testHookOnConn        func()
	testHookGetServerConn func(*serverConn)
	testHookOnPanicMu     *sync.Mutex // nil except in tests
	testHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)
)
81 | |||
// Server is an HTTP/2 server.
type Server struct {
	// MaxHandlers limits the number of http.Handler ServeHTTP goroutines
	// which may run at a time over all connections.
	// Negative or zero means no limit.
	// TODO: implement
	MaxHandlers int

	// MaxConcurrentStreams optionally specifies the number of
	// concurrent streams that each client may have open at a
	// time. This is unrelated to the number of http.Handler goroutines
	// which may be active globally, which is MaxHandlers.
	// If zero, MaxConcurrentStreams defaults to at least 100, per
	// the HTTP/2 spec's recommendations.
	MaxConcurrentStreams uint32

	// MaxReadFrameSize optionally specifies the largest frame
	// this server is willing to read. A valid value is between
	// 16k and 16M, inclusive. If zero or otherwise invalid, a
	// default value is used.
	MaxReadFrameSize uint32

	// PermitProhibitedCipherSuites, if true, permits the use of
	// cipher suites prohibited by the HTTP/2 spec.
	PermitProhibitedCipherSuites bool

	// IdleTimeout specifies how long until idle clients should be
	// closed with a GOAWAY frame. PING frames are not considered
	// activity for the purposes of IdleTimeout.
	IdleTimeout time.Duration

	// MaxUploadBufferPerConnection is the size of the initial flow
	// control window for each connection. The HTTP/2 spec does not
	// allow this to be smaller than 65535 or larger than 2^32-1.
	// If the value is outside this range, a default value will be
	// used instead.
	MaxUploadBufferPerConnection int32

	// MaxUploadBufferPerStream is the size of the initial flow control
	// window for each stream. The HTTP/2 spec does not allow this to
	// be larger than 2^32-1. If the value is zero or larger than the
	// maximum, a default value will be used instead.
	MaxUploadBufferPerStream int32

	// NewWriteScheduler constructs a write scheduler for a connection.
	// If nil, a default scheduler is chosen.
	NewWriteScheduler func() WriteScheduler

	// Internal state. This is a pointer (rather than embedded directly)
	// so that we don't embed a Mutex in this struct, which will make the
	// struct non-copyable, which might break some callers.
	state *serverInternalState
}
135 | |||
136 | func (s *Server) initialConnRecvWindowSize() int32 { | ||
137 | if s.MaxUploadBufferPerConnection > initialWindowSize { | ||
138 | return s.MaxUploadBufferPerConnection | ||
139 | } | ||
140 | return 1 << 20 | ||
141 | } | ||
142 | |||
143 | func (s *Server) initialStreamRecvWindowSize() int32 { | ||
144 | if s.MaxUploadBufferPerStream > 0 { | ||
145 | return s.MaxUploadBufferPerStream | ||
146 | } | ||
147 | return 1 << 20 | ||
148 | } | ||
149 | |||
150 | func (s *Server) maxReadFrameSize() uint32 { | ||
151 | if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { | ||
152 | return v | ||
153 | } | ||
154 | return defaultMaxReadFrameSize | ||
155 | } | ||
156 | |||
157 | func (s *Server) maxConcurrentStreams() uint32 { | ||
158 | if v := s.MaxConcurrentStreams; v > 0 { | ||
159 | return v | ||
160 | } | ||
161 | return defaultMaxStreams | ||
162 | } | ||
163 | |||
// serverInternalState tracks the set of active connections for a
// Server so that startGracefulShutdown can reach all of them.
type serverInternalState struct {
	mu          sync.Mutex
	activeConns map[*serverConn]struct{}
}
168 | |||
169 | func (s *serverInternalState) registerConn(sc *serverConn) { | ||
170 | if s == nil { | ||
171 | return // if the Server was used without calling ConfigureServer | ||
172 | } | ||
173 | s.mu.Lock() | ||
174 | s.activeConns[sc] = struct{}{} | ||
175 | s.mu.Unlock() | ||
176 | } | ||
177 | |||
178 | func (s *serverInternalState) unregisterConn(sc *serverConn) { | ||
179 | if s == nil { | ||
180 | return // if the Server was used without calling ConfigureServer | ||
181 | } | ||
182 | s.mu.Lock() | ||
183 | delete(s.activeConns, sc) | ||
184 | s.mu.Unlock() | ||
185 | } | ||
186 | |||
187 | func (s *serverInternalState) startGracefulShutdown() { | ||
188 | if s == nil { | ||
189 | return // if the Server was used without calling ConfigureServer | ||
190 | } | ||
191 | s.mu.Lock() | ||
192 | for sc := range s.activeConns { | ||
193 | sc.startGracefulShutdown() | ||
194 | } | ||
195 | s.mu.Unlock() | ||
196 | } | ||
197 | |||
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
//
// ConfigureServer must be called before s begins serving.
func ConfigureServer(s *http.Server, conf *Server) error {
	if s == nil {
		panic("nil *http.Server")
	}
	if conf == nil {
		conf = new(Server)
	}
	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
	// Version-specific setup (no-ops on older Go releases).
	if err := configureServer18(s, conf); err != nil {
		return err
	}
	if err := configureServer19(s, conf); err != nil {
		return err
	}

	if s.TLSConfig == nil {
		s.TLSConfig = new(tls.Config)
	} else if s.TLSConfig.CipherSuites != nil {
		// If they already provided a CipherSuite list, return
		// an error if it has a bad order or is missing
		// ECDHE_RSA_WITH_AES_128_GCM_SHA256.
		const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
		haveRequired := false
		sawBad := false
		for i, cs := range s.TLSConfig.CipherSuites {
			if cs == requiredCipher {
				haveRequired = true
			}
			if isBadCipher(cs) {
				sawBad = true
			} else if sawBad {
				return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
			}
		}
		if !haveRequired {
			return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
		}
	}

	// Note: not setting MinVersion to tls.VersionTLS12,
	// as we don't want to interfere with HTTP/1.1 traffic
	// on the user's server. We enforce TLS 1.2 later once
	// we accept a connection. Ideally this should be done
	// during next-proto selection, but using TLS <1.2 with
	// HTTP/2 is still the client's bug.

	s.TLSConfig.PreferServerCipherSuites = true

	haveNPN := false
	for _, p := range s.TLSConfig.NextProtos {
		if p == NextProtoTLS {
			haveNPN = true
			break
		}
	}
	if !haveNPN {
		s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
	}

	if s.TLSNextProto == nil {
		s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
	}
	// protoHandler is invoked by net/http when TLS negotiates "h2".
	protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
		if testHookOnConn != nil {
			testHookOnConn()
		}
		conf.ServeConn(c, &ServeConnOpts{
			Handler:    h,
			BaseConfig: hs,
		})
	}
	s.TLSNextProto[NextProtoTLS] = protoHandler
	return nil
}
277 | |||
// ServeConnOpts are options for the Server.ServeConn method.
type ServeConnOpts struct {
	// BaseConfig optionally sets the base configuration
	// for values. If nil, defaults are used.
	BaseConfig *http.Server

	// Handler specifies which handler to use for processing
	// requests. If nil, BaseConfig.Handler is used. If BaseConfig
	// or BaseConfig.Handler is nil, http.DefaultServeMux is used.
	Handler http.Handler
}
289 | |||
290 | func (o *ServeConnOpts) baseConfig() *http.Server { | ||
291 | if o != nil && o.BaseConfig != nil { | ||
292 | return o.BaseConfig | ||
293 | } | ||
294 | return new(http.Server) | ||
295 | } | ||
296 | |||
297 | func (o *ServeConnOpts) handler() http.Handler { | ||
298 | if o != nil { | ||
299 | if o.Handler != nil { | ||
300 | return o.Handler | ||
301 | } | ||
302 | if o.BaseConfig != nil && o.BaseConfig.Handler != nil { | ||
303 | return o.BaseConfig.Handler | ||
304 | } | ||
305 | } | ||
306 | return http.DefaultServeMux | ||
307 | } | ||
308 | |||
// ServeConn serves HTTP/2 requests on the provided connection and
// blocks until the connection is no longer readable.
//
// ServeConn starts speaking HTTP/2 assuming that c has not had any
// reads or writes. It writes its initial settings frame and expects
// to be able to read the preface and settings frame from the
// client. If c has a ConnectionState method like a *tls.Conn, the
// ConnectionState is used to verify the TLS ciphersuite and to set
// the Request.TLS field in Handlers.
//
// ServeConn does not support h2c by itself. Any h2c support must be
// implemented in terms of providing a suitably-behaving net.Conn.
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
	baseCtx, cancel := serverConnBaseContext(c, opts)
	defer cancel()

	sc := &serverConn{
		srv:              s,
		hs:               opts.baseConfig(),
		conn:             c,
		baseCtx:          baseCtx,
		remoteAddrStr:    c.RemoteAddr().String(),
		bw:               newBufferedWriter(c),
		handler:          opts.handler(),
		streams:          make(map[uint32]*stream),
		readFrameCh:      make(chan readFrameResult),
		wantWriteFrameCh: make(chan FrameWriteRequest, 8),
		serveMsgCh:       make(chan interface{}, 8),
		wroteFrameCh:     make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
		bodyReadCh:       make(chan bodyReadMsg),         // buffering doesn't matter either way
		doneServing:      make(chan struct{}),
		clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
		advMaxStreams:    s.maxConcurrentStreams(),
		initialStreamSendWindowSize: initialWindowSize,
		maxFrameSize:                initialMaxFrameSize,
		headerTableSize:             initialHeaderTableSize,
		serveG:                      newGoroutineLock(),
		pushEnabled:                 true,
	}

	s.state.registerConn(sc)
	defer s.state.unregisterConn(sc)

	// The net/http package sets the write deadline from the
	// http.Server.WriteTimeout during the TLS handshake, but then
	// passes the connection off to us with the deadline already set.
	// Write deadlines are set per stream in serverConn.newStream.
	// Disarm the net.Conn write deadline here.
	if sc.hs.WriteTimeout != 0 {
		sc.conn.SetWriteDeadline(time.Time{})
	}

	if s.NewWriteScheduler != nil {
		sc.writeSched = s.NewWriteScheduler()
	} else {
		sc.writeSched = NewRandomWriteScheduler()
	}

	// These start at the RFC-specified defaults. If there is a higher
	// configured value for inflow, that will be updated when we send a
	// WINDOW_UPDATE shortly after sending SETTINGS.
	sc.flow.add(initialWindowSize)
	sc.inflow.add(initialWindowSize)
	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)

	fr := NewFramer(sc.bw, c)
	fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
	fr.MaxHeaderListSize = sc.maxHeaderListSize()
	fr.SetMaxReadFrameSize(s.maxReadFrameSize())
	sc.framer = fr

	if tc, ok := c.(connectionStater); ok {
		sc.tlsState = new(tls.ConnectionState)
		*sc.tlsState = tc.ConnectionState()
		// 9.2 Use of TLS Features
		// An implementation of HTTP/2 over TLS MUST use TLS
		// 1.2 or higher with the restrictions on feature set
		// and cipher suite described in this section. Due to
		// implementation limitations, it might not be
		// possible to fail TLS negotiation. An endpoint MUST
		// immediately terminate an HTTP/2 connection that
		// does not meet the TLS requirements described in
		// this section with a connection error (Section
		// 5.4.1) of type INADEQUATE_SECURITY.
		if sc.tlsState.Version < tls.VersionTLS12 {
			sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
			return
		}

		if sc.tlsState.ServerName == "" {
			// Client must use SNI, but we don't enforce that anymore,
			// since it was causing problems when connecting to bare IP
			// addresses during development.
			//
			// TODO: optionally enforce? Or enforce at the time we receive
			// a new request, and verify that the ServerName matches the :authority?
			// But that precludes proxy situations, perhaps.
			//
			// So for now, do nothing here again.
		}

		if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
			// "Endpoints MAY choose to generate a connection error
			// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
			// the prohibited cipher suites are negotiated."
			//
			// We choose that. In my opinion, the spec is weak
			// here. It also says both parties must support at least
			// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
			// excuses here. If we really must, we could allow an
			// "AllowInsecureWeakCiphers" option on the server later.
			// Let's see how it plays out first.
			sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
			return
		}
	}

	if hook := testHookGetServerConn; hook != nil {
		hook(sc)
	}
	sc.serve()
}
433 | |||
// rejectConn sends a GOAWAY frame carrying err and debug text, then
// closes the connection. Used before the serve loop has started.
func (sc *serverConn) rejectConn(err ErrCode, debug string) {
	sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
	// ignoring errors. hanging up anyway.
	sc.framer.WriteGoAway(0, err, []byte(debug))
	sc.bw.Flush()
	sc.conn.Close()
}
441 | |||
// serverConn is the state of a single client connection being served
// by a Server's serve goroutine.
type serverConn struct {
	// Immutable:
	srv              *Server
	hs               *http.Server
	conn             net.Conn
	bw               *bufferedWriter // writing to conn
	handler          http.Handler
	baseCtx          contextContext
	framer           *Framer
	doneServing      chan struct{}          // closed when serverConn.serve ends
	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
	serveMsgCh       chan interface{}       // misc messages & code to send to / run on the serve loop
	flow             flow                   // conn-wide (not stream-specific) outbound flow control
	inflow           flow                   // conn-wide inbound flow control
	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
	remoteAddrStr    string
	writeSched       WriteScheduler

	// Everything following is owned by the serve loop; use serveG.check():
	serveG                      goroutineLock // used to verify funcs are on serve()
	pushEnabled                 bool
	sawFirstSettings            bool // got the initial SETTINGS frame after the preface
	needToSendSettingsAck       bool
	unackedSettings             int    // how many SETTINGS have we sent without ACKs?
	clientMaxStreams            uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
	curClientStreams            uint32 // number of open streams initiated by the client
	curPushedStreams            uint32 // number of open streams initiated by server push
	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
	streams                     map[uint32]*stream
	initialStreamSendWindowSize int32
	maxFrameSize                int32
	headerTableSize             uint32
	peerMaxHeaderListSize       uint32            // zero means unknown (default)
	canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
	writingFrame                bool              // started writing a frame (on serve goroutine or separate)
	writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
	needsFrameFlush             bool              // last frame write wasn't a flush
	inGoAway                    bool              // we've started to or sent GOAWAY
	inFrameScheduleLoop         bool              // whether we're in the scheduleFrameWrite loop
	needToSendGoAway            bool              // we need to schedule a GOAWAY frame write
	goAwayCode                  ErrCode
	shutdownTimer               *time.Timer // nil until used
	idleTimer                   *time.Timer // nil if unused

	// Owned by the writeFrameAsync goroutine:
	headerWriteBuf bytes.Buffer
	hpackEncoder   *hpack.Encoder

	// Used by startGracefulShutdown.
	shutdownOnce sync.Once
}
498 | |||
499 | func (sc *serverConn) maxHeaderListSize() uint32 { | ||
500 | n := sc.hs.MaxHeaderBytes | ||
501 | if n <= 0 { | ||
502 | n = http.DefaultMaxHeaderBytes | ||
503 | } | ||
504 | // http2's count is in a slightly different unit and includes 32 bytes per pair. | ||
505 | // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. | ||
506 | const perFieldOverhead = 32 // per http2 spec | ||
507 | const typicalHeaders = 10 // conservative | ||
508 | return uint32(n + typicalHeaders*perFieldOverhead) | ||
509 | } | ||
510 | |||
// curOpenStreams returns the number of currently open streams,
// whether initiated by the client or by server push. Serve-loop only.
func (sc *serverConn) curOpenStreams() uint32 {
	sc.serveG.check()
	return sc.curClientStreams + sc.curPushedStreams
}
515 | |||
// stream represents a stream. This is the minimal metadata needed by
// the serve goroutine. Most of the actual stream state is owned by
// the http.Handler's goroutine in the responseWriter. Because the
// responseWriter's responseWriterState is recycled at the end of a
// handler, this struct intentionally has no pointer to the
// *responseWriter{,State} itself, as the Handler ending nils out the
// responseWriter's state field.
type stream struct {
	// immutable:
	sc        *serverConn
	id        uint32
	body      *pipe       // non-nil if expecting DATA frames
	cw        closeWaiter // closed wait stream transitions to closed state
	ctx       contextContext
	cancelCtx func()

	// owned by serverConn's serve loop:
	bodyBytes        int64   // body bytes seen so far
	declBodyBytes    int64   // or -1 if undeclared
	flow             flow    // limits writing from Handler to client
	inflow           flow    // what the client is allowed to POST/etc to us
	parent           *stream // or nil
	numTrailerValues int64
	weight           uint8
	state            streamState
	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
	gotTrailerHeader bool        // HEADER frame for trailers was seen
	wroteHeaders     bool        // whether we wrote headers (not status 100)
	writeDeadline    *time.Timer // nil if unused

	trailer    http.Header // accumulated trailers
	reqTrailer http.Header // handler's Request.Trailer
}
549 | |||
// Accessors used by the write-scheduler / frame-writing machinery.
func (sc *serverConn) Framer() *Framer  { return sc.framer }
func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
func (sc *serverConn) Flush() error     { return sc.bw.Flush() }
// HeaderEncoder returns the connection's HPACK encoder and the buffer
// it writes into; both are owned by the writeFrameAsync goroutine.
func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
	return sc.hpackEncoder, &sc.headerWriteBuf
}
556 | |||
// state returns the RFC 7540 section 5.1 state of streamID, plus the
// *stream when one is tracked. Serve-loop only.
func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
	sc.serveG.check()
	// http://tools.ietf.org/html/rfc7540#section-5.1
	if st, ok := sc.streams[streamID]; ok {
		return st.state, st
	}
	// "The first use of a new stream identifier implicitly closes all
	// streams in the "idle" state that might have been initiated by
	// that peer with a lower-valued stream identifier. For example, if
	// a client sends a HEADERS frame on stream 7 without ever sending a
	// frame on stream 5, then stream 5 transitions to the "closed"
	// state when the first frame for stream 7 is sent or received."
	// Odd IDs are client-initiated; even IDs are server pushes.
	if streamID%2 == 1 {
		if streamID <= sc.maxClientStreamID {
			return stateClosed, nil
		}
	} else {
		if streamID <= sc.maxPushPromiseID {
			return stateClosed, nil
		}
	}
	return stateIdle, nil
}
580 | |||
581 | // setConnState calls the net/http ConnState hook for this connection, if configured. | ||
582 | // Note that the net/http package does StateNew and StateClosed for us. | ||
583 | // There is currently no plan for StateHijacked or hijacking HTTP/2 connections. | ||
584 | func (sc *serverConn) setConnState(state http.ConnState) { | ||
585 | if sc.hs.ConnState != nil { | ||
586 | sc.hs.ConnState(sc.conn, state) | ||
587 | } | ||
588 | } | ||
589 | |||
590 | func (sc *serverConn) vlogf(format string, args ...interface{}) { | ||
591 | if VerboseLogs { | ||
592 | sc.logf(format, args...) | ||
593 | } | ||
594 | } | ||
595 | |||
596 | func (sc *serverConn) logf(format string, args ...interface{}) { | ||
597 | if lg := sc.hs.ErrorLog; lg != nil { | ||
598 | lg.Printf(format, args...) | ||
599 | } else { | ||
600 | log.Printf(format, args...) | ||
601 | } | ||
602 | } | ||
603 | |||
604 | // errno returns v's underlying uintptr, else 0. | ||
605 | // | ||
606 | // TODO: remove this helper function once http2 can use build | ||
607 | // tags. See comment in isClosedConnError. | ||
608 | func errno(v error) uintptr { | ||
609 | if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { | ||
610 | return uintptr(rv.Uint()) | ||
611 | } | ||
612 | return 0 | ||
613 | } | ||
614 | |||
615 | // isClosedConnError reports whether err is an error from use of a closed | ||
616 | // network connection. | ||
617 | func isClosedConnError(err error) bool { | ||
618 | if err == nil { | ||
619 | return false | ||
620 | } | ||
621 | |||
622 | // TODO: remove this string search and be more like the Windows | ||
623 | // case below. That might involve modifying the standard library | ||
624 | // to return better error types. | ||
625 | str := err.Error() | ||
626 | if strings.Contains(str, "use of closed network connection") { | ||
627 | return true | ||
628 | } | ||
629 | |||
630 | // TODO(bradfitz): x/tools/cmd/bundle doesn't really support | ||
631 | // build tags, so I can't make an http2_windows.go file with | ||
632 | // Windows-specific stuff. Fix that and move this, once we | ||
633 | // have a way to bundle this into std's net/http somehow. | ||
634 | if runtime.GOOS == "windows" { | ||
635 | if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { | ||
636 | if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { | ||
637 | const WSAECONNABORTED = 10053 | ||
638 | const WSAECONNRESET = 10054 | ||
639 | if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { | ||
640 | return true | ||
641 | } | ||
642 | } | ||
643 | } | ||
644 | } | ||
645 | return false | ||
646 | } | ||
647 | |||
648 | func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { | ||
649 | if err == nil { | ||
650 | return | ||
651 | } | ||
652 | if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { | ||
653 | // Boring, expected errors. | ||
654 | sc.vlogf(format, args...) | ||
655 | } else { | ||
656 | sc.logf(format, args...) | ||
657 | } | ||
658 | } | ||
659 | |||
660 | func (sc *serverConn) canonicalHeader(v string) string { | ||
661 | sc.serveG.check() | ||
662 | cv, ok := commonCanonHeader[v] | ||
663 | if ok { | ||
664 | return cv | ||
665 | } | ||
666 | cv, ok = sc.canonHeader[v] | ||
667 | if ok { | ||
668 | return cv | ||
669 | } | ||
670 | if sc.canonHeader == nil { | ||
671 | sc.canonHeader = make(map[string]string) | ||
672 | } | ||
673 | cv = http.CanonicalHeaderKey(v) | ||
674 | sc.canonHeader[v] = cv | ||
675 | return cv | ||
676 | } | ||
677 | |||
// readFrameResult is the message passed from the readFrames goroutine
// to the serve loop for each frame read (or read error).
type readFrameResult struct {
	f   Frame // valid until readMore is called
	err error

	// readMore should be called once the consumer no longer needs or
	// retains f. After readMore, f is invalid and more frames can be
	// read.
	readMore func()
}
687 | |||
// readFrames is the loop that reads incoming frames.
// It takes care to only read one frame at a time, blocking until the
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
	gate := make(gate)
	gateDone := gate.Done
	for {
		f, err := sc.framer.ReadFrame()
		// Hand the frame (or error) to the serve loop, unless the
		// connection is already shutting down.
		select {
		case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
		case <-sc.doneServing:
			return
		}
		// Wait for the serve loop to call readMore before reading the
		// next frame; f's memory may be reused by the next ReadFrame.
		select {
		case <-gate:
		case <-sc.doneServing:
			return
		}
		if terminalReadFrameError(err) {
			return
		}
	}
}
712 | |||
713 | // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. | ||
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type frameWriteResult struct {
	wr  FrameWriteRequest // what was written (or attempted)
	err error             // result of the writeFrame call
}
718 | |||
719 | // writeFrameAsync runs in its own goroutine and writes a single frame | ||
720 | // and then reports when it's done. | ||
721 | // At most one goroutine can be running writeFrameAsync at a time per | ||
722 | // serverConn. | ||
723 | func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { | ||
724 | err := wr.write.writeFrame(sc) | ||
725 | sc.wroteFrameCh <- frameWriteResult{wr, err} | ||
726 | } | ||
727 | |||
// closeAllStreamsOnConnClose closes every remaining open stream with
// errClientDisconnected. Called (via defer in serve) when the connection
// is going away.
func (sc *serverConn) closeAllStreamsOnConnClose() {
	sc.serveG.check()
	for _, st := range sc.streams {
		sc.closeStream(st, errClientDisconnected)
	}
}
734 | |||
735 | func (sc *serverConn) stopShutdownTimer() { | ||
736 | sc.serveG.check() | ||
737 | if t := sc.shutdownTimer; t != nil { | ||
738 | t.Stop() | ||
739 | } | ||
740 | } | ||
741 | |||
// notePanic reports a panic in the serve loop to the test hook, if one
// is installed, re-panicking unless the hook swallows it.
func (sc *serverConn) notePanic() {
	// Note: this is for serverConn.serve panicking, not http.Handler code.
	if testHookOnPanicMu != nil {
		testHookOnPanicMu.Lock()
		defer testHookOnPanicMu.Unlock()
	}
	if testHookOnPanic != nil {
		// recover only works here because notePanic runs as a deferred
		// function of serve.
		if e := recover(); e != nil {
			if testHookOnPanic(sc, e) {
				panic(e)
			}
		}
	}
}
756 | |||
// serve is the main event loop for an HTTP/2 server connection. It sends
// the initial SETTINGS, reads the client preface, spawns the frame-reading
// goroutine, and then multiplexes frame reads, frame-write completions,
// body-read notifications, and control messages until the connection ends.
// All per-connection state is owned by this goroutine (sc.serveG).
func (sc *serverConn) serve() {
	sc.serveG.check()
	defer sc.notePanic()
	defer sc.conn.Close()
	defer sc.closeAllStreamsOnConnClose()
	defer sc.stopShutdownTimer()
	defer close(sc.doneServing) // unblocks handlers trying to send

	if VerboseLogs {
		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
	}

	sc.writeFrame(FrameWriteRequest{
		write: writeSettings{
			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
			{SettingMaxConcurrentStreams, sc.advMaxStreams},
			{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
			{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
		},
	})
	sc.unackedSettings++

	// Each connection starts with initialWindowSize inflow tokens.
	// If a higher value is configured, we add more tokens.
	if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
		sc.sendWindowUpdate(nil, int(diff))
	}

	if err := sc.readPreface(); err != nil {
		sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
		return
	}
	// Now that we've got the preface, get us out of the
	// "StateNew" state. We can't go directly to idle, though.
	// Active means we read some data and anticipate a request. We'll
	// do another Active when we get a HEADERS frame.
	sc.setConnState(http.StateActive)
	sc.setConnState(http.StateIdle)

	if sc.srv.IdleTimeout != 0 {
		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
		defer sc.idleTimer.Stop()
	}

	go sc.readFrames() // closed by defer sc.conn.Close above

	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
	defer settingsTimer.Stop()

	loopNum := 0
	for {
		loopNum++
		select {
		case wr := <-sc.wantWriteFrameCh:
			if se, ok := wr.write.(StreamError); ok {
				sc.resetStream(se)
				break
			}
			sc.writeFrame(wr)
		case res := <-sc.wroteFrameCh:
			sc.wroteFrame(res)
		case res := <-sc.readFrameCh:
			if !sc.processFrameFromReader(res) {
				return
			}
			res.readMore()
			// Any frame from the client proves it spoke HTTP/2, so the
			// first-SETTINGS deadline no longer applies.
			if settingsTimer != nil {
				settingsTimer.Stop()
				settingsTimer = nil
			}
		case m := <-sc.bodyReadCh:
			sc.noteBodyRead(m.st, m.n)
		case msg := <-sc.serveMsgCh:
			switch v := msg.(type) {
			case func(int):
				v(loopNum) // for testing
			case *serverMessage:
				switch v {
				case settingsTimerMsg:
					sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
					return
				case idleTimerMsg:
					sc.vlogf("connection is idle")
					sc.goAway(ErrCodeNo)
				case shutdownTimerMsg:
					sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
					return
				case gracefulShutdownMsg:
					sc.startGracefulShutdownInternal()
				default:
					panic("unknown timer")
				}
			case *startPushRequest:
				sc.startPush(v)
			default:
				panic(fmt.Sprintf("unexpected type %T", v))
			}
		}

		// After a GOAWAY, once all streams have drained and nothing is
		// pending on the wire, the connection can be torn down.
		if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
			return
		}
	}
}
861 | |||
// awaitGracefulShutdown waits for either the connection to finish serving
// or sharedCh to be signaled; in the latter case it closes privateCh to
// propagate the shutdown signal.
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
	select {
	case <-sc.doneServing:
	case <-sharedCh:
		close(privateCh)
	}
}
869 | |||
// serverMessage is a control-message token delivered to the serve loop
// via serveMsgCh; the distinct pointer values below identify the message.
type serverMessage int

// Message values sent to serveMsgCh.
var (
	settingsTimerMsg    = new(serverMessage)
	idleTimerMsg        = new(serverMessage)
	shutdownTimerMsg    = new(serverMessage)
	gracefulShutdownMsg = new(serverMessage)
)
879 | |||
// Timer callbacks: each forwards its message to the serve loop.
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
883 | |||
// sendServeMsg delivers msg to the serve loop, dropping it if the
// connection has already finished serving. Must not be called from the
// serve goroutine itself (it would deadlock on serveMsgCh).
func (sc *serverConn) sendServeMsg(msg interface{}) {
	sc.serveG.checkNotOn() // NOT
	select {
	case sc.serveMsgCh <- msg:
	case <-sc.doneServing:
	}
}
891 | |||
// readPreface reads the ClientPreface greeting from the peer
// or returns an error on timeout or an invalid greeting.
func (sc *serverConn) readPreface() error {
	errc := make(chan error, 1)
	go func() {
		// Read the client preface
		buf := make([]byte, len(ClientPreface))
		if _, err := io.ReadFull(sc.conn, buf); err != nil {
			errc <- err
		} else if !bytes.Equal(buf, clientPreface) {
			errc <- fmt.Errorf("bogus greeting %q", buf)
		} else {
			errc <- nil
		}
	}()
	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
	defer timer.Stop()
	select {
	case <-timer.C:
		// errc is buffered, so the reader goroutine can still complete
		// and exit even though nobody receives its result.
		return errors.New("timeout waiting for client preface")
	case err := <-errc:
		if err == nil {
			if VerboseLogs {
				sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
			}
		}
		return err
	}
}
921 | |||
// errChanPool recycles the 1-buffered error channels used by
// writeDataFromHandler to wait for frame-write completion.
var errChanPool = sync.Pool{
	New: func() interface{} { return make(chan error, 1) },
}
925 | |||
// writeDataPool recycles *writeData values allocated per DATA frame
// write in writeDataFromHandler.
var writeDataPool = sync.Pool{
	New: func() interface{} { return new(writeData) },
}
929 | |||
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream. It blocks until the frame has been written (or the
// stream/connection has gone away) and returns the write's error.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
	ch := errChanPool.Get().(chan error)
	writeArg := writeDataPool.Get().(*writeData)
	*writeArg = writeData{stream.id, data, endStream}
	err := sc.writeFrameFromHandler(FrameWriteRequest{
		write:  writeArg,
		stream: stream,
		done:   ch,
	})
	if err != nil {
		return err
	}
	var frameWriteDone bool // the frame write is done (successfully or not)
	select {
	case err = <-ch:
		frameWriteDone = true
	case <-sc.doneServing:
		return errClientDisconnected
	case <-stream.cw:
		// If both ch and stream.cw were ready (as might
		// happen on the final Write after an http.Handler
		// ends), prefer the write result. Otherwise this
		// might just be us successfully closing the stream.
		// The writeFrameAsync and serve goroutines guarantee
		// that the ch send will happen before the stream.cw
		// close.
		select {
		case err = <-ch:
			frameWriteDone = true
		default:
			return errStreamClosed
		}
	}
	errChanPool.Put(ch)
	// Only recycle writeArg if the write actually finished; otherwise
	// the serve goroutine may still be holding a reference to it.
	if frameWriteDone {
		writeDataPool.Put(writeArg)
	}
	return err
}
971 | |||
// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
// if the connection has gone away.
//
// This must not be run from the serve goroutine itself, else it might
// deadlock writing to sc.wantWriteFrameCh (which is only mildly
// buffered and is read by serve itself). If you're on the serve
// goroutine, call writeFrame instead.
func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
	sc.serveG.checkNotOn() // NOT
	select {
	case sc.wantWriteFrameCh <- wr:
		return nil
	case <-sc.doneServing:
		// Serve loop is gone.
		// Client has closed their connection to the server.
		return errClientDisconnected
	}
}
990 | |||
// writeFrame schedules a frame to write and sends it if there's nothing
// already being written.
//
// There is no pushback here (the serve goroutine never blocks). It's
// the http.Handlers that block, waiting for their previous frames to
// make it onto the wire
//
// If you're not on the serve goroutine, use writeFrameFromHandler instead.
func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
	sc.serveG.check()

	// If true, wr will not be written and wr.done will not be signaled.
	var ignoreWrite bool

	// We are not allowed to write frames on closed streams. RFC 7540 Section
	// 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
	// a closed stream." Our server never sends PRIORITY, so that exception
	// does not apply.
	//
	// The serverConn might close an open stream while the stream's handler
	// is still running. For example, the server might close a stream when it
	// receives bad data from the client. If this happens, the handler might
	// attempt to write a frame after the stream has been closed (since the
	// handler hasn't yet been notified of the close). In this case, we simply
	// ignore the frame. The handler will notice that the stream is closed when
	// it waits for the frame to be written.
	//
	// As an exception to this rule, we allow sending RST_STREAM after close.
	// This allows us to immediately reject new streams without tracking any
	// state for those streams (except for the queued RST_STREAM frame). This
	// may result in duplicate RST_STREAMs in some cases, but the client should
	// ignore those.
	if wr.StreamID() != 0 {
		_, isReset := wr.write.(StreamError)
		if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
			ignoreWrite = true
		}
	}

	// Don't send a 100-continue response if we've already sent headers.
	// See golang.org/issue/14030.
	switch wr.write.(type) {
	case *writeResHeaders:
		wr.stream.wroteHeaders = true
	case write100ContinueHeadersFrame:
		if wr.stream.wroteHeaders {
			// We do not need to notify wr.done because this frame is
			// never written with wr.done != nil.
			if wr.done != nil {
				panic("wr.done != nil for write100ContinueHeadersFrame")
			}
			ignoreWrite = true
		}
	}

	if !ignoreWrite {
		sc.writeSched.Push(wr)
	}
	sc.scheduleFrameWrite()
}
1051 | |||
// startFrameWrite starts a goroutine to write wr (in a separate
// goroutine since that might block on the network), and updates the
// serve goroutine's state about the world, updated from info in wr.
func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
	sc.serveG.check()
	if sc.writingFrame {
		panic("internal error: can only be writing one frame at a time")
	}

	st := wr.stream
	if st != nil {
		switch st.state {
		case stateHalfClosedLocal:
			switch wr.write.(type) {
			case StreamError, handlerPanicRST, writeWindowUpdate:
				// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
				// in this state. (We never send PRIORITY from the server, so that is not checked.)
			default:
				panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
			}
		case stateClosed:
			panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
		}
	}
	if wpp, ok := wr.write.(*writePushPromise); ok {
		var err error
		wpp.promisedID, err = wpp.allocatePromisedID()
		if err != nil {
			// Could not reserve a promised stream ID; report the error
			// to the waiting writer instead of writing the frame.
			sc.writingFrameAsync = false
			wr.replyToWriter(err)
			return
		}
	}

	sc.writingFrame = true
	sc.needsFrameFlush = true
	// Small frames that fit in the write buffer are written synchronously;
	// anything larger goes to the writeFrameAsync goroutine so serve
	// never blocks on the network.
	if wr.write.staysWithinBuffer(sc.bw.Available()) {
		sc.writingFrameAsync = false
		err := wr.write.writeFrame(sc)
		sc.wroteFrame(frameWriteResult{wr, err})
	} else {
		sc.writingFrameAsync = true
		go sc.writeFrameAsync(wr)
	}
}
1097 | |||
// errHandlerPanicked is the error given to any callers blocked in a read from
// Request.Body when the main goroutine panics. Since most handlers read in
// the main ServeHTTP goroutine, this will show up rarely.
var errHandlerPanicked = errors.New("http2: handler panicked")
1102 | |||
// wroteFrame is called on the serve goroutine with the result of
// whatever happened on writeFrameAsync. It advances stream state for
// end-of-stream writes, replies to the waiting writer, and kicks the
// scheduler for the next frame.
func (sc *serverConn) wroteFrame(res frameWriteResult) {
	sc.serveG.check()
	if !sc.writingFrame {
		panic("internal error: expected to be already writing a frame")
	}
	sc.writingFrame = false
	sc.writingFrameAsync = false

	wr := res.wr

	if writeEndsStream(wr.write) {
		st := wr.stream
		if st == nil {
			panic("internal error: expecting non-nil stream")
		}
		switch st.state {
		case stateOpen:
			// Here we would go to stateHalfClosedLocal in
			// theory, but since our handler is done and
			// the net/http package provides no mechanism
			// for closing a ResponseWriter while still
			// reading data (see possible TODO at top of
			// this file), we go into closed state here
			// anyway, after telling the peer we're
			// hanging up on them. We'll transition to
			// stateClosed after the RST_STREAM frame is
			// written.
			st.state = stateHalfClosedLocal
			// Section 8.1: a server MAY request that the client abort
			// transmission of a request without error by sending a
			// RST_STREAM with an error code of NO_ERROR after sending
			// a complete response.
			sc.resetStream(streamError(st.id, ErrCodeNo))
		case stateHalfClosedRemote:
			sc.closeStream(st, errHandlerComplete)
		}
	} else {
		switch v := wr.write.(type) {
		case StreamError:
			// st may be unknown if the RST_STREAM was generated to reject bad input.
			if st, ok := sc.streams[v.StreamID]; ok {
				sc.closeStream(st, v)
			}
		case handlerPanicRST:
			sc.closeStream(wr.stream, errHandlerPanicked)
		}
	}

	// Reply (if requested) to unblock the ServeHTTP goroutine.
	wr.replyToWriter(res.err)

	sc.scheduleFrameWrite()
}
1158 | |||
// scheduleFrameWrite tickles the frame writing scheduler.
//
// If a frame is already being written, nothing happens. This will be called again
// when the frame is done being written.
//
// If a frame isn't being written we need to send one, the best frame
// to send is selected, preferring first things that aren't
// stream-specific (e.g. ACKing settings), and then finding the
// highest priority stream.
//
// If a frame isn't being written and there's nothing else to send, we
// flush the write buffer.
func (sc *serverConn) scheduleFrameWrite() {
	sc.serveG.check()
	// inFrameScheduleLoop prevents reentrant scheduling: startFrameWrite
	// may complete synchronously and call back into wroteFrame, which
	// calls scheduleFrameWrite again.
	if sc.writingFrame || sc.inFrameScheduleLoop {
		return
	}
	sc.inFrameScheduleLoop = true
	for !sc.writingFrameAsync {
		if sc.needToSendGoAway {
			sc.needToSendGoAway = false
			sc.startFrameWrite(FrameWriteRequest{
				write: &writeGoAway{
					maxStreamID: sc.maxClientStreamID,
					code:        sc.goAwayCode,
				},
			})
			continue
		}
		if sc.needToSendSettingsAck {
			sc.needToSendSettingsAck = false
			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
			continue
		}
		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
			if wr, ok := sc.writeSched.Pop(); ok {
				sc.startFrameWrite(wr)
				continue
			}
		}
		if sc.needsFrameFlush {
			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
			continue
		}
		break
	}
	sc.inFrameScheduleLoop = false
}
1208 | |||
// startGracefulShutdown gracefully shuts down a connection. This
// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
// shutting down. The connection isn't closed until all current
// streams are done.
//
// startGracefulShutdown returns immediately; it does not wait until
// the connection has shut down.
func (sc *serverConn) startGracefulShutdown() {
	sc.serveG.checkNotOn() // NOT
	// shutdownOnce makes repeated calls idempotent.
	sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
}
1220 | |||
// startGracefulShutdownInternal is the serve-goroutine half of graceful
// shutdown: GOAWAY with no error and no forced-close deadline.
func (sc *serverConn) startGracefulShutdownInternal() {
	sc.goAwayIn(ErrCodeNo, 0)
}
1224 | |||
1225 | func (sc *serverConn) goAway(code ErrCode) { | ||
1226 | sc.serveG.check() | ||
1227 | var forceCloseIn time.Duration | ||
1228 | if code != ErrCodeNo { | ||
1229 | forceCloseIn = 250 * time.Millisecond | ||
1230 | } else { | ||
1231 | // TODO: configurable | ||
1232 | forceCloseIn = 1 * time.Second | ||
1233 | } | ||
1234 | sc.goAwayIn(code, forceCloseIn) | ||
1235 | } | ||
1236 | |||
// goAwayIn queues a GOAWAY frame with the given code and, if forceCloseIn
// is non-zero, arms a timer that hard-closes the connection after that
// duration. It is a no-op if a GOAWAY is already in progress.
func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
	sc.serveG.check()
	if sc.inGoAway {
		return
	}
	if forceCloseIn != 0 {
		sc.shutDownIn(forceCloseIn)
	}
	sc.inGoAway = true
	sc.needToSendGoAway = true
	sc.goAwayCode = code
	sc.scheduleFrameWrite()
}
1250 | |||
// shutDownIn arms the timer that forces the connection closed after d.
func (sc *serverConn) shutDownIn(d time.Duration) {
	sc.serveG.check()
	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
1255 | |||
1256 | func (sc *serverConn) resetStream(se StreamError) { | ||
1257 | sc.serveG.check() | ||
1258 | sc.writeFrame(FrameWriteRequest{write: se}) | ||
1259 | if st, ok := sc.streams[se.StreamID]; ok { | ||
1260 | st.resetQueued = true | ||
1261 | } | ||
1262 | } | ||
1263 | |||
// processFrameFromReader processes the serve loop's read from readFrameCh from the
// frame-reading goroutine.
// processFrameFromReader returns whether the connection should be kept open.
func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
	sc.serveG.check()
	err := res.err
	if err != nil {
		if err == ErrFrameTooLarge {
			sc.goAway(ErrCodeFrameSize)
			return true // goAway will close the loop
		}
		clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
		if clientGone {
			// TODO: could we also get into this state if
			// the peer does a half close
			// (e.g. CloseWrite) because they're done
			// sending frames but they're still wanting
			// our open replies? Investigate.
			// TODO: add CloseWrite to crypto/tls.Conn first
			// so we have a way to test this? I suppose
			// just for testing we could have a non-TLS mode.
			return false
		}
	} else {
		f := res.f
		if VerboseLogs {
			sc.vlogf("http2: server read frame %v", summarizeFrame(f))
		}
		err = sc.processFrame(f)
		if err == nil {
			return true
		}
	}

	// A processing (or non-fatal read) error: map it to the appropriate
	// stream- or connection-level response.
	switch ev := err.(type) {
	case StreamError:
		sc.resetStream(ev)
		return true
	case goAwayFlowError:
		sc.goAway(ErrCodeFlowControl)
		return true
	case ConnectionError:
		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
		sc.goAway(ErrCode(ev))
		return true // goAway will handle shutdown
	default:
		if res.err != nil {
			sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
		} else {
			sc.logf("http2: server closing client connection: %v", err)
		}
		return false
	}
}
1318 | |||
// processFrame dispatches a single frame read from the client to the
// appropriate per-type handler, enforcing that the first frame of the
// connection is SETTINGS (RFC 7540 Section 3.5).
func (sc *serverConn) processFrame(f Frame) error {
	sc.serveG.check()

	// First frame received must be SETTINGS.
	if !sc.sawFirstSettings {
		if _, ok := f.(*SettingsFrame); !ok {
			return ConnectionError(ErrCodeProtocol)
		}
		sc.sawFirstSettings = true
	}

	switch f := f.(type) {
	case *SettingsFrame:
		return sc.processSettings(f)
	case *MetaHeadersFrame:
		return sc.processHeaders(f)
	case *WindowUpdateFrame:
		return sc.processWindowUpdate(f)
	case *PingFrame:
		return sc.processPing(f)
	case *DataFrame:
		return sc.processData(f)
	case *RSTStreamFrame:
		return sc.processResetStream(f)
	case *PriorityFrame:
		return sc.processPriority(f)
	case *GoAwayFrame:
		return sc.processGoAway(f)
	case *PushPromiseFrame:
		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
		return ConnectionError(ErrCodeProtocol)
	default:
		// Unknown frame types are ignored, per RFC 7540 Section 4.1.
		sc.vlogf("http2: server ignoring frame: %v", f.Header())
		return nil
	}
}
1356 | |||
// processPing handles an incoming PING frame, replying with a PING ACK
// unless the frame is itself an ACK, malformed, or the connection is
// already shutting down with an error.
func (sc *serverConn) processPing(f *PingFrame) error {
	sc.serveG.check()
	if f.IsAck() {
		// 6.7 PING: " An endpoint MUST NOT respond to PING frames
		// containing this flag."
		return nil
	}
	if f.StreamID != 0 {
		// "PING frames are not associated with any individual
		// stream. If a PING frame is received with a stream
		// identifier field value other than 0x0, the recipient MUST
		// respond with a connection error (Section 5.4.1) of type
		// PROTOCOL_ERROR."
		return ConnectionError(ErrCodeProtocol)
	}
	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
		// Connection is being torn down due to an error; don't bother
		// answering pings.
		return nil
	}
	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
	return nil
}
1378 | |||
// processWindowUpdate handles an incoming WINDOW_UPDATE frame, crediting
// either the stream-level or connection-level flow-control window.
func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
	sc.serveG.check()
	switch {
	case f.StreamID != 0: // stream-level flow control
		state, st := sc.state(f.StreamID)
		if state == stateIdle {
			// Section 5.1: "Receiving any frame other than HEADERS
			// or PRIORITY on a stream in this state MUST be
			// treated as a connection error (Section 5.4.1) of
			// type PROTOCOL_ERROR."
			return ConnectionError(ErrCodeProtocol)
		}
		if st == nil {
			// "WINDOW_UPDATE can be sent by a peer that has sent a
			// frame bearing the END_STREAM flag. This means that a
			// receiver could receive a WINDOW_UPDATE frame on a "half
			// closed (remote)" or "closed" stream. A receiver MUST
			// NOT treat this as an error, see Section 5.1."
			return nil
		}
		if !st.flow.add(int32(f.Increment)) {
			// Window overflow on a single stream is a stream error.
			return streamError(f.StreamID, ErrCodeFlowControl)
		}
	default: // connection-level flow control
		if !sc.flow.add(int32(f.Increment)) {
			// Connection-window overflow tears down the whole connection.
			return goAwayFlowError{}
		}
	}
	sc.scheduleFrameWrite()
	return nil
}
1410 | |||
// processResetStream handles a RST_STREAM frame from the client,
// aborting the identified stream with the client-supplied error code.
func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
	sc.serveG.check()

	state, st := sc.state(f.StreamID)
	if state == stateIdle {
		// 6.4 "RST_STREAM frames MUST NOT be sent for a
		// stream in the "idle" state. If a RST_STREAM frame
		// identifying an idle stream is received, the
		// recipient MUST treat this as a connection error
		// (Section 5.4.1) of type PROTOCOL_ERROR.
		return ConnectionError(ErrCodeProtocol)
	}
	if st != nil {
		// Cancel the request context before tearing the stream down so
		// the handler can observe the cancellation.
		st.cancelCtx()
		sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
	}
	return nil
}
1429 | |||
// closeStream transitions st to stateClosed and releases everything
// associated with it: the write-deadline timer, per-connection stream
// counters, buffered flow-control refunds, the request body pipe, and
// the write scheduler's per-stream state. err is delivered to any
// pending body reads.
func (sc *serverConn) closeStream(st *stream, err error) {
	sc.serveG.check()
	if st.state == stateIdle || st.state == stateClosed {
		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
	}
	st.state = stateClosed
	if st.writeDeadline != nil {
		st.writeDeadline.Stop()
	}
	if st.isPushed() {
		sc.curPushedStreams--
	} else {
		sc.curClientStreams--
	}
	delete(sc.streams, st.id)
	if len(sc.streams) == 0 {
		// Last open stream: the connection is idle again.
		sc.setConnState(http.StateIdle)
		if sc.srv.IdleTimeout != 0 {
			sc.idleTimer.Reset(sc.srv.IdleTimeout)
		}
		if h1ServerKeepAlivesDisabled(sc.hs) {
			sc.startGracefulShutdownInternal()
		}
	}
	if p := st.body; p != nil {
		// Return any buffered unread bytes worth of conn-level flow control.
		// See golang.org/issue/16481
		sc.sendWindowUpdate(nil, p.Len())

		p.CloseWithError(err)
	}
	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
	sc.writeSched.CloseStream(st.id)
}
1464 | |||
// processSettings handles a SETTINGS frame: either an ACK of settings
// we previously sent, or a new set of peer settings that is applied
// and then scheduled for acknowledgment.
func (sc *serverConn) processSettings(f *SettingsFrame) error {
	sc.serveG.check()
	if f.IsAck() {
		sc.unackedSettings--
		if sc.unackedSettings < 0 {
			// Why is the peer ACKing settings we never sent?
			// The spec doesn't mention this case, but
			// hang up on them anyway.
			return ConnectionError(ErrCodeProtocol)
		}
		return nil
	}
	if err := f.ForeachSetting(sc.processSetting); err != nil {
		return err
	}
	// Settings applied; schedule the required SETTINGS ACK.
	sc.needToSendSettingsAck = true
	sc.scheduleFrameWrite()
	return nil
}
1484 | |||
// processSetting validates and applies a single setting from a peer
// SETTINGS frame. Unknown settings are ignored, as the spec requires.
func (sc *serverConn) processSetting(s Setting) error {
	sc.serveG.check()
	if err := s.Valid(); err != nil {
		return err
	}
	if VerboseLogs {
		sc.vlogf("http2: server processing setting %v", s)
	}
	switch s.ID {
	case SettingHeaderTableSize:
		sc.headerTableSize = s.Val
		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
	case SettingEnablePush:
		sc.pushEnabled = s.Val != 0
	case SettingMaxConcurrentStreams:
		sc.clientMaxStreams = s.Val
	case SettingInitialWindowSize:
		// Needs to resize every open stream's send window; handled separately.
		return sc.processSettingInitialWindowSize(s.Val)
	case SettingMaxFrameSize:
		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
	case SettingMaxHeaderListSize:
		sc.peerMaxHeaderListSize = s.Val
	default:
		// Unknown setting: "An endpoint that receives a SETTINGS
		// frame with any unknown or unsupported identifier MUST
		// ignore that setting."
		if VerboseLogs {
			sc.vlogf("http2: server ignoring unknown setting %v", s)
		}
	}
	return nil
}
1517 | |||
// processSettingInitialWindowSize applies a new
// SETTINGS_INITIAL_WINDOW_SIZE value by adjusting every open stream's
// send flow-control window by the delta from the previous value.
func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
	sc.serveG.check()
	// Note: val already validated to be within range by
	// processSetting's Valid call.

	// "A SETTINGS frame can alter the initial flow control window
	// size for all current streams. When the value of
	// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
	// adjust the size of all stream flow control windows that it
	// maintains by the difference between the new value and the
	// old value."
	old := sc.initialStreamSendWindowSize
	sc.initialStreamSendWindowSize = int32(val)
	growth := int32(val) - old // may be negative
	for _, st := range sc.streams {
		if !st.flow.add(growth) {
			// 6.9.2 Initial Flow Control Window Size
			// "An endpoint MUST treat a change to
			// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
			// control window to exceed the maximum size as a
			// connection error (Section 5.4.1) of type
			// FLOW_CONTROL_ERROR."
			return ConnectionError(ErrCodeFlowControl)
		}
	}
	return nil
}
1545 | |||
// processData handles a DATA frame: it validates the stream state,
// enforces connection- and stream-level flow control plus the declared
// Content-Length, writes the payload into the request body pipe, and
// immediately refunds padding bytes.
func (sc *serverConn) processData(f *DataFrame) error {
	sc.serveG.check()
	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
		// Shutting down with an error; discard incoming data.
		return nil
	}
	data := f.Data()

	// "If a DATA frame is received whose stream is not in "open"
	// or "half closed (local)" state, the recipient MUST respond
	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
	id := f.Header().StreamID
	state, st := sc.state(id)
	if id == 0 || state == stateIdle {
		// Section 5.1: "Receiving any frame other than HEADERS
		// or PRIORITY on a stream in this state MUST be
		// treated as a connection error (Section 5.4.1) of
		// type PROTOCOL_ERROR."
		return ConnectionError(ErrCodeProtocol)
	}
	if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
		// This includes sending a RST_STREAM if the stream is
		// in stateHalfClosedLocal (which currently means that
		// the http.Handler returned, so it's done reading &
		// done writing). Try to stop the client from sending
		// more DATA.

		// But still enforce their connection-level flow control,
		// and return any flow control bytes since we're not going
		// to consume them.
		if sc.inflow.available() < int32(f.Length) {
			return streamError(id, ErrCodeFlowControl)
		}
		// Deduct the flow control from inflow, since we're
		// going to immediately add it back in
		// sendWindowUpdate, which also schedules sending the
		// frames.
		sc.inflow.take(int32(f.Length))
		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level

		if st != nil && st.resetQueued {
			// Already have a stream error in flight. Don't send another.
			return nil
		}
		return streamError(id, ErrCodeStreamClosed)
	}
	if st.body == nil {
		panic("internal error: should have a body in this state")
	}

	// Sender sending more than they'd declared?
	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
		return streamError(id, ErrCodeStreamClosed)
	}
	if f.Length > 0 {
		// Check whether the client has flow control quota.
		// Note: f.Length covers padding too, not just len(data).
		if st.inflow.available() < int32(f.Length) {
			return streamError(id, ErrCodeFlowControl)
		}
		st.inflow.take(int32(f.Length))

		if len(data) > 0 {
			wrote, err := st.body.Write(data)
			if err != nil {
				return streamError(id, ErrCodeStreamClosed)
			}
			if wrote != len(data) {
				panic("internal error: bad Writer")
			}
			st.bodyBytes += int64(len(data))
		}

		// Return any padded flow control now, since we won't
		// refund it later on body reads.
		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
			sc.sendWindowUpdate32(nil, pad)
			sc.sendWindowUpdate32(st, pad)
		}
	}
	if f.StreamEnded() {
		st.endStream()
	}
	return nil
}
1630 | |||
1631 | func (sc *serverConn) processGoAway(f *GoAwayFrame) error { | ||
1632 | sc.serveG.check() | ||
1633 | if f.ErrCode != ErrCodeNo { | ||
1634 | sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) | ||
1635 | } else { | ||
1636 | sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) | ||
1637 | } | ||
1638 | sc.startGracefulShutdownInternal() | ||
1639 | // http://tools.ietf.org/html/rfc7540#section-6.8 | ||
1640 | // We should not create any new streams, which means we should disable push. | ||
1641 | sc.pushEnabled = false | ||
1642 | return nil | ||
1643 | } | ||
1644 | |||
1645 | // isPushed reports whether the stream is server-initiated. | ||
1646 | func (st *stream) isPushed() bool { | ||
1647 | return st.id%2 == 0 | ||
1648 | } | ||
1649 | |||
// endStream closes a Request.Body's pipe. It is called when a DATA
// frame says a request body is over (or after trailers).
func (st *stream) endStream() {
	sc := st.sc
	sc.serveG.check()

	if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
		// The body came up short of its declared Content-Length.
		st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
			st.declBodyBytes, st.bodyBytes))
	} else {
		// Deliver io.EOF, arranging for any trailers to be copied into
		// the handler's Request just before it observes the EOF.
		st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
		st.body.CloseWithError(io.EOF)
	}
	st.state = stateHalfClosedRemote
}
1665 | |||
1666 | // copyTrailersToHandlerRequest is run in the Handler's goroutine in | ||
1667 | // its Request.Body.Read just before it gets io.EOF. | ||
1668 | func (st *stream) copyTrailersToHandlerRequest() { | ||
1669 | for k, vv := range st.trailer { | ||
1670 | if _, ok := st.reqTrailer[k]; ok { | ||
1671 | // Only copy it over it was pre-declared. | ||
1672 | st.reqTrailer[k] = vv | ||
1673 | } | ||
1674 | } | ||
1675 | } | ||
1676 | |||
1677 | // onWriteTimeout is run on its own goroutine (from time.AfterFunc) | ||
1678 | // when the stream's WriteTimeout has fired. | ||
1679 | func (st *stream) onWriteTimeout() { | ||
1680 | st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) | ||
1681 | } | ||
1682 | |||
// processHeaders handles a HEADERS frame, which either opens a new
// client-initiated stream or carries trailers for an existing one.
// For a new stream it validates stream-id and concurrency rules,
// builds the http.Request, and starts the handler on its own goroutine.
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
	sc.serveG.check()
	id := f.StreamID
	if sc.inGoAway {
		// Ignore.
		return nil
	}
	// http://tools.ietf.org/html/rfc7540#section-5.1.1
	// Streams initiated by a client MUST use odd-numbered stream
	// identifiers. [...] An endpoint that receives an unexpected
	// stream identifier MUST respond with a connection error
	// (Section 5.4.1) of type PROTOCOL_ERROR.
	if id%2 != 1 {
		return ConnectionError(ErrCodeProtocol)
	}
	// A HEADERS frame can be used to create a new stream or
	// send a trailer for an open one. If we already have a stream
	// open, let it process its own HEADERS frame (trailers at this
	// point, if it's valid).
	if st := sc.streams[f.StreamID]; st != nil {
		if st.resetQueued {
			// We're sending RST_STREAM to close the stream, so don't bother
			// processing this frame.
			return nil
		}
		return st.processTrailerHeaders(f)
	}

	// [...] The identifier of a newly established stream MUST be
	// numerically greater than all streams that the initiating
	// endpoint has opened or reserved. [...] An endpoint that
	// receives an unexpected stream identifier MUST respond with
	// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
	if id <= sc.maxClientStreamID {
		return ConnectionError(ErrCodeProtocol)
	}
	sc.maxClientStreamID = id

	// Connection is no longer idle; disarm any pending idle shutdown.
	if sc.idleTimer != nil {
		sc.idleTimer.Stop()
	}

	// http://tools.ietf.org/html/rfc7540#section-5.1.2
	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
	// endpoint that receives a HEADERS frame that causes their
	// advertised concurrent stream limit to be exceeded MUST treat
	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
	// or REFUSED_STREAM.
	if sc.curClientStreams+1 > sc.advMaxStreams {
		if sc.unackedSettings == 0 {
			// They should know better.
			return streamError(id, ErrCodeProtocol)
		}
		// Assume it's a network race, where they just haven't
		// received our last SETTINGS update. But actually
		// this can't happen yet, because we don't yet provide
		// a way for users to adjust server parameters at
		// runtime.
		return streamError(id, ErrCodeRefusedStream)
	}

	initialState := stateOpen
	if f.StreamEnded() {
		// No request body will follow.
		initialState = stateHalfClosedRemote
	}
	st := sc.newStream(id, 0, initialState)

	if f.HasPriority() {
		if err := checkPriority(f.StreamID, f.Priority); err != nil {
			return err
		}
		sc.writeSched.AdjustStream(st.id, f.Priority)
	}

	rw, req, err := sc.newWriterAndRequest(st, f)
	if err != nil {
		return err
	}
	st.reqTrailer = req.Trailer
	if st.reqTrailer != nil {
		st.trailer = make(http.Header)
	}
	st.body = req.Body.(*requestBody).pipe // may be nil
	st.declBodyBytes = req.ContentLength

	handler := sc.handler.ServeHTTP
	if f.Truncated {
		// Their header list was too long. Send a 431 error.
		handler = handleHeaderListTooLong
	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
		handler = new400Handler(err)
	}

	// The net/http package sets the read deadline from the
	// http.Server.ReadTimeout during the TLS handshake, but then
	// passes the connection off to us with the deadline already
	// set. Disarm it here after the request headers are read,
	// similar to how the http1 server works. Here it's
	// technically more like the http1 Server's ReadHeaderTimeout
	// (in Go 1.8), though. That's a more sane option anyway.
	if sc.hs.ReadTimeout != 0 {
		sc.conn.SetReadDeadline(time.Time{})
	}

	go sc.runHandler(rw, req, handler)
	return nil
}
1790 | |||
// processTrailerHeaders handles a trailing HEADERS frame for st,
// validating it (at most one trailer block, must end the stream, no
// pseudo-headers, only valid trailer field names) and recording the
// values for later delivery to the handler.
func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
	sc := st.sc
	sc.serveG.check()
	if st.gotTrailerHeader {
		// A third HEADERS block on the stream is never valid.
		return ConnectionError(ErrCodeProtocol)
	}
	st.gotTrailerHeader = true
	if !f.StreamEnded() {
		// Trailers must carry END_STREAM.
		return streamError(st.id, ErrCodeProtocol)
	}

	if len(f.PseudoFields()) > 0 {
		// Pseudo-header fields are forbidden in trailers.
		return streamError(st.id, ErrCodeProtocol)
	}
	if st.trailer != nil {
		for _, hf := range f.RegularFields() {
			key := sc.canonicalHeader(hf.Name)
			if !ValidTrailerHeader(key) {
				// TODO: send more details to the peer somehow. But http2 has
				// no way to send debug data at a stream level. Discuss with
				// HTTP folk.
				return streamError(st.id, ErrCodeProtocol)
			}
			st.trailer[key] = append(st.trailer[key], hf.Value)
		}
	}
	st.endStream()
	return nil
}
1820 | |||
1821 | func checkPriority(streamID uint32, p PriorityParam) error { | ||
1822 | if streamID == p.StreamDep { | ||
1823 | // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat | ||
1824 | // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." | ||
1825 | // Section 5.3.3 says that a stream can depend on one of its dependencies, | ||
1826 | // so it's only self-dependencies that are forbidden. | ||
1827 | return streamError(streamID, ErrCodeProtocol) | ||
1828 | } | ||
1829 | return nil | ||
1830 | } | ||
1831 | |||
1832 | func (sc *serverConn) processPriority(f *PriorityFrame) error { | ||
1833 | if sc.inGoAway { | ||
1834 | return nil | ||
1835 | } | ||
1836 | if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { | ||
1837 | return err | ||
1838 | } | ||
1839 | sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) | ||
1840 | return nil | ||
1841 | } | ||
1842 | |||
// newStream creates and registers a stream with the given id in the
// given initial state, linking its flow-control windows to the
// connection-level counters and arming a write deadline when the
// server has a WriteTimeout. pusherID is 0 for client-initiated
// streams; for pushed streams it is the id of the associated client
// stream.
func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
	sc.serveG.check()
	if id == 0 {
		panic("internal error: cannot create stream with id 0")
	}

	ctx, cancelCtx := contextWithCancel(sc.baseCtx)
	st := &stream{
		sc:        sc,
		id:        id,
		state:     state,
		ctx:       ctx,
		cancelCtx: cancelCtx,
	}
	st.cw.Init()
	st.flow.conn = &sc.flow // link to conn-level counter
	st.flow.add(sc.initialStreamSendWindowSize)
	st.inflow.conn = &sc.inflow // link to conn-level counter
	st.inflow.add(sc.srv.initialStreamRecvWindowSize())
	if sc.hs.WriteTimeout != 0 {
		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
	}

	sc.streams[id] = st
	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
	if st.isPushed() {
		sc.curPushedStreams++
	} else {
		sc.curClientStreams++
	}
	if sc.curOpenStreams() == 1 {
		// First open stream: report the connection as active.
		sc.setConnState(http.StateActive)
	}

	return st
}
1879 | |||
// newWriterAndRequest builds the responseWriter and http.Request for a
// new stream from its validated HEADERS frame, enforcing the
// pseudo-header rules of RFC 7540 Section 8.1.2 and attaching a body
// pipe when the client will be sending DATA frames.
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
	sc.serveG.check()

	rp := requestParam{
		method:    f.PseudoValue("method"),
		scheme:    f.PseudoValue("scheme"),
		authority: f.PseudoValue("authority"),
		path:      f.PseudoValue("path"),
	}

	isConnect := rp.method == "CONNECT"
	if isConnect {
		// CONNECT requests omit :scheme and :path and require :authority.
		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
		}
	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
		// See 8.1.2.6 Malformed Requests and Responses:
		//
		// Malformed requests or responses that are detected
		// MUST be treated as a stream error (Section 5.4.2)
		// of type PROTOCOL_ERROR."
		//
		// 8.1.2.3 Request Pseudo-Header Fields
		// "All HTTP/2 requests MUST include exactly one valid
		// value for the :method, :scheme, and :path
		// pseudo-header fields"
		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
	}

	bodyOpen := !f.StreamEnded()
	if rp.method == "HEAD" && bodyOpen {
		// HEAD requests can't have bodies
		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
	}

	rp.header = make(http.Header)
	for _, hf := range f.RegularFields() {
		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
	}
	if rp.authority == "" {
		// Fall back to the Host header, mirroring HTTP/1 behavior.
		rp.authority = rp.header.Get("Host")
	}

	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
	if err != nil {
		return nil, nil, err
	}
	if bodyOpen {
		// ContentLength is -1 (unknown) unless a Content-Length header
		// was supplied.
		if vv, ok := rp.header["Content-Length"]; ok {
			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
		} else {
			req.ContentLength = -1
		}
		req.Body.(*requestBody).pipe = &pipe{
			b: &dataBuffer{expected: req.ContentLength},
		}
	}
	return rw, req, nil
}
1939 | |||
// requestParam holds the pseudo-header values and regular header
// fields extracted from a request's HEADERS frame, prior to building
// the http.Request.
type requestParam struct {
	method                  string
	scheme, authority, path string
	header                  http.Header
}
1945 | |||
// newWriterAndRequestNoBody builds the responseWriter and http.Request
// from already-parsed request parameters, leaving the body pipe for the
// caller to attach. It handles Expect: 100-continue deferral, cookie
// merging, trailer pre-declaration, and request-URI construction.
func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
	sc.serveG.check()

	var tlsState *tls.ConnectionState // nil if not scheme https
	if rp.scheme == "https" {
		tlsState = sc.tlsState
	}

	// Defer the 100-continue response until the handler first reads the
	// body; see requestBody.Read.
	needsContinue := rp.header.Get("Expect") == "100-continue"
	if needsContinue {
		rp.header.Del("Expect")
	}
	// Merge Cookie headers into one "; "-delimited value.
	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
		rp.header.Set("Cookie", strings.Join(cookies, "; "))
	}

	// Setup Trailers
	var trailer http.Header
	for _, v := range rp.header["Trailer"] {
		for _, key := range strings.Split(v, ",") {
			key = http.CanonicalHeaderKey(strings.TrimSpace(key))
			switch key {
			case "Transfer-Encoding", "Trailer", "Content-Length":
				// Bogus. (copy of http1 rules)
				// Ignore.
			default:
				if trailer == nil {
					trailer = make(http.Header)
				}
				trailer[key] = nil
			}
		}
	}
	delete(rp.header, "Trailer")

	var url_ *url.URL
	var requestURI string
	if rp.method == "CONNECT" {
		url_ = &url.URL{Host: rp.authority}
		requestURI = rp.authority // mimic HTTP/1 server behavior
	} else {
		var err error
		url_, err = url.ParseRequestURI(rp.path)
		if err != nil {
			return nil, nil, streamError(st.id, ErrCodeProtocol)
		}
		requestURI = rp.path
	}

	body := &requestBody{
		conn:          sc,
		stream:        st,
		needsContinue: needsContinue,
	}
	req := &http.Request{
		Method:     rp.method,
		URL:        url_,
		RemoteAddr: sc.remoteAddrStr,
		Header:     rp.header,
		RequestURI: requestURI,
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		ProtoMinor: 0,
		TLS:        tlsState,
		Host:       rp.authority,
		Body:       body,
		Trailer:    trailer,
	}
	req = requestWithContext(req, st.ctx)

	// Reuse a pooled responseWriterState, preserving only its
	// bufio.Writer (whose buffer is the expensive part).
	rws := responseWriterStatePool.Get().(*responseWriterState)
	bwSave := rws.bw
	*rws = responseWriterState{} // zero all the fields
	rws.conn = sc
	rws.bw = bwSave
	rws.bw.Reset(chunkWriter{rws})
	rws.stream = st
	rws.req = req
	rws.body = body

	rw := &responseWriter{rws: rws}
	return rw, req, nil
}
2030 | |||
// runHandler runs the given handler for a request. It always runs on
// its own goroutine. The deferred function cancels the stream's
// context, recovers from handler panics (sending RST_STREAM and
// logging, same as net/http), and otherwise marks the handler done.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
	// didPanic distinguishes a panicking handler from a normal return:
	// it is only cleared after handler() comes back.
	didPanic := true
	defer func() {
		rw.rws.stream.cancelCtx()
		if didPanic {
			e := recover()
			sc.writeFrameFromHandler(FrameWriteRequest{
				write:  handlerPanicRST{rw.rws.stream.id},
				stream: rw.rws.stream,
			})
			// Same as net/http:
			if shouldLogPanic(e) {
				const size = 64 << 10
				buf := make([]byte, size)
				buf = buf[:runtime.Stack(buf, false)]
				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
			}
			return
		}
		rw.handlerDone()
	}()
	handler(rw, req)
	didPanic = false
}
2056 | |||
// handleHeaderListTooLong is substituted for the user's handler when a
// request's header block exceeded what we were willing to decode.
func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
	// 10.5.1 Limits on Header Block Size:
	// .. "A server that receives a larger header block than it is
	// willing to handle can send an HTTP 431 (Request Header Fields Too
	// Large) status code"
	const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
	const body = "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>"
	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
	io.WriteString(w, body)
}
2066 | |||
// writeHeaders schedules a HEADERS frame for st and, when headerData.h
// is non-nil, blocks until the frame has actually been written (or the
// stream/connection has gone away).
// Called from handler goroutines.
// headerData.h may be nil.
func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
	sc.serveG.checkNotOn() // NOT on
	var errc chan error
	if headerData.h != nil {
		// If there's a header map (which we don't own), so we have to block on
		// waiting for this frame to be written, so an http.Flush mid-handler
		// writes out the correct value of keys, before a handler later potentially
		// mutates it.
		errc = errChanPool.Get().(chan error)
	}
	if err := sc.writeFrameFromHandler(FrameWriteRequest{
		write:  headerData,
		stream: st,
		done:   errc,
	}); err != nil {
		return err
	}
	if errc != nil {
		select {
		case err := <-errc:
			// Only recycle the channel once a value was received; in the
			// other cases a late send may still be pending on it.
			errChanPool.Put(errc)
			return err
		case <-sc.doneServing:
			return errClientDisconnected
		case <-st.cw:
			return errStreamClosed
		}
	}
	return nil
}
2099 | |||
2100 | // called from handler goroutines. | ||
2101 | func (sc *serverConn) write100ContinueHeaders(st *stream) { | ||
2102 | sc.writeFrameFromHandler(FrameWriteRequest{ | ||
2103 | write: write100ContinueHeadersFrame{st.id}, | ||
2104 | stream: st, | ||
2105 | }) | ||
2106 | } | ||
2107 | |||
// A bodyReadMsg tells the server loop that the http.Handler read n
// bytes of the DATA from the client on the given stream. It is sent
// on sc.bodyReadCh by noteBodyReadFromHandler.
type bodyReadMsg struct {
	st *stream // the stream whose body was read
	n  int     // number of bytes consumed
}
2114 | |||
2115 | // called from handler goroutines. | ||
2116 | // Notes that the handler for the given stream ID read n bytes of its body | ||
2117 | // and schedules flow control tokens to be sent. | ||
2118 | func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { | ||
2119 | sc.serveG.checkNotOn() // NOT on | ||
2120 | if n > 0 { | ||
2121 | select { | ||
2122 | case sc.bodyReadCh <- bodyReadMsg{st, n}: | ||
2123 | case <-sc.doneServing: | ||
2124 | } | ||
2125 | } | ||
2126 | } | ||
2127 | |||
// noteBodyRead refunds n bytes of flow control after the handler has
// consumed them from the request body. Runs on the serve goroutine.
func (sc *serverConn) noteBodyRead(st *stream, n int) {
	sc.serveG.check()
	sc.sendWindowUpdate(nil, n) // conn-level
	if st.state != stateHalfClosedRemote && st.state != stateClosed {
		// Don't send this WINDOW_UPDATE if the stream is closed
		// remotely.
		sc.sendWindowUpdate(st, n)
	}
}
2137 | |||
2138 | // st may be nil for conn-level | ||
2139 | func (sc *serverConn) sendWindowUpdate(st *stream, n int) { | ||
2140 | sc.serveG.check() | ||
2141 | // "The legal range for the increment to the flow control | ||
2142 | // window is 1 to 2^31-1 (2,147,483,647) octets." | ||
2143 | // A Go Read call on 64-bit machines could in theory read | ||
2144 | // a larger Read than this. Very unlikely, but we handle it here | ||
2145 | // rather than elsewhere for now. | ||
2146 | const maxUint31 = 1<<31 - 1 | ||
2147 | for n >= maxUint31 { | ||
2148 | sc.sendWindowUpdate32(st, maxUint31) | ||
2149 | n -= maxUint31 | ||
2150 | } | ||
2151 | sc.sendWindowUpdate32(st, int32(n)) | ||
2152 | } | ||
2153 | |||
// sendWindowUpdate32 schedules a WINDOW_UPDATE frame crediting n bytes
// to st's flow-control window, or to the connection-level window when
// st is nil. n == 0 is a no-op; negative n is a programming error.
func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
	sc.serveG.check()
	if n == 0 {
		return
	}
	if n < 0 {
		panic("negative update")
	}
	var streamID uint32
	if st != nil {
		streamID = st.id
	}
	sc.writeFrame(FrameWriteRequest{
		write:  writeWindowUpdate{streamID: streamID, n: uint32(n)},
		stream: st,
	})
	// Mirror the credit in our local inflow accounting.
	var ok bool
	if st == nil {
		ok = sc.inflow.add(n)
	} else {
		ok = st.inflow.add(n)
	}
	if !ok {
		panic("internal error; sent too many window updates without decrements?")
	}
}
2181 | |||
// requestBody is the Handler's Request.Body type.
// Read and Close may be called concurrently.
type requestBody struct {
	stream        *stream
	conn          *serverConn
	closed        bool  // for use by Close only
	sawEOF        bool  // for use by Read only
	pipe          *pipe // non-nil if we have a HTTP entity message body
	needsContinue bool  // need to send a 100-continue
}
2192 | |||
2193 | func (b *requestBody) Close() error { | ||
2194 | if b.pipe != nil && !b.closed { | ||
2195 | b.pipe.BreakWithError(errClosedBody) | ||
2196 | } | ||
2197 | b.closed = true | ||
2198 | return nil | ||
2199 | } | ||
2200 | |||
2201 | func (b *requestBody) Read(p []byte) (n int, err error) { | ||
2202 | if b.needsContinue { | ||
2203 | b.needsContinue = false | ||
2204 | b.conn.write100ContinueHeaders(b.stream) | ||
2205 | } | ||
2206 | if b.pipe == nil || b.sawEOF { | ||
2207 | return 0, io.EOF | ||
2208 | } | ||
2209 | n, err = b.pipe.Read(p) | ||
2210 | if err == io.EOF { | ||
2211 | b.sawEOF = true | ||
2212 | } | ||
2213 | if b.conn == nil && inTests { | ||
2214 | return | ||
2215 | } | ||
2216 | b.conn.noteBodyReadFromHandler(b.stream, n, err) | ||
2217 | return | ||
2218 | } | ||
2219 | |||
// responseWriter is the http.ResponseWriter implementation. It's
// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
// and buffers are reused between multiple requests.
type responseWriter struct {
	rws *responseWriterState // nil once the Handler has finished
}
2229 | |||
// Optional http.ResponseWriter interfaces implemented.
// These compile-time assertions fail the build if responseWriter
// stops satisfying any of the listed interfaces.
var (
	_ http.CloseNotifier = (*responseWriter)(nil)
	_ http.Flusher       = (*responseWriter)(nil)
	_ stringWriter       = (*responseWriter)(nil)
)
2236 | |||
// responseWriterState holds the per-request state behind a responseWriter.
// Instances are pooled (see handlerDone) and reused across requests.
type responseWriterState struct {
	// immutable within a request:
	stream *stream
	req    *http.Request
	body   *requestBody // to close at end of request, if DATA frames didn't
	conn   *serverConn

	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
	bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}

	// mutated by http.Handler goroutine:
	handlerHeader http.Header // nil until called
	snapHeader    http.Header // snapshot of handlerHeader at WriteHeader time
	trailers      []string    // set in writeChunk
	status        int         // status code passed to WriteHeader
	wroteHeader   bool        // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
	sentHeader    bool        // have we sent the header frame?
	handlerDone   bool        // handler has finished
	dirty         bool        // a Write failed; don't reuse this responseWriterState

	sentContentLen int64 // non-zero if handler set a Content-Length header
	wroteBytes     int64 // total body bytes the handler has written so far

	closeNotifierMu sync.Mutex // guards closeNotifierCh
	closeNotifierCh chan bool  // nil until first used
}
2263 | |||
// chunkWriter is the io.Writer that responseWriterState.bw flushes into;
// it forwards buffered response bytes to writeChunk for framing.
type chunkWriter struct{ rws *responseWriterState }
2265 | |||
// Write forwards p to writeChunk, which sends HEADERS (first chunk) and DATA frames.
func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
2267 | |||
// hasTrailers reports whether any trailer fields have been declared for this response.
func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
2269 | |||
2270 | // declareTrailer is called for each Trailer header when the | ||
2271 | // response header is written. It notes that a header will need to be | ||
2272 | // written in the trailers at the end of the response. | ||
2273 | func (rws *responseWriterState) declareTrailer(k string) { | ||
2274 | k = http.CanonicalHeaderKey(k) | ||
2275 | if !ValidTrailerHeader(k) { | ||
2276 | // Forbidden by RFC 2616 14.40. | ||
2277 | rws.conn.logf("ignoring invalid trailer %q", k) | ||
2278 | return | ||
2279 | } | ||
2280 | if !strSliceContains(rws.trailers, k) { | ||
2281 | rws.trailers = append(rws.trailers, k) | ||
2282 | } | ||
2283 | } | ||
2284 | |||
// writeChunk writes chunks from the bufio.Writer. But because
// bufio.Writer may bypass its chunking, sometimes p may be
// arbitrarily large.
//
// writeChunk is also responsible (on the first chunk) for sending the
// HEADER response.
func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
	if !rws.wroteHeader {
		rws.writeHeader(200)
	}

	isHeadResp := rws.req.Method == "HEAD"
	if !rws.sentHeader {
		// First chunk: compute Content-Length/Content-Type/Date and
		// send the response HEADERS frame.
		rws.sentHeader = true
		var ctype, clen string
		if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
			rws.snapHeader.Del("Content-Length")
			clen64, err := strconv.ParseInt(clen, 10, 64)
			if err == nil && clen64 >= 0 {
				rws.sentContentLen = clen64
			} else {
				// Unparseable or negative: drop the handler's value.
				clen = ""
			}
		}
		// If the handler is already done, we know the final length.
		if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
			clen = strconv.Itoa(len(p))
		}
		_, hasContentType := rws.snapHeader["Content-Type"]
		if !hasContentType && bodyAllowedForStatus(rws.status) {
			ctype = http.DetectContentType(p)
		}
		var date string
		if _, ok := rws.snapHeader["Date"]; !ok {
			// TODO(bradfitz): be faster here, like net/http? measure.
			date = time.Now().UTC().Format(http.TimeFormat)
		}

		// Register any predeclared trailers from the Trailer header.
		for _, v := range rws.snapHeader["Trailer"] {
			foreachHeaderElement(v, rws.declareTrailer)
		}

		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
			streamID:      rws.stream.id,
			httpResCode:   rws.status,
			h:             rws.snapHeader,
			endStream:     endStream,
			contentType:   ctype,
			contentLength: clen,
			date:          date,
		})
		if err != nil {
			rws.dirty = true
			return 0, err
		}
		if endStream {
			return 0, nil
		}
	}
	if isHeadResp {
		// HEAD responses never carry a body; pretend we wrote it.
		return len(p), nil
	}
	if len(p) == 0 && !rws.handlerDone {
		return 0, nil
	}

	if rws.handlerDone {
		rws.promoteUndeclaredTrailers()
	}

	endStream := rws.handlerDone && !rws.hasTrailers()
	if len(p) > 0 || endStream {
		// only send a 0 byte DATA frame if we're ending the stream.
		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
			rws.dirty = true
			return 0, err
		}
	}

	// Trailers, if any, go out as a final HEADERS frame with END_STREAM.
	if rws.handlerDone && rws.hasTrailers() {
		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
			streamID:  rws.stream.id,
			h:         rws.handlerHeader,
			trailers:  rws.trailers,
			endStream: true,
		})
		if err != nil {
			rws.dirty = true
		}
		return len(p), err
	}
	return len(p), nil
}
2378 | |||
// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
// that, if present, signals that the map entry is actually for
// the response trailers, and not the response headers. The prefix
// is stripped after the ServeHTTP call finishes and the values are
// sent in the trailers.
//
// Example: a "Trailer:Grpc-Status" key is promoted to a "Grpc-Status"
// trailer (see promoteUndeclaredTrailers).
//
// This mechanism is intended only for trailers that are not known
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
//    https://golang.org/pkg/net/http/#ResponseWriter
//    https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
2392 | |||
// promoteUndeclaredTrailers permits http.Handlers to set trailers
// after the header has already been flushed. Because the Go
// ResponseWriter interface has no way to set Trailers (only the
// Header), and because we didn't want to expand the ResponseWriter
// interface, and because nobody used trailers, and because RFC 2616
// says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header()
// map to mean both Headers and Trailers. When it's time to write the
// Trailers, we pick out the fields of Headers that were declared as
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
// and gRPC libraries permit setting trailers mid-stream without
// predeclaring them. So: change of plans. We still permit the old
// way, but we also permit this hack: if a Header() key begins with
// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
// invalid token byte anyway, there is no ambiguity. (And it's already
// filtered out) It's mildly hacky, but not terrible.
//
// This method runs after the Handler is done and promotes any Header
// fields to be trailers.
func (rws *responseWriterState) promoteUndeclaredTrailers() {
	for k, vv := range rws.handlerHeader {
		if !strings.HasPrefix(k, TrailerPrefix) {
			continue
		}
		// Strip the magic prefix and re-register the value under the
		// canonicalized real trailer name.
		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
		rws.declareTrailer(trailerKey)
		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
	}

	// Sort for deterministic trailer ordering (only needed for 2+).
	if len(rws.trailers) > 1 {
		sorter := sorterPool.Get().(*sorter)
		sorter.SortStrings(rws.trailers)
		sorterPool.Put(sorter)
	}
}
2430 | |||
2431 | func (w *responseWriter) Flush() { | ||
2432 | rws := w.rws | ||
2433 | if rws == nil { | ||
2434 | panic("Header called after Handler finished") | ||
2435 | } | ||
2436 | if rws.bw.Buffered() > 0 { | ||
2437 | if err := rws.bw.Flush(); err != nil { | ||
2438 | // Ignore the error. The frame writer already knows. | ||
2439 | return | ||
2440 | } | ||
2441 | } else { | ||
2442 | // The bufio.Writer won't call chunkWriter.Write | ||
2443 | // (writeChunk with zero bytes, so we have to do it | ||
2444 | // ourselves to force the HTTP response header and/or | ||
2445 | // final DATA frame (with END_STREAM) to be sent. | ||
2446 | rws.writeChunk(nil) | ||
2447 | } | ||
2448 | } | ||
2449 | |||
// CloseNotify implements http.CloseNotifier. It returns a channel that
// receives at most one value when the underlying stream is closed.
// The channel and its watcher goroutine are created lazily on first call
// and shared by all subsequent calls.
func (w *responseWriter) CloseNotify() <-chan bool {
	rws := w.rws
	if rws == nil {
		panic("CloseNotify called after Handler finished")
	}
	rws.closeNotifierMu.Lock()
	ch := rws.closeNotifierCh
	if ch == nil {
		ch = make(chan bool, 1) // buffered so the sender never blocks
		rws.closeNotifierCh = ch
		cw := rws.stream.cw
		go func() {
			cw.Wait() // wait for close
			ch <- true
		}()
	}
	rws.closeNotifierMu.Unlock()
	return ch
}
2469 | |||
2470 | func (w *responseWriter) Header() http.Header { | ||
2471 | rws := w.rws | ||
2472 | if rws == nil { | ||
2473 | panic("Header called after Handler finished") | ||
2474 | } | ||
2475 | if rws.handlerHeader == nil { | ||
2476 | rws.handlerHeader = make(http.Header) | ||
2477 | } | ||
2478 | return rws.handlerHeader | ||
2479 | } | ||
2480 | |||
2481 | func (w *responseWriter) WriteHeader(code int) { | ||
2482 | rws := w.rws | ||
2483 | if rws == nil { | ||
2484 | panic("WriteHeader called after Handler finished") | ||
2485 | } | ||
2486 | rws.writeHeader(code) | ||
2487 | } | ||
2488 | |||
2489 | func (rws *responseWriterState) writeHeader(code int) { | ||
2490 | if !rws.wroteHeader { | ||
2491 | rws.wroteHeader = true | ||
2492 | rws.status = code | ||
2493 | if len(rws.handlerHeader) > 0 { | ||
2494 | rws.snapHeader = cloneHeader(rws.handlerHeader) | ||
2495 | } | ||
2496 | } | ||
2497 | } | ||
2498 | |||
2499 | func cloneHeader(h http.Header) http.Header { | ||
2500 | h2 := make(http.Header, len(h)) | ||
2501 | for k, vv := range h { | ||
2502 | vv2 := make([]string, len(vv)) | ||
2503 | copy(vv2, vv) | ||
2504 | h2[k] = vv2 | ||
2505 | } | ||
2506 | return h2 | ||
2507 | } | ||
2508 | |||
// The Life Of A Write is like this:
//
// * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) ->
// * (Handler might call Flush)
// * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte)
// * -> responseWriterState.writeChunk (most of the magic; see comment there)
//
// Write implements io.Writer / http.ResponseWriter by delegating to the
// common write helper.
func (w *responseWriter) Write(p []byte) (n int, err error) {
	return w.write(len(p), p, "")
}
2520 | |||
// WriteString implements the stringWriter interface (see the assertion
// above), avoiding a []byte conversion of s.
func (w *responseWriter) WriteString(s string) (n int, err error) {
	return w.write(len(s), nil, s)
}
2524 | |||
2525 | // either dataB or dataS is non-zero. | ||
2526 | func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { | ||
2527 | rws := w.rws | ||
2528 | if rws == nil { | ||
2529 | panic("Write called after Handler finished") | ||
2530 | } | ||
2531 | if !rws.wroteHeader { | ||
2532 | w.WriteHeader(200) | ||
2533 | } | ||
2534 | if !bodyAllowedForStatus(rws.status) { | ||
2535 | return 0, http.ErrBodyNotAllowed | ||
2536 | } | ||
2537 | rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set | ||
2538 | if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { | ||
2539 | // TODO: send a RST_STREAM | ||
2540 | return 0, errors.New("http2: handler wrote more than declared Content-Length") | ||
2541 | } | ||
2542 | |||
2543 | if dataB != nil { | ||
2544 | return rws.bw.Write(dataB) | ||
2545 | } else { | ||
2546 | return rws.bw.WriteString(dataS) | ||
2547 | } | ||
2548 | } | ||
2549 | |||
// handlerDone is called after the user's Handler returns. It flushes any
// remaining response data/headers, detaches rws from w (so later calls
// on w panic instead of touching reused state), and recycles rws when
// it is safe to do so.
func (w *responseWriter) handlerDone() {
	rws := w.rws
	dirty := rws.dirty // snapshot; Flush below can also set dirty via writeChunk
	rws.handlerDone = true
	w.Flush()
	w.rws = nil
	if !dirty {
		// Only recycle the pool if all prior Write calls to
		// the serverConn goroutine completed successfully. If
		// they returned earlier due to resets from the peer
		// there might still be write goroutines outstanding
		// from the serverConn referencing the rws memory. See
		// issue 20704.
		responseWriterStatePool.Put(rws)
	}
}
2566 | |||
// Push errors.
var (
	// ErrRecursivePush is returned when a pushed stream attempts a
	// push of its own (see the isPushed check in push).
	ErrRecursivePush = errors.New("http2: recursive push not allowed")
	// ErrPushLimitReached is returned when the peer's concurrent-stream
	// or stream-ID limits prevent another push (see startPush).
	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
)
2572 | |||
// pushOptions is the internal version of http.PushOptions, which we
// cannot include here because it's only defined in Go 1.8 and later.
type pushOptions struct {
	Method string      // promised request method; defaulted to "GET" in push
	Header http.Header // promised request headers; defaulted to an empty map in push
}
2579 | |||
// push validates target and opts, then hands a startPushRequest to the
// serve goroutine and waits for its answer. It must be called from the
// Handler goroutine, never from the serve goroutine itself.
func (w *responseWriter) push(target string, opts pushOptions) error {
	st := w.rws.stream
	sc := st.sc
	sc.serveG.checkNotOn()

	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
	// http://tools.ietf.org/html/rfc7540#section-6.6
	if st.isPushed() {
		return ErrRecursivePush
	}

	// Default options.
	if opts.Method == "" {
		opts.Method = "GET"
	}
	if opts.Header == nil {
		opts.Header = http.Header{}
	}
	wantScheme := "http"
	if w.rws.req.TLS != nil {
		wantScheme = "https"
	}

	// Validate the request.
	u, err := url.Parse(target)
	if err != nil {
		return err
	}
	if u.Scheme == "" {
		// Relative target: require an absolute path and inherit
		// scheme/host from the originating request.
		if !strings.HasPrefix(target, "/") {
			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
		}
		u.Scheme = wantScheme
		u.Host = w.rws.req.Host
	} else {
		if u.Scheme != wantScheme {
			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
		}
		if u.Host == "" {
			return errors.New("URL must have a host")
		}
	}
	for k := range opts.Header {
		if strings.HasPrefix(k, ":") {
			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
		}
		// These headers are meaningful only if the request has a body,
		// but PUSH_PROMISE requests cannot have a body.
		// http://tools.ietf.org/html/rfc7540#section-8.2
		// Also disallow Host, since the promised URL must be absolute.
		switch strings.ToLower(k) {
		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
			return fmt.Errorf("promised request headers cannot include %q", k)
		}
	}
	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
		return err
	}

	// The RFC effectively limits promised requests to GET and HEAD:
	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
	// http://tools.ietf.org/html/rfc7540#section-8.2
	if opts.Method != "GET" && opts.Method != "HEAD" {
		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
	}

	msg := &startPushRequest{
		parent: st,
		method: opts.Method,
		url:    u,
		header: cloneHeader(opts.Header),
		done:   errChanPool.Get().(chan error),
	}

	// Hand the request to the serve goroutine, bailing out if the
	// connection or stream closes first.
	select {
	case <-sc.doneServing:
		return errClientDisconnected
	case <-st.cw:
		return errStreamClosed
	case sc.serveMsgCh <- msg:
	}

	// Wait for the serve goroutine's answer on msg.done.
	select {
	case <-sc.doneServing:
		return errClientDisconnected
	case <-st.cw:
		return errStreamClosed
	case err := <-msg.done:
		errChanPool.Put(msg.done)
		return err
	}
}
2672 | |||
// startPushRequest is the message push sends to the serve goroutine
// (via serveMsgCh) asking it to initiate a server push.
type startPushRequest struct {
	parent *stream     // the peer-initiated stream the push is promised on
	method string      // promised request method ("GET" or "HEAD")
	url    *url.URL    // absolute promised URL
	header http.Header // promised request headers (already validated)
	done   chan error  // receives the result; pooled via errChanPool
}
2680 | |||
// startPush runs on the serve goroutine and handles a startPushRequest:
// it re-checks push preconditions, then queues the PUSH_PROMISE frame,
// deferring stream-ID allocation (and handler start) to write time.
func (sc *serverConn) startPush(msg *startPushRequest) {
	sc.serveG.check()

	// http://tools.ietf.org/html/rfc7540#section-6.6.
	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
	// is in either the "open" or "half-closed (remote)" state.
	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
		// responseWriter.Push checks that the stream is peer-initiated.
		msg.done <- errStreamClosed
		return
	}

	// http://tools.ietf.org/html/rfc7540#section-6.6.
	if !sc.pushEnabled {
		msg.done <- http.ErrNotSupported
		return
	}

	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
	// is written. Once the ID is allocated, we start the request handler.
	allocatePromisedID := func() (uint32, error) {
		sc.serveG.check()

		// Check this again, just in case. Technically, we might have received
		// an updated SETTINGS by the time we got around to writing this frame.
		if !sc.pushEnabled {
			return 0, http.ErrNotSupported
		}
		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
		if sc.curPushedStreams+1 > sc.clientMaxStreams {
			return 0, ErrPushLimitReached
		}

		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
		// Streams initiated by the server MUST use even-numbered identifiers.
		// A server that is unable to establish a new stream identifier can send a GOAWAY
		// frame so that the client is forced to open a new connection for new streams.
		if sc.maxPushPromiseID+2 >= 1<<31 {
			sc.startGracefulShutdownInternal()
			return 0, ErrPushLimitReached
		}
		sc.maxPushPromiseID += 2
		promisedID := sc.maxPushPromiseID

		// http://tools.ietf.org/html/rfc7540#section-8.2.
		// Strictly speaking, the new stream should start in "reserved (local)", then
		// transition to "half closed (remote)" after sending the initial HEADERS, but
		// we start in "half closed (remote)" for simplicity.
		// See further comments at the definition of stateHalfClosedRemote.
		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
			method:    msg.method,
			scheme:    msg.url.Scheme,
			authority: msg.url.Host,
			path:      msg.url.RequestURI(),
			header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
		})
		if err != nil {
			// Should not happen, since we've already validated msg.url.
			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
		}

		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
		return promisedID, nil
	}

	sc.writeFrame(FrameWriteRequest{
		write: &writePushPromise{
			streamID:           msg.parent.id,
			method:             msg.method,
			url:                msg.url,
			h:                  msg.header,
			allocatePromisedID: allocatePromisedID,
		},
		stream: msg.parent,
		done:   msg.done,
	})
}
2760 | |||
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
	v = textproto.TrimString(v)
	switch {
	case v == "":
		return
	case !strings.Contains(v, ","):
		// Single element: no split needed.
		fn(v)
		return
	}
	for _, part := range strings.Split(v, ",") {
		if part = textproto.TrimString(part); part != "" {
			fn(part)
		}
	}
}
2778 | |||
2779 | // From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 | ||
2780 | var connHeaders = []string{ | ||
2781 | "Connection", | ||
2782 | "Keep-Alive", | ||
2783 | "Proxy-Connection", | ||
2784 | "Transfer-Encoding", | ||
2785 | "Upgrade", | ||
2786 | } | ||
2787 | |||
2788 | // checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, | ||
2789 | // per RFC 7540 Section 8.1.2.2. | ||
2790 | // The returned error is reported to users. | ||
2791 | func checkValidHTTP2RequestHeaders(h http.Header) error { | ||
2792 | for _, k := range connHeaders { | ||
2793 | if _, ok := h[k]; ok { | ||
2794 | return fmt.Errorf("request header %q is not valid in HTTP/2", k) | ||
2795 | } | ||
2796 | } | ||
2797 | te := h["Te"] | ||
2798 | if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { | ||
2799 | return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) | ||
2800 | } | ||
2801 | return nil | ||
2802 | } | ||
2803 | |||
2804 | func new400Handler(err error) http.HandlerFunc { | ||
2805 | return func(w http.ResponseWriter, r *http.Request) { | ||
2806 | http.Error(w, err.Error(), http.StatusBadRequest) | ||
2807 | } | ||
2808 | } | ||
2809 | |||
// ValidTrailerHeader reports whether name is a valid header field name to appear
// in trailers.
// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
func ValidTrailerHeader(name string) bool {
	name = http.CanonicalHeaderKey(name)
	return !strings.HasPrefix(name, "If-") && !badTrailer[name]
}

// badTrailer is the set of canonical header field names that are
// forbidden in trailers (beyond the "If-" prefix rule above).
var badTrailer = map[string]bool{
	"Authorization":       true,
	"Cache-Control":       true,
	"Connection":          true,
	"Content-Encoding":    true,
	"Content-Length":      true,
	"Content-Range":       true,
	"Content-Type":        true,
	"Expect":              true,
	"Host":                true,
	"Keep-Alive":          true,
	"Max-Forwards":        true,
	"Pragma":              true,
	"Proxy-Authenticate":  true,
	"Proxy-Authorization": true,
	"Proxy-Connection":    true,
	"Range":               true,
	"Realm":               true,
	"Te":                  true,
	"Trailer":             true,
	"Transfer-Encoding":   true,
	"Www-Authenticate":    true,
}
2844 | |||
2845 | // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives | ||
2846 | // disabled. See comments on h1ServerShutdownChan above for why | ||
2847 | // the code is written this way. | ||
2848 | func h1ServerKeepAlivesDisabled(hs *http.Server) bool { | ||
2849 | var x interface{} = hs | ||
2850 | type I interface { | ||
2851 | doKeepAlives() bool | ||
2852 | } | ||
2853 | if hs, ok := x.(I); ok { | ||
2854 | return !hs.doKeepAlives() | ||
2855 | } | ||
2856 | return false | ||
2857 | } | ||
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 0000000..adb77ff --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go | |||
@@ -0,0 +1,2275 @@ | |||
1 | // Copyright 2015 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | // Transport code. | ||
6 | |||
7 | package http2 | ||
8 | |||
9 | import ( | ||
10 | "bufio" | ||
11 | "bytes" | ||
12 | "compress/gzip" | ||
13 | "crypto/rand" | ||
14 | "crypto/tls" | ||
15 | "errors" | ||
16 | "fmt" | ||
17 | "io" | ||
18 | "io/ioutil" | ||
19 | "log" | ||
20 | "math" | ||
21 | mathrand "math/rand" | ||
22 | "net" | ||
23 | "net/http" | ||
24 | "sort" | ||
25 | "strconv" | ||
26 | "strings" | ||
27 | "sync" | ||
28 | "time" | ||
29 | |||
30 | "golang.org/x/net/http2/hpack" | ||
31 | "golang.org/x/net/idna" | ||
32 | "golang.org/x/net/lex/httplex" | ||
33 | ) | ||
34 | |||
const (
	// transportDefaultConnFlow is how many connection-level flow control
	// tokens we give the server at start-up, past the default 64k.
	transportDefaultConnFlow = 1 << 30

	// transportDefaultStreamFlow is how many stream-level flow
	// control tokens we announce to the peer, and how many bytes
	// we buffer per stream.
	transportDefaultStreamFlow = 4 << 20

	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
	// a stream-level WINDOW_UPDATE for at a time.
	transportDefaultStreamMinRefresh = 4 << 10

	// defaultUserAgent is sent when the request has no User-Agent header.
	defaultUserAgent = "Go-http-client/2.0"
)
51 | |||
// Transport is an HTTP/2 Transport.
//
// A Transport internally caches connections to servers. It is safe
// for concurrent use by multiple goroutines.
type Transport struct {
	// DialTLS specifies an optional dial function for creating
	// TLS connections for requests.
	//
	// If DialTLS is nil, tls.Dial is used.
	//
	// If the returned net.Conn has a ConnectionState method like tls.Conn,
	// it will be used to set http.Response.TLS.
	DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)

	// TLSClientConfig specifies the TLS configuration to use with
	// tls.Client. If nil, the default configuration is used.
	TLSClientConfig *tls.Config

	// ConnPool optionally specifies an alternate connection pool to use.
	// If nil, the default is used.
	ConnPool ClientConnPool

	// DisableCompression, if true, prevents the Transport from
	// requesting compression with an "Accept-Encoding: gzip"
	// request header when the Request contains no existing
	// Accept-Encoding value. If the Transport requests gzip on
	// its own and gets a gzipped response, it's transparently
	// decoded in the Response.Body. However, if the user
	// explicitly requested gzip it is not automatically
	// uncompressed.
	DisableCompression bool

	// AllowHTTP, if true, permits HTTP/2 requests using the insecure,
	// plain-text "http" scheme. Note that this does not enable h2c support.
	AllowHTTP bool

	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
	// send in the initial settings frame. It is how many bytes
	// of response headers are allowed. Unlike the http2 spec, zero here
	// means to use a default limit (currently 10MB). If you actually
	// want to advertise an unlimited value to the peer, Transport
	// interprets the highest possible value here (0xffffffff or 1<<32-1)
	// to mean no limit.
	MaxHeaderListSize uint32

	// t1, if non-nil, is the standard library Transport using
	// this transport. Its settings are used (but not its
	// RoundTrip method, etc).
	t1 *http.Transport

	// connPoolOnce guards lazy initialization of connPoolOrDef; see connPool.
	connPoolOnce  sync.Once
	connPoolOrDef ClientConnPool // non-nil version of ConnPool
}
105 | |||
106 | func (t *Transport) maxHeaderListSize() uint32 { | ||
107 | if t.MaxHeaderListSize == 0 { | ||
108 | return 10 << 20 | ||
109 | } | ||
110 | if t.MaxHeaderListSize == 0xffffffff { | ||
111 | return 0 | ||
112 | } | ||
113 | return t.MaxHeaderListSize | ||
114 | } | ||
115 | |||
// disableCompression reports whether transparent gzip should be skipped,
// honoring the wrapped http.Transport's setting when one is present.
func (t *Transport) disableCompression() bool {
	return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}

// errTransportVersion is returned by ConfigureTransport when built
// against a net/http too old to support HTTP/2 wiring.
var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")

// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It requires Go 1.6 or later and returns an error if the net/http package is too old
// or if t1 has already been HTTP/2-enabled.
func ConfigureTransport(t1 *http.Transport) error {
	_, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
	return err
}
129 | |||
// connPool returns the connection pool to use, lazily falling back to a
// default clientConnPool when the user did not supply Transport.ConnPool.
func (t *Transport) connPool() ClientConnPool {
	t.connPoolOnce.Do(t.initConnPool)
	return t.connPoolOrDef
}

// initConnPool runs exactly once (via connPoolOnce) and populates
// connPoolOrDef so callers never see a nil pool.
func (t *Transport) initConnPool() {
	if t.ConnPool != nil {
		t.connPoolOrDef = t.ConnPool
	} else {
		t.connPoolOrDef = &clientConnPool{t: t}
	}
}
142 | |||
// ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server.
type ClientConn struct {
	t         *Transport
	tconn     net.Conn             // usually *tls.Conn, except specialized impls
	tlsState  *tls.ConnectionState // nil only for specialized impls
	singleUse bool                 // whether being used for a single http.Request

	// readLoop goroutine fields:
	readerDone chan struct{} // closed on error
	readerErr  error         // set before readerDone is closed

	idleTimeout time.Duration // or 0 for never
	idleTimer   *time.Timer

	mu              sync.Mutex // guards following
	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
	inflow          flow       // peer's conn-level flow control
	closed          bool
	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
	goAwayDebug     string                   // goAway frame's debug data, retained as a string
	streams         map[uint32]*clientStream // client-initiated
	nextStreamID    uint32
	pendingRequests int                       // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
	bw              *bufio.Writer
	br              *bufio.Reader
	fr              *Framer
	lastActive      time.Time
	// Settings from peer: (also guarded by mu)
	maxFrameSize          uint32
	maxConcurrentStreams  uint32
	peerMaxHeaderListSize uint64
	initialWindowSize     uint32

	hbuf    bytes.Buffer // HPACK encoder writes into this
	henc    *hpack.Encoder
	freeBuf [][]byte // recycled DATA-frame scratch buffers; see frameScratchBuffer

	wmu  sync.Mutex // held while writing; acquire AFTER mu if holding both
	werr error      // first write error that has occurred
}
187 | |||
// clientStream is the state for a single HTTP/2 stream. One of these
// is created for each Transport.RoundTrip call.
type clientStream struct {
	cc            *ClientConn
	req           *http.Request
	trace         *clientTrace // or nil
	ID            uint32
	resc          chan resAndError // readLoop delivers the response (or error) here
	bufPipe       pipe             // buffered pipe with the flow-controlled response payload
	startedWrite  bool             // started request body write; guarded by cc.mu
	requestedGzip bool
	on100         func() // optional code to run if get a 100 continue response

	flow        flow  // guarded by cc.mu
	inflow      flow  // guarded by cc.mu
	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
	readErr     error // sticky read error; owned by transportResponseBody.Read
	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu

	peerReset chan struct{} // closed on peer reset
	resetErr  error         // populated before peerReset is closed

	done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu

	// owned by clientConnReadLoop:
	firstByte    bool // got the first response byte
	pastHeaders  bool // got first MetaHeadersFrame (actual headers)
	pastTrailers bool // got optional second MetaHeadersFrame (trailers)

	trailer    http.Header  // accumulated trailers
	resTrailer *http.Header // client's Response.Trailer
}
221 | |||
// awaitRequestCancel waits for the user to cancel a request or for the done
// channel to be signaled. A non-nil error is returned only if the request was
// canceled.
func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
	ctx := reqContext(req)
	// Fast path: with no Cancel channel and no cancelable context there
	// is nothing to race against done, so return without blocking.
	if req.Cancel == nil && ctx.Done() == nil {
		return nil
	}
	select {
	case <-req.Cancel:
		return errRequestCanceled
	case <-ctx.Done():
		return ctx.Err()
	case <-done:
		return nil
	}
}
239 | |||
// awaitRequestCancel waits for the user to cancel a request, its context to
// expire, or for the request to be done (any way it might be removed from the
// cc.streams map: peer reset, successful completion, TCP connection breakage,
// etc). If the request is canceled, then cs will be canceled and closed.
func (cs *clientStream) awaitRequestCancel(req *http.Request) {
	// A non-nil error means the user canceled, not normal completion.
	if err := awaitRequestCancel(req, cs.done); err != nil {
		cs.cancelStream()
		cs.bufPipe.CloseWithError(err)
	}
}
250 | |||
251 | func (cs *clientStream) cancelStream() { | ||
252 | cc := cs.cc | ||
253 | cc.mu.Lock() | ||
254 | didReset := cs.didReset | ||
255 | cs.didReset = true | ||
256 | cc.mu.Unlock() | ||
257 | |||
258 | if !didReset { | ||
259 | cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) | ||
260 | cc.forgetStreamID(cs.ID) | ||
261 | } | ||
262 | } | ||
263 | |||
// checkResetOrDone reports any error sent in a RST_STREAM frame by the
// server, or errStreamClosed if the stream is complete.
// The default case makes this a non-blocking poll: nil means the stream
// is still live.
func (cs *clientStream) checkResetOrDone() error {
	select {
	case <-cs.peerReset:
		return cs.resetErr
	case <-cs.done:
		return errStreamClosed
	default:
		return nil
	}
}
276 | |||
277 | func (cs *clientStream) abortRequestBodyWrite(err error) { | ||
278 | if err == nil { | ||
279 | panic("nil error") | ||
280 | } | ||
281 | cc := cs.cc | ||
282 | cc.mu.Lock() | ||
283 | cs.stopReqBody = err | ||
284 | cc.cond.Broadcast() | ||
285 | cc.mu.Unlock() | ||
286 | } | ||
287 | |||
288 | type stickyErrWriter struct { | ||
289 | w io.Writer | ||
290 | err *error | ||
291 | } | ||
292 | |||
293 | func (sew stickyErrWriter) Write(p []byte) (n int, err error) { | ||
294 | if *sew.err != nil { | ||
295 | return 0, *sew.err | ||
296 | } | ||
297 | n, err = sew.w.Write(p) | ||
298 | *sew.err = err | ||
299 | return | ||
300 | } | ||
301 | |||
// ErrNoCachedConn is returned by RoundTripOpt when OnlyCachedConn is set
// and the pool has no existing connection for the request's host.
var ErrNoCachedConn = errors.New("http2: no cached connection was available")

// RoundTripOpt are options for the Transport.RoundTripOpt method.
type RoundTripOpt struct {
	// OnlyCachedConn controls whether RoundTripOpt may
	// create a new TCP connection. If set true and
	// no cached connection is available, RoundTripOpt
	// will return ErrNoCachedConn.
	OnlyCachedConn bool
}

// RoundTrip implements http.RoundTripper with default options.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	return t.RoundTripOpt(req, RoundTripOpt{})
}
316 | |||
317 | // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) | ||
318 | // and returns a host:port. The port 443 is added if needed. | ||
319 | func authorityAddr(scheme string, authority string) (addr string) { | ||
320 | host, port, err := net.SplitHostPort(authority) | ||
321 | if err != nil { // authority didn't have a port | ||
322 | port = "443" | ||
323 | if scheme == "http" { | ||
324 | port = "80" | ||
325 | } | ||
326 | host = authority | ||
327 | } | ||
328 | if a, err := idna.ToASCII(host); err == nil { | ||
329 | host = a | ||
330 | } | ||
331 | // IPv6 address literal, without a port: | ||
332 | if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { | ||
333 | return host + ":" + port | ||
334 | } | ||
335 | return net.JoinHostPort(host, port) | ||
336 | } | ||
337 | |||
// RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
		return nil, errors.New("http2: unsupported scheme")
	}

	addr := authorityAddr(req.URL.Scheme, req.URL.Host)
	// Retry loop: some failures (GOAWAY, refused stream, dead cached conn)
	// are safe to replay on a fresh connection; see shouldRetryRequest.
	for retry := 0; ; retry++ {
		cc, err := t.connPool().GetClientConn(req, addr)
		if err != nil {
			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
			return nil, err
		}
		traceGotConn(req, cc)
		res, err := cc.RoundTrip(req)
		if err != nil && retry <= 6 {
			afterBodyWrite := false
			if e, ok := err.(afterReqBodyWriteError); ok {
				err = e
				afterBodyWrite = true
			}
			// shouldRetryRequest may return a clone with a fresh Body.
			if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil {
				// After the first retry, do exponential backoff with 10% jitter.
				if retry == 0 {
					continue
				}
				backoff := float64(uint(1) << (uint(retry) - 1))
				backoff += backoff * (0.1 * mathrand.Float64())
				select {
				case <-time.After(time.Second * time.Duration(backoff)):
					continue
				case <-reqContext(req).Done():
					return nil, reqContext(req).Err()
				}
			}
		}
		if err != nil {
			t.vlogf("RoundTrip failure: %v", err)
			return nil, err
		}
		return res, nil
	}
}
381 | |||
// CloseIdleConnections closes any connections which were previously
// connected from previous requests but are now sitting idle.
// It does not interrupt any connections currently in use.
func (t *Transport) CloseIdleConnections() {
	// Only pools that support idle-closing can honor this; the default
	// clientConnPool does.
	if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
		cp.closeIdleConnections()
	}
}

var (
	errClientConnClosed    = errors.New("http2: client conn is closed")
	errClientConnUnusable  = errors.New("http2: client conn not usable")
	errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)
396 | |||
// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip.
// It is used to signal that err happened after part of Request.Body was sent to the server.
type afterReqBodyWriteError struct {
	err error
}

// Error implements the error interface, noting that the body was partially sent.
func (e afterReqBodyWriteError) Error() string {
	return e.err.Error() + "; some request body already written"
}
406 | |||
// shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed.
func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
	if !canRetryError(err) {
		return nil, err
	}
	// No body bytes reached the server yet, so the request is replayable as-is.
	if !afterBodyWrite {
		return req, nil
	}
	// If the Body is nil (or http.NoBody), it's safe to reuse
	// this request and its Body.
	if req.Body == nil || reqBodyIsNoBody(req.Body) {
		return req, nil
	}
	// Otherwise we depend on the Request having its GetBody
	// func defined.
	getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
	if getBody == nil {
		return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
	}
	body, err := getBody()
	if err != nil {
		return nil, err
	}
	// Shallow-copy the request and attach the fresh body.
	newReq := *req
	newReq.Body = body
	return &newReq, nil
}
437 | |||
438 | func canRetryError(err error) bool { | ||
439 | if err == errClientConnUnusable || err == errClientConnGotGoAway { | ||
440 | return true | ||
441 | } | ||
442 | if se, ok := err.(StreamError); ok { | ||
443 | return se.Code == ErrCodeRefusedStream | ||
444 | } | ||
445 | return false | ||
446 | } | ||
447 | |||
// dialClientConn dials addr ("host:port") using the transport's TLS
// configuration and wraps the resulting connection in a new ClientConn.
func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
	// Host (without port) is needed for SNI / certificate verification.
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
	}
	tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
	if err != nil {
		return nil, err
	}
	return t.newClientConn(tconn, singleUse)
}
459 | |||
460 | func (t *Transport) newTLSConfig(host string) *tls.Config { | ||
461 | cfg := new(tls.Config) | ||
462 | if t.TLSClientConfig != nil { | ||
463 | *cfg = *cloneTLSConfig(t.TLSClientConfig) | ||
464 | } | ||
465 | if !strSliceContains(cfg.NextProtos, NextProtoTLS) { | ||
466 | cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) | ||
467 | } | ||
468 | if cfg.ServerName == "" { | ||
469 | cfg.ServerName = host | ||
470 | } | ||
471 | return cfg | ||
472 | } | ||
473 | |||
// dialTLS returns the TLS dial function to use: the user-supplied
// Transport.DialTLS if set, otherwise the default tls.Dial-based dialer.
func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
	if t.DialTLS != nil {
		return t.DialTLS
	}
	return t.dialTLSDefault
}

// dialTLSDefault dials with tls.Dial and then verifies the handshake:
// hostname (unless InsecureSkipVerify) and that ALPN mutually negotiated
// the "h2" protocol.
func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
	cn, err := tls.Dial(network, addr, cfg)
	if err != nil {
		return nil, err
	}
	// Force the handshake now so negotiation results are available below.
	if err := cn.Handshake(); err != nil {
		return nil, err
	}
	if !cfg.InsecureSkipVerify {
		if err := cn.VerifyHostname(cfg.ServerName); err != nil {
			return nil, err
		}
	}
	state := cn.ConnectionState()
	if p := state.NegotiatedProtocol; p != NextProtoTLS {
		return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
	}
	if !state.NegotiatedProtocolIsMutual {
		return nil, errors.New("http2: could not negotiate protocol mutually")
	}
	return cn, nil
}
503 | |||
// disableKeepAlives reports whether connections should be closed as
// soon as possible after handling the first request.
func (t *Transport) disableKeepAlives() bool {
	return t.t1 != nil && t.t1.DisableKeepAlives
}

// expectContinueTimeout returns the wrapped http.Transport's
// ExpectContinueTimeout, or zero when running standalone.
func (t *Transport) expectContinueTimeout() time.Duration {
	if t.t1 == nil {
		return 0
	}
	return transportExpectContinueTimeout(t.t1)
}
516 | |||
// NewClientConn wraps an established connection c in a ClientConn,
// performing the HTTP/2 client connection preface.
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
	return t.newClientConn(c, false)
}

// newClientConn sets up client HTTP/2 state on c: framer and HPACK
// codec, the client preface plus initial SETTINGS and conn-level
// WINDOW_UPDATE, then starts the readLoop goroutine.
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
	cc := &ClientConn{
		t:                     t,
		tconn:                 c,
		readerDone:            make(chan struct{}),
		nextStreamID:          1,
		maxFrameSize:          16 << 10,           // spec default
		initialWindowSize:     65535,              // spec default
		maxConcurrentStreams:  1000,               // "infinite", per spec. 1000 seems good enough.
		peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
		streams:               make(map[uint32]*clientStream),
		singleUse:             singleUse,
		wantSettingsAck:       true,
		pings:                 make(map[[8]byte]chan struct{}),
	}
	if d := t.idleConnTimeout(); d != 0 {
		cc.idleTimeout = d
		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
	}
	if VerboseLogs {
		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
	}

	cc.cond = sync.NewCond(&cc.mu)
	cc.flow.add(int32(initialWindowSize))

	// TODO: adjust this writer size to account for frame size +
	// MTU + crypto/tls record padding.
	cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
	cc.br = bufio.NewReader(c)
	cc.fr = NewFramer(cc.bw, cc.br)
	cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()

	// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
	// henc in response to SETTINGS frames?
	cc.henc = hpack.NewEncoder(&cc.hbuf)

	// Capture TLS state (for http.Response.TLS) when the conn exposes it.
	if cs, ok := c.(connectionStater); ok {
		state := cs.ConnectionState()
		cc.tlsState = &state
	}

	initialSettings := []Setting{
		{ID: SettingEnablePush, Val: 0},
		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
	}
	if max := t.maxHeaderListSize(); max != 0 {
		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
	}

	// Send the preface and initial frames in one flush; cc.werr records
	// any write error via stickyErrWriter.
	cc.bw.Write(clientPreface)
	cc.fr.WriteSettings(initialSettings...)
	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
	cc.bw.Flush()
	if cc.werr != nil {
		return nil, cc.werr
	}

	go cc.readLoop()
	return cc, nil
}
584 | |||
// setGoAway records a GOAWAY frame received from the server and fails
// any in-flight streams with IDs above the frame's LastStreamID, since
// the server has promised not to process those.
func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
	cc.mu.Lock()
	defer cc.mu.Unlock()

	old := cc.goAway
	cc.goAway = f

	// Merge the previous and current GoAway error frames.
	if cc.goAwayDebug == "" {
		cc.goAwayDebug = string(f.DebugData())
	}
	if old != nil && old.ErrCode != ErrCodeNo {
		cc.goAway.ErrCode = old.ErrCode
	}
	last := f.LastStreamID
	for streamID, cs := range cc.streams {
		if streamID > last {
			// Non-blocking send: the stream may already have a result queued.
			select {
			case cs.resc <- resAndError{err: errClientConnGotGoAway}:
			default:
			}
		}
	}
}
609 | |||
// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed or received or sent a GOAWAY.
func (cc *ClientConn) CanTakeNewRequest() bool {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.canTakeNewRequestLocked()
}

// canTakeNewRequestLocked is the caller-holds-cc.mu form of CanTakeNewRequest.
func (cc *ClientConn) canTakeNewRequestLocked() bool {
	// A single-use conn that has already issued its stream can't be reused.
	if cc.singleUse && cc.nextStreamID > 1 {
		return false
	}
	// Also require that stream IDs (current plus queued) stay below MaxInt32.
	return cc.goAway == nil && !cc.closed &&
		int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
}
625 | |||
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
// connection. The timer could just call closeIfIdle, but this is more
// clear.
func (cc *ClientConn) onIdleTimeout() {
	cc.closeIfIdle()
}

// closeIfIdle closes the connection, but only if it carries no active streams.
func (cc *ClientConn) closeIfIdle() {
	cc.mu.Lock()
	if len(cc.streams) > 0 {
		cc.mu.Unlock()
		return
	}
	cc.closed = true
	nextID := cc.nextStreamID
	// TODO: do clients send GOAWAY too? maybe? Just Close:
	cc.mu.Unlock()

	if VerboseLogs {
		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
	}
	cc.tconn.Close()
}
652 | |||
// maxAllocFrameSize caps the size of a single DATA-frame scratch buffer.
const maxAllocFrameSize = 512 << 10

// frameBuffer returns a scratch buffer suitable for writing DATA frames.
// They're capped at the min of the peer's max frame size or 512KB
// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
// buffers.
func (cc *ClientConn) frameScratchBuffer() []byte {
	cc.mu.Lock()
	size := cc.maxFrameSize
	if size > maxAllocFrameSize {
		size = maxAllocFrameSize
	}
	// Reuse a pooled buffer when one is big enough; see putFrameScratchBuffer.
	for i, buf := range cc.freeBuf {
		if len(buf) >= int(size) {
			cc.freeBuf[i] = nil
			cc.mu.Unlock()
			return buf[:size]
		}
	}
	// Unlock before allocating so the conn isn't blocked on make.
	cc.mu.Unlock()
	return make([]byte, size)
}
675 | |||
676 | func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { | ||
677 | cc.mu.Lock() | ||
678 | defer cc.mu.Unlock() | ||
679 | const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. | ||
680 | if len(cc.freeBuf) < maxBufs { | ||
681 | cc.freeBuf = append(cc.freeBuf, buf) | ||
682 | return | ||
683 | } | ||
684 | for i, old := range cc.freeBuf { | ||
685 | if old == nil { | ||
686 | cc.freeBuf[i] = buf | ||
687 | return | ||
688 | } | ||
689 | } | ||
690 | // forget about it. | ||
691 | } | ||
692 | |||
// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")

// commaSeparatedTrailers returns req's canonicalized Trailer keys,
// sorted and comma-joined for the "Trailer" request header, or an error
// if any key is forbidden in trailers.
func commaSeparatedTrailers(req *http.Request) (string, error) {
	keys := make([]string, 0, len(req.Trailer))
	for k := range req.Trailer {
		k = http.CanonicalHeaderKey(k)
		switch k {
		case "Transfer-Encoding", "Trailer", "Content-Length":
			return "", &badStringError{"invalid Trailer key", k}
		}
		keys = append(keys, k)
	}
	if len(keys) > 0 {
		// Sort for deterministic output; map iteration order is random.
		sort.Strings(keys)
		return strings.Join(keys, ","), nil
	}
	return "", nil
}
713 | |||
714 | func (cc *ClientConn) responseHeaderTimeout() time.Duration { | ||
715 | if cc.t.t1 != nil { | ||
716 | return cc.t.t1.ResponseHeaderTimeout | ||
717 | } | ||
718 | // No way to do this (yet?) with just an http2.Transport. Probably | ||
719 | // no need. Request.Cancel this is the new way. We only need to support | ||
720 | // this for compatibility with the old http.Transport fields when | ||
721 | // we're doing transparent http2. | ||
722 | return 0 | ||
723 | } | ||
724 | |||
725 | // checkConnHeaders checks whether req has any invalid connection-level headers. | ||
726 | // per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. | ||
727 | // Certain headers are special-cased as okay but not transmitted later. | ||
728 | func checkConnHeaders(req *http.Request) error { | ||
729 | if v := req.Header.Get("Upgrade"); v != "" { | ||
730 | return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) | ||
731 | } | ||
732 | if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { | ||
733 | return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) | ||
734 | } | ||
735 | if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { | ||
736 | return fmt.Errorf("http2: invalid Connection request header: %q", vv) | ||
737 | } | ||
738 | return nil | ||
739 | } | ||
740 | |||
741 | // actualContentLength returns a sanitized version of | ||
742 | // req.ContentLength, where 0 actually means zero (not unknown) and -1 | ||
743 | // means unknown. | ||
744 | func actualContentLength(req *http.Request) int64 { | ||
745 | if req.Body == nil || reqBodyIsNoBody(req.Body) { | ||
746 | return 0 | ||
747 | } | ||
748 | if req.ContentLength != 0 { | ||
749 | return req.ContentLength | ||
750 | } | ||
751 | return -1 | ||
752 | } | ||
753 | |||
// RoundTrip sends req over this HTTP/2 connection and blocks until the
// read loop delivers a response, a response-header timeout fires, or the
// request is canceled (via req.Cancel or its context).
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
	if err := checkConnHeaders(req); err != nil {
		return nil, err
	}
	// The connection is about to become non-idle; suspend the idle timer.
	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	trailers, err := commaSeparatedTrailers(req)
	if err != nil {
		return nil, err
	}
	hasTrailers := trailers != ""

	cc.mu.Lock()
	if err := cc.awaitOpenSlotForRequest(req); err != nil {
		cc.mu.Unlock()
		return nil, err
	}

	body := req.Body
	contentLen := actualContentLength(req)
	hasBody := contentLen != 0 // note: -1 (unknown length) also counts as a body

	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
	var requestedGzip bool
	if !cc.t.disableCompression() &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != "HEAD" {
		// Request gzip only, not deflate. Deflate is ambiguous and
		// not as universally supported anyway.
		// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
		//
		// Note that we don't request this for HEAD requests,
		// due to a bug in nginx:
		//   http://trac.nginx.org/nginx/ticket/358
		//   https://golang.org/issue/5522
		//
		// We don't request gzip if the request is for a range, since
		// auto-decoding a portion of a gzipped document will just fail
		// anyway. See https://golang.org/issue/8923
		requestedGzip = true
	}

	// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
	// sent by writeRequestBody below, along with any Trailers,
	// again in form HEADERS{1}, CONTINUATION{0,})
	hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
	if err != nil {
		cc.mu.Unlock()
		return nil, err
	}

	cs := cc.newStream()
	cs.req = req
	cs.trace = requestTrace(req)
	cs.requestedGzip = requestedGzip
	bodyWriter := cc.t.getBodyWriterState(cs, body)
	cs.on100 = bodyWriter.on100

	// Write the HEADERS (and any CONTINUATION) frames under wmu while
	// still holding mu, so stream creation and header transmission are
	// atomic with respect to other requests on this connection.
	cc.wmu.Lock()
	endStream := !hasBody && !hasTrailers
	werr := cc.writeHeaders(cs.ID, endStream, hdrs)
	cc.wmu.Unlock()
	traceWroteHeaders(cs.trace)
	cc.mu.Unlock()

	if werr != nil {
		if hasBody {
			req.Body.Close() // per RoundTripper contract
			bodyWriter.cancel()
		}
		cc.forgetStreamID(cs.ID)
		// Don't bother sending a RST_STREAM (our write already failed;
		// no need to keep writing)
		traceWroteRequest(cs.trace, werr)
		return nil, werr
	}

	// respHeaderTimer stays nil (never fires) until the request is fully
	// written; it is (re)armed below once the body write completes.
	var respHeaderTimer <-chan time.Time
	if hasBody {
		bodyWriter.scheduleBodyWrite()
	} else {
		traceWroteRequest(cs.trace, nil)
		if d := cc.responseHeaderTimeout(); d != 0 {
			timer := time.NewTimer(d)
			defer timer.Stop()
			respHeaderTimer = timer.C
		}
	}

	readLoopResCh := cs.resc
	bodyWritten := false
	ctx := reqContext(req)

	handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
		res := re.res
		if re.err != nil || res.StatusCode > 299 {
			// On error or status code 3xx, 4xx, 5xx, etc abort any
			// ongoing write, assuming that the server doesn't care
			// about our request body. If the server replied with 1xx or
			// 2xx, however, then assume the server DOES potentially
			// want our body (e.g. full-duplex streaming:
			// golang.org/issue/13444). If it turns out the server
			// doesn't, they'll RST_STREAM us soon enough. This is a
			// heuristic to avoid adding knobs to Transport. Hopefully
			// we can keep it.
			bodyWriter.cancel()
			cs.abortRequestBodyWrite(errStopReqBodyWrite)
		}
		if re.err != nil {
			cc.mu.Lock()
			afterBodyWrite := cs.startedWrite
			cc.mu.Unlock()
			cc.forgetStreamID(cs.ID)
			if afterBodyWrite {
				return nil, afterReqBodyWriteError{re.err}
			}
			return nil, re.err
		}
		res.Request = req
		res.TLS = cc.tlsState
		return res, nil
	}

	// Wait for the response or a cancellation/timeout/reset, whichever
	// comes first. On timeout/cancel before the body is fully written,
	// the body write is aborted (which itself sends the stream reset).
	for {
		select {
		case re := <-readLoopResCh:
			return handleReadLoopResponse(re)
		case <-respHeaderTimer:
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, errTimeout
		case <-ctx.Done():
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, ctx.Err()
		case <-req.Cancel:
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, errRequestCanceled
		case <-cs.peerReset:
			// processResetStream already removed the
			// stream from the streams map; no need for
			// forgetStreamID.
			return nil, cs.resetErr
		case err := <-bodyWriter.resc:
			// Prefer the read loop's response, if available. Issue 16102.
			select {
			case re := <-readLoopResCh:
				return handleReadLoopResponse(re)
			default:
			}
			if err != nil {
				return nil, err
			}
			bodyWritten = true
			if d := cc.responseHeaderTimeout(); d != 0 {
				timer := time.NewTimer(d)
				defer timer.Stop()
				respHeaderTimer = timer.C
			}
		}
	}
}
935 | |||
// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
// It returns errClientConnUnusable if the connection can no longer take
// new requests, or the cancellation error if req is canceled while waiting.
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
	var waitingForConn chan struct{}
	var waitingForConnErr error // guarded by cc.mu
	for {
		cc.lastActive = time.Now()
		if cc.closed || !cc.canTakeNewRequestLocked() {
			return errClientConnUnusable
		}
		if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
			// A slot is free. Signal the cancellation-watcher goroutine
			// (if one was started) that we're done waiting.
			if waitingForConn != nil {
				close(waitingForConn)
			}
			return nil
		}
		// Unfortunately, we cannot wait on a condition variable and channel at
		// the same time, so instead, we spin up a goroutine to check if the
		// request is canceled while we wait for a slot to open in the connection.
		if waitingForConn == nil {
			waitingForConn = make(chan struct{})
			go func() {
				if err := awaitRequestCancel(req, waitingForConn); err != nil {
					cc.mu.Lock()
					waitingForConnErr = err
					// Wake the cond.Wait below so the error is noticed.
					cc.cond.Broadcast()
					cc.mu.Unlock()
				}
			}()
		}
		cc.pendingRequests++
		cc.cond.Wait() // releases cc.mu while blocked
		cc.pendingRequests--
		if waitingForConnErr != nil {
			return waitingForConnErr
		}
	}
}
974 | |||
975 | // requires cc.wmu be held | ||
976 | func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { | ||
977 | first := true // first frame written (HEADERS is first, then CONTINUATION) | ||
978 | frameSize := int(cc.maxFrameSize) | ||
979 | for len(hdrs) > 0 && cc.werr == nil { | ||
980 | chunk := hdrs | ||
981 | if len(chunk) > frameSize { | ||
982 | chunk = chunk[:frameSize] | ||
983 | } | ||
984 | hdrs = hdrs[len(chunk):] | ||
985 | endHeaders := len(hdrs) == 0 | ||
986 | if first { | ||
987 | cc.fr.WriteHeaders(HeadersFrameParam{ | ||
988 | StreamID: streamID, | ||
989 | BlockFragment: chunk, | ||
990 | EndStream: endStream, | ||
991 | EndHeaders: endHeaders, | ||
992 | }) | ||
993 | first = false | ||
994 | } else { | ||
995 | cc.fr.WriteContinuation(streamID, endHeaders, chunk) | ||
996 | } | ||
997 | } | ||
998 | // TODO(bradfitz): this Flush could potentially block (as | ||
999 | // could the WriteHeaders call(s) above), which means they | ||
1000 | // wouldn't respond to Request.Cancel being readable. That's | ||
1001 | // rare, but this should probably be in a goroutine. | ||
1002 | cc.bw.Flush() | ||
1003 | return cc.werr | ||
1004 | } | ||
1005 | |||
// internal error values; they don't escape to callers
var (
	// abort request body write; don't send cancel (no RST_STREAM)
	errStopReqBodyWrite = errors.New("http2: aborting request body write")

	// abort request body write, but send a stream reset to cancel the stream.
	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
)
1014 | |||
// writeRequestBody streams body as DATA frames on cs, honoring per-stream
// flow control, then ends the stream with either trailers or an empty DATA
// frame carrying END_STREAM. It always closes bodyCloser before returning.
func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
	cc := cs.cc
	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
	buf := cc.frameScratchBuffer()
	defer cc.putFrameScratchBuffer(buf)

	defer func() {
		traceWroteRequest(cs.trace, err)
		// TODO: write h12Compare test showing whether
		// Request.Body is closed by the Transport,
		// and in multiple cases: server replies <=299 and >299
		// while still writing request body
		cerr := bodyCloser.Close()
		if err == nil {
			err = cerr
		}
	}()

	req := cs.req
	hasTrailers := req.Trailer != nil

	var sawEOF bool
	for !sawEOF {
		n, err := body.Read(buf)
		if err == io.EOF {
			sawEOF = true
			err = nil
		} else if err != nil {
			return err
		}

		// Send the chunk just read in pieces no larger than the
		// flow-control tokens (and max frame size) allow.
		remain := buf[:n]
		for len(remain) > 0 && err == nil {
			var allowed int32
			allowed, err = cs.awaitFlowControl(len(remain))
			switch {
			case err == errStopReqBodyWrite:
				return err
			case err == errStopReqBodyWriteAndCancel:
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
				return err
			case err != nil:
				return err
			}
			cc.wmu.Lock()
			data := remain[:allowed]
			remain = remain[allowed:]
			// END_STREAM only on the very last piece, and only if no
			// trailers will follow.
			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
			err = cc.fr.WriteData(cs.ID, sentEnd, data)
			if err == nil {
				// TODO(bradfitz): this flush is for latency, not bandwidth.
				// Most requests won't need this. Make this opt-in or
				// opt-out? Use some heuristic on the body type? Nagel-like
				// timers? Based on 'n'? Only last chunk of this for loop,
				// unless flow control tokens are low? For now, always.
				// If we change this, see comment below.
				err = cc.bw.Flush()
			}
			cc.wmu.Unlock()
		}
		if err != nil {
			return err
		}
	}

	if sentEnd {
		// Already sent END_STREAM (which implies we have no
		// trailers) and flushed, because currently all
		// WriteData frames above get a flush. So we're done.
		return nil
	}

	var trls []byte
	if hasTrailers {
		// encodeTrailers mutates the shared hpack encoder, so hold cc.mu.
		cc.mu.Lock()
		trls, err = cc.encodeTrailers(req)
		cc.mu.Unlock()
		if err != nil {
			cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
			cc.forgetStreamID(cs.ID)
			return err
		}
	}

	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	// Two ways to send END_STREAM: either with trailers, or
	// with an empty DATA frame.
	if len(trls) > 0 {
		err = cc.writeHeaders(cs.ID, true, trls)
	} else {
		err = cc.fr.WriteData(cs.ID, true, nil)
	}
	if ferr := cc.bw.Flush(); ferr != nil && err == nil {
		err = ferr
	}
	return err
}
1114 | |||
1115 | // awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow | ||
1116 | // control tokens from the server. | ||
1117 | // It returns either the non-zero number of tokens taken or an error | ||
1118 | // if the stream is dead. | ||
1119 | func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { | ||
1120 | cc := cs.cc | ||
1121 | cc.mu.Lock() | ||
1122 | defer cc.mu.Unlock() | ||
1123 | for { | ||
1124 | if cc.closed { | ||
1125 | return 0, errClientConnClosed | ||
1126 | } | ||
1127 | if cs.stopReqBody != nil { | ||
1128 | return 0, cs.stopReqBody | ||
1129 | } | ||
1130 | if err := cs.checkResetOrDone(); err != nil { | ||
1131 | return 0, err | ||
1132 | } | ||
1133 | if a := cs.flow.available(); a > 0 { | ||
1134 | take := a | ||
1135 | if int(take) > maxBytes { | ||
1136 | |||
1137 | take = int32(maxBytes) // can't truncate int; take is int32 | ||
1138 | } | ||
1139 | if take > int32(cc.maxFrameSize) { | ||
1140 | take = int32(cc.maxFrameSize) | ||
1141 | } | ||
1142 | cs.flow.take(take) | ||
1143 | return take, nil | ||
1144 | } | ||
1145 | cc.cond.Wait() | ||
1146 | } | ||
1147 | } | ||
1148 | |||
// badStringError pairs a short description with the offending string
// value that caused it.
type badStringError struct {
	what string
	str  string
}

// Error renders the description followed by the quoted offending value.
func (e *badStringError) Error() string {
	return fmt.Sprintf("%s %q", e.what, e.str)
}
1155 | |||
// encodeHeaders hpack-encodes the request's pseudo-headers (:authority,
// :method, :path, :scheme), its regular headers, and any synthesized
// headers (trailer, content-length, accept-encoding, user-agent) into a
// header block. The total size is checked against the peer's advertised
// max header list size before any encoder state is touched.
// requires cc.mu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
	cc.hbuf.Reset()

	host := req.Host
	if host == "" {
		host = req.URL.Host
	}
	host, err := httplex.PunycodeHostPort(host)
	if err != nil {
		return nil, err
	}

	var path string
	if req.Method != "CONNECT" {
		path = req.URL.RequestURI()
		if !validPseudoPath(path) {
			orig := path
			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
			if !validPseudoPath(path) {
				if req.URL.Opaque != "" {
					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
				} else {
					return nil, fmt.Errorf("invalid request :path %q", orig)
				}
			}
		}
	}

	// Check for any invalid headers and return an error before we
	// potentially pollute our hpack state. (We want to be able to
	// continue to reuse the hpack encoder for future requests)
	for k, vv := range req.Header {
		if !httplex.ValidHeaderFieldName(k) {
			return nil, fmt.Errorf("invalid HTTP header name %q", k)
		}
		for _, v := range vv {
			if !httplex.ValidHeaderFieldValue(v) {
				return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
			}
		}
	}

	// enumerateHeaders yields every header pair exactly as it would be
	// sent; it is run twice below (once to size, once to encode).
	enumerateHeaders := func(f func(name, value string)) {
		// 8.1.2.3 Request Pseudo-Header Fields
		// The :path pseudo-header field includes the path and query parts of the
		// target URI (the path-absolute production and optionally a '?' character
		// followed by the query production (see Sections 3.3 and 3.4 of
		// [RFC3986]).
		f(":authority", host)
		f(":method", req.Method)
		if req.Method != "CONNECT" {
			f(":path", path)
			f(":scheme", req.URL.Scheme)
		}
		if trailers != "" {
			f("trailer", trailers)
		}

		var didUA bool
		for k, vv := range req.Header {
			if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") {
				// Host is :authority, already sent.
				// Content-Length is automatic, set below.
				continue
			} else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") ||
				strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") ||
				strings.EqualFold(k, "keep-alive") {
				// Per 8.1.2.2 Connection-Specific Header
				// Fields, don't send connection-specific
				// fields. We have already checked if any
				// are error-worthy so just ignore the rest.
				continue
			} else if strings.EqualFold(k, "user-agent") {
				// Match Go's http1 behavior: at most one
				// User-Agent. If set to nil or empty string,
				// then omit it. Otherwise if not mentioned,
				// include the default (below).
				didUA = true
				if len(vv) < 1 {
					continue
				}
				vv = vv[:1]
				if vv[0] == "" {
					continue
				}

			}

			for _, v := range vv {
				f(k, v)
			}
		}
		if shouldSendReqContentLength(req.Method, contentLength) {
			f("content-length", strconv.FormatInt(contentLength, 10))
		}
		if addGzipHeader {
			f("accept-encoding", "gzip")
		}
		if !didUA {
			f("user-agent", defaultUserAgent)
		}
	}

	// Do a first pass over the headers counting bytes to ensure
	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
	// separate pass before encoding the headers to prevent
	// modifying the hpack state.
	hlSize := uint64(0)
	enumerateHeaders(func(name, value string) {
		hf := hpack.HeaderField{Name: name, Value: value}
		hlSize += uint64(hf.Size())
	})

	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	// Header list size is ok. Write the headers.
	enumerateHeaders(func(name, value string) {
		cc.writeHeader(strings.ToLower(name), value)
	})

	return cc.hbuf.Bytes(), nil
}
1281 | |||
// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
	switch {
	case contentLength > 0:
		return true
	case contentLength < 0:
		// Unknown length: never advertise one.
		return false
	}
	// For zero bodies, whether we send a content-length depends on the method.
	// It also kinda doesn't matter for http2 either way, with END_STREAM.
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	}
	return false
}
1303 | |||
1304 | // requires cc.mu be held. | ||
1305 | func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { | ||
1306 | cc.hbuf.Reset() | ||
1307 | |||
1308 | hlSize := uint64(0) | ||
1309 | for k, vv := range req.Trailer { | ||
1310 | for _, v := range vv { | ||
1311 | hf := hpack.HeaderField{Name: k, Value: v} | ||
1312 | hlSize += uint64(hf.Size()) | ||
1313 | } | ||
1314 | } | ||
1315 | if hlSize > cc.peerMaxHeaderListSize { | ||
1316 | return nil, errRequestHeaderListSize | ||
1317 | } | ||
1318 | |||
1319 | for k, vv := range req.Trailer { | ||
1320 | // Transfer-Encoding, etc.. have already been filtered at the | ||
1321 | // start of RoundTrip | ||
1322 | lowKey := strings.ToLower(k) | ||
1323 | for _, v := range vv { | ||
1324 | cc.writeHeader(lowKey, v) | ||
1325 | } | ||
1326 | } | ||
1327 | return cc.hbuf.Bytes(), nil | ||
1328 | } | ||
1329 | |||
1330 | func (cc *ClientConn) writeHeader(name, value string) { | ||
1331 | if VerboseLogs { | ||
1332 | log.Printf("http2: Transport encoding header %q = %q", name, value) | ||
1333 | } | ||
1334 | cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) | ||
1335 | } | ||
1336 | |||
// resAndError is what the read loop delivers to RoundTrip on cs.resc:
// either a completed response or the error that ended the stream.
type resAndError struct {
	res *http.Response
	err error
}
1341 | |||
// newStream allocates the next client-initiated stream, wires up its
// flow-control windows, and registers it in cc.streams.
// requires cc.mu be held.
func (cc *ClientConn) newStream() *clientStream {
	cs := &clientStream{
		cc:        cc,
		ID:        cc.nextStreamID,
		resc:      make(chan resAndError, 1),
		peerReset: make(chan struct{}),
		done:      make(chan struct{}),
	}
	// Link the per-stream send/receive windows to the connection windows.
	cs.flow.add(int32(cc.initialWindowSize))
	cs.flow.setConnFlow(&cc.flow)
	cs.inflow.add(transportDefaultStreamFlow)
	cs.inflow.setConnFlow(&cc.inflow)
	cc.nextStreamID += 2 // client stream IDs advance by two (odd IDs)
	cc.streams[cs.ID] = cs
	return cs
}
1359 | |||
// forgetStreamID removes the stream with the given ID from the
// connection's stream map, if present (see streamByID with andRemove).
func (cc *ClientConn) forgetStreamID(id uint32) {
	cc.streamByID(id, true)
}
1363 | |||
// streamByID returns the stream with the given ID, or nil if unknown.
// If andRemove is true and the stream exists on a still-open connection,
// it is removed from cc.streams, the idle timer is restarted when it was
// the last stream, cs.done is closed, and cond waiters are woken.
func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cs := cc.streams[id]
	if andRemove && cs != nil && !cc.closed {
		cc.lastActive = time.Now()
		delete(cc.streams, id)
		if len(cc.streams) == 0 && cc.idleTimer != nil {
			cc.idleTimer.Reset(cc.idleTimeout)
		}
		close(cs.done)
		// Wake up checkResetOrDone via clientStream.awaitFlowControl and
		// wake up RoundTrip if there is a pending request.
		cc.cond.Broadcast()
	}
	return cs
}
1381 | |||
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
	cc            *ClientConn
	activeRes     map[uint32]*clientStream // keyed by streamID
	closeWhenIdle bool                     // close the conn once no responses are active
}
1388 | |||
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
	rl := &clientConnReadLoop{
		cc:        cc,
		activeRes: make(map[uint32]*clientStream),
	}

	defer rl.cleanup()
	cc.readerErr = rl.run()
	// On a connection-level protocol error, tell the peer why we're
	// hanging up before cleanup (deferred above) closes the connection.
	if ce, ok := cc.readerErr.(ConnectionError); ok {
		cc.wmu.Lock()
		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
		cc.wmu.Unlock()
	}
}
1404 | |||
// GoAwayError is returned by the Transport when the server closes the
// TCP connection after sending a GOAWAY frame.
type GoAwayError struct {
	LastStreamID uint32  // LastStreamID from the GOAWAY frame
	ErrCode      ErrCode // error code from the GOAWAY frame
	DebugData    string  // opaque debug data from the GOAWAY frame
}

// Error implements the error interface.
func (e GoAwayError) Error() string {
	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
		e.LastStreamID, e.ErrCode, e.DebugData)
}
1417 | |||
1418 | func isEOFOrNetReadError(err error) bool { | ||
1419 | if err == io.EOF { | ||
1420 | return true | ||
1421 | } | ||
1422 | ne, ok := err.(*net.OpError) | ||
1423 | return ok && ne.Op == "read" | ||
1424 | } | ||
1425 | |||
// cleanup runs when the read loop exits: it closes the underlying conn,
// removes it from the connection pool, upgrades an EOF/read error into a
// GoAwayError when the server had sent GOAWAY, and fails every stream
// still registered on this connection.
func (rl *clientConnReadLoop) cleanup() {
	cc := rl.cc
	defer cc.tconn.Close()
	defer cc.t.connPool().MarkDead(cc)
	defer close(cc.readerDone)

	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	// Close any response bodies if the server closes prematurely.
	// TODO: also do this if we've written the headers but not
	// gotten a response yet.
	err := cc.readerErr
	cc.mu.Lock()
	if cc.goAway != nil && isEOFOrNetReadError(err) {
		// The server hung up after a GOAWAY; surface the GOAWAY
		// details instead of a bare EOF/read error.
		err = GoAwayError{
			LastStreamID: cc.goAway.LastStreamID,
			ErrCode:      cc.goAway.ErrCode,
			DebugData:    cc.goAwayDebug,
		}
	} else if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	for _, cs := range rl.activeRes {
		cs.bufPipe.CloseWithError(err)
	}
	for _, cs := range cc.streams {
		// Non-blocking send: a stream that already got its response
		// has nobody reading cs.resc anymore.
		select {
		case cs.resc <- resAndError{err: err}:
		default:
		}
		close(cs.done)
	}
	cc.closed = true
	cc.cond.Broadcast()
	cc.mu.Unlock()
}
1464 | |||
// run reads frames until a terminal error, dispatching each frame type to
// its process* handler. Stream-level errors reset only that stream and the
// loop continues; any other error ends the loop (and the connection).
func (rl *clientConnReadLoop) run() error {
	cc := rl.cc
	rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
	gotReply := false // ever saw a HEADERS reply
	gotSettings := false
	for {
		f, err := cc.fr.ReadFrame()
		if err != nil {
			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
		}
		if se, ok := err.(StreamError); ok {
			// Per-stream error: reset that one stream and keep reading.
			if cs := cc.streamByID(se.StreamID, false); cs != nil {
				cs.cc.writeStreamReset(cs.ID, se.Code, err)
				cs.cc.forgetStreamID(cs.ID)
				if se.Cause == nil {
					se.Cause = cc.fr.errDetail
				}
				rl.endStreamError(cs, se)
			}
			continue
		} else if err != nil {
			return err
		}
		if VerboseLogs {
			cc.vlogf("http2: Transport received %s", summarizeFrame(f))
		}
		// The server's first frame must be SETTINGS.
		if !gotSettings {
			if _, ok := f.(*SettingsFrame); !ok {
				cc.logf("protocol error: received %T before a SETTINGS frame", f)
				return ConnectionError(ErrCodeProtocol)
			}
			gotSettings = true
		}
		maybeIdle := false // whether frame might transition us to idle

		switch f := f.(type) {
		case *MetaHeadersFrame:
			err = rl.processHeaders(f)
			maybeIdle = true
			gotReply = true
		case *DataFrame:
			err = rl.processData(f)
			maybeIdle = true
		case *GoAwayFrame:
			err = rl.processGoAway(f)
			maybeIdle = true
		case *RSTStreamFrame:
			err = rl.processResetStream(f)
			maybeIdle = true
		case *SettingsFrame:
			err = rl.processSettings(f)
		case *PushPromiseFrame:
			err = rl.processPushPromise(f)
		case *WindowUpdateFrame:
			err = rl.processWindowUpdate(f)
		case *PingFrame:
			err = rl.processPing(f)
		default:
			cc.logf("Transport: unhandled response frame type %T", f)
		}
		if err != nil {
			if VerboseLogs {
				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
			}
			return err
		}
		if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
			cc.closeIfIdle()
		}
	}
}
1536 | |||
// processHeaders handles a complete (merged) HEADERS block for a stream:
// the response headers on first sight, or trailers on a second block.
// It delivers the result to the waiting RoundTrip via cs.resc.
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, f.StreamEnded())
	if cs == nil {
		// We'd get here if we canceled a request while the
		// server had its response still in flight. So if this
		// was just something we canceled, ignore it.
		return nil
	}
	if !cs.firstByte {
		if cs.trace != nil {
			// TODO(bradfitz): move first response byte earlier,
			// when we first read the 9 byte header, not waiting
			// until all the HEADERS+CONTINUATION frames have been
			// merged. This works for now.
			traceFirstResponseByte(cs.trace)
		}
		cs.firstByte = true
	}
	if !cs.pastHeaders {
		cs.pastHeaders = true
	} else {
		// A second HEADERS block on the stream carries trailers.
		return rl.processTrailers(cs, f)
	}

	res, err := rl.handleResponse(cs, f)
	if err != nil {
		if _, ok := err.(ConnectionError); ok {
			return err
		}
		// Any other error type is a stream error.
		cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
		cs.resc <- resAndError{err: err}
		return nil // return nil from process* funcs to keep conn alive
	}
	if res == nil {
		// (nil, nil) special case. See handleResponse docs.
		return nil
	}
	if res.Body != noBody {
		rl.activeRes[cs.ID] = cs
	}
	cs.resTrailer = &res.Trailer
	cs.resc <- resAndError{res: res}
	return nil
}
1583 | |||
// handleResponse builds an *http.Response from a merged HEADERS block:
// status, headers, declared trailers, content length, body plumbing, and
// transparent gzip unwrapping when we asked for it.
//
// may return error types nil, or ConnectionError. Any other error value
// is a StreamError of type ErrCodeProtocol. The returned error in that case
// is the detail.
//
// As a special case, handleResponse may return (nil, nil) to skip the
// frame (currently only used for 100 expect continue). This special
// case is going away after Issue 13851 is fixed.
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
	if f.Truncated {
		return nil, errResponseHeaderListSize
	}

	status := f.PseudoValue("status")
	if status == "" {
		return nil, errors.New("missing status pseudo header")
	}
	statusCode, err := strconv.Atoi(status)
	if err != nil {
		return nil, errors.New("malformed non-numeric status pseudo header")
	}

	if statusCode == 100 {
		traceGot100Continue(cs.trace)
		if cs.on100 != nil {
			cs.on100() // forces any write delay timer to fire
		}
		cs.pastHeaders = false // do it all again
		return nil, nil
	}

	header := make(http.Header)
	res := &http.Response{
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		Header:     header,
		StatusCode: statusCode,
		Status:     status + " " + http.StatusText(statusCode),
	}
	for _, hf := range f.RegularFields() {
		key := http.CanonicalHeaderKey(hf.Name)
		if key == "Trailer" {
			// The Trailer header declares which trailer names to
			// expect later; record them with nil values for now.
			t := res.Trailer
			if t == nil {
				t = make(http.Header)
				res.Trailer = t
			}
			foreachHeaderElement(hf.Value, func(v string) {
				t[http.CanonicalHeaderKey(v)] = nil
			})
		} else {
			header[key] = append(header[key], hf.Value)
		}
	}

	streamEnded := f.StreamEnded()
	isHead := cs.req.Method == "HEAD"
	if !streamEnded || isHead {
		res.ContentLength = -1
		if clens := res.Header["Content-Length"]; len(clens) == 1 {
			if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
				res.ContentLength = clen64
			} else {
				// TODO: care? unlike http/1, it won't mess up our framing, so it's
				// more safe smuggling-wise to ignore.
			}
		} else if len(clens) > 1 {
			// TODO: care? unlike http/1, it won't mess up our framing, so it's
			// more safe smuggling-wise to ignore.
		}
	}

	if streamEnded || isHead {
		res.Body = noBody
		return res, nil
	}

	// Body follows in DATA frames; wire up the pipe the read loop fills.
	cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
	cs.bytesRemain = res.ContentLength
	res.Body = transportResponseBody{cs}
	go cs.awaitRequestCancel(cs.req)

	if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
		res.Header.Del("Content-Encoding")
		res.Header.Del("Content-Length")
		res.ContentLength = -1
		res.Body = &gzipReader{body: res.Body}
		setResponseUncompressed(res)
	}
	return res, nil
}
1674 | |||
1675 | func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { | ||
1676 | if cs.pastTrailers { | ||
1677 | // Too many HEADERS frames for this stream. | ||
1678 | return ConnectionError(ErrCodeProtocol) | ||
1679 | } | ||
1680 | cs.pastTrailers = true | ||
1681 | if !f.StreamEnded() { | ||
1682 | // We expect that any headers for trailers also | ||
1683 | // has END_STREAM. | ||
1684 | return ConnectionError(ErrCodeProtocol) | ||
1685 | } | ||
1686 | if len(f.PseudoFields()) > 0 { | ||
1687 | // No pseudo header fields are defined for trailers. | ||
1688 | // TODO: ConnectionError might be overly harsh? Check. | ||
1689 | return ConnectionError(ErrCodeProtocol) | ||
1690 | } | ||
1691 | |||
1692 | trailer := make(http.Header) | ||
1693 | for _, hf := range f.RegularFields() { | ||
1694 | key := http.CanonicalHeaderKey(hf.Name) | ||
1695 | trailer[key] = append(trailer[key], hf.Value) | ||
1696 | } | ||
1697 | cs.trailer = trailer | ||
1698 | |||
1699 | rl.endStream(cs) | ||
1700 | return nil | ||
1701 | } | ||
1702 | |||
// transportResponseBody is the concrete type of Transport.RoundTrip's
// Response.Body. It is an io.ReadCloser. On Read, it reads from
// cs.bufPipe. On Close it sends RST_STREAM if EOF wasn't already seen.
type transportResponseBody struct {
	cs *clientStream
}
1709 | |||
// Read implements io.Reader for the response body. It reads buffered
// DATA from cs.bufPipe, enforces any declared Content-Length, and
// returns flow-control credit to the server (connection-level and,
// while the stream is still healthy, stream-level) via WINDOW_UPDATE.
func (b transportResponseBody) Read(p []byte) (n int, err error) {
	cs := b.cs
	cc := cs.cc

	if cs.readErr != nil {
		// Sticky error from a previous Read (e.g. length mismatch).
		return 0, cs.readErr
	}
	n, err = b.cs.bufPipe.Read(p)
	if cs.bytesRemain != -1 {
		// A Content-Length was declared (see bytesRemain setup); police it.
		if int64(n) > cs.bytesRemain {
			// Server sent more than it declared: truncate the read,
			// reset the stream, and make the error sticky.
			n = int(cs.bytesRemain)
			if err == nil {
				err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
				cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
			}
			cs.readErr = err
			return int(cs.bytesRemain), err
		}
		cs.bytesRemain -= int64(n)
		if err == io.EOF && cs.bytesRemain > 0 {
			// Stream ended before the declared length was delivered.
			err = io.ErrUnexpectedEOF
			cs.readErr = err
			return n, err
		}
	}
	if n == 0 {
		// No flow control tokens to send back.
		return
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()

	var connAdd, streamAdd int32
	// Check the conn-level first, before the stream-level.
	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
		connAdd = transportDefaultConnFlow - v
		cc.inflow.add(connAdd)
	}
	if err == nil { // No need to refresh if the stream is over or failed.
		// Consider any buffered body data (read from the conn but not
		// consumed by the client) when computing flow control for this
		// stream.
		v := int(cs.inflow.available()) + cs.bufPipe.Len()
		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
			streamAdd = int32(transportDefaultStreamFlow - v)
			cs.inflow.add(streamAdd)
		}
	}
	if connAdd != 0 || streamAdd != 0 {
		// Send WINDOW_UPDATE frames under the write lock; deferred
		// unlocks release wmu before mu, matching acquisition order.
		cc.wmu.Lock()
		defer cc.wmu.Unlock()
		if connAdd != 0 {
			cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
		}
		if streamAdd != 0 {
			cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
		}
		cc.bw.Flush()
	}
	return
}
1772 | |||
// errClosedResponseBody is returned by reads on a response body after
// the caller has closed it.
var errClosedResponseBody = errors.New("http2: response body closed")
1774 | |||
// Close implements io.Closer. If the server has not already ended the
// stream, it sends RST_STREAM(CANCEL); any unread buffered bytes have
// their connection-level flow control returned. Finally the body pipe
// is broken and the stream forgotten. Close never fails.
func (b transportResponseBody) Close() error {
	cs := b.cs
	cc := cs.cc

	serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
	unread := cs.bufPipe.Len()

	if unread > 0 || !serverSentStreamEnd {
		// mu is acquired before wmu, matching the order used elsewhere.
		cc.mu.Lock()
		cc.wmu.Lock()
		if !serverSentStreamEnd {
			cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
			cs.didReset = true
		}
		// Return connection-level flow control.
		if unread > 0 {
			cc.inflow.add(int32(unread))
			cc.fr.WriteWindowUpdate(0, uint32(unread))
		}
		cc.bw.Flush()
		cc.wmu.Unlock()
		cc.mu.Unlock()
	}

	cs.bufPipe.BreakWithError(errClosedResponseBody)
	cc.forgetStreamID(cs.ID)
	return nil
}
1803 | |||
// processData handles an incoming DATA frame: it validates the
// stream's flow-control window, refunds credit for padding and for
// data on already-reset streams, and feeds the payload into the
// stream's buffered pipe for the response body to read.
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, f.StreamEnded())
	data := f.Data()
	if cs == nil {
		cc.mu.Lock()
		neverSent := cc.nextStreamID
		cc.mu.Unlock()
		if f.StreamID >= neverSent {
			// We never asked for this.
			cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
			return ConnectionError(ErrCodeProtocol)
		}
		// We probably did ask for this, but canceled. Just ignore it.
		// TODO: be stricter here? only silently ignore things which
		// we canceled, but not things which were closed normally
		// by the peer? Tough without accumulating too much state.

		// But at least return their flow control:
		if f.Length > 0 {
			cc.mu.Lock()
			cc.inflow.add(int32(f.Length))
			cc.mu.Unlock()

			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
		return nil
	}
	if !cs.firstByte {
		// DATA before any HEADERS is a protocol violation; kill the stream.
		cc.logf("protocol error: received DATA before a HEADERS frame")
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
			Code:     ErrCodeProtocol,
		})
		return nil
	}
	if f.Length > 0 {
		// Check the stream-level flow control window (cs.inflow).
		cc.mu.Lock()
		if cs.inflow.available() >= int32(f.Length) {
			cs.inflow.take(int32(f.Length))
		} else {
			cc.mu.Unlock()
			return ConnectionError(ErrCodeFlowControl)
		}
		// Return any padded flow control now, since we won't
		// refund it later on body reads.
		var refund int
		if pad := int(f.Length) - len(data); pad > 0 {
			refund += pad
		}
		// Return len(data) now if the stream is already closed,
		// since data will never be read.
		didReset := cs.didReset
		if didReset {
			refund += len(data)
		}
		if refund > 0 {
			cc.inflow.add(int32(refund))
			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(refund))
			if !didReset {
				cs.inflow.add(int32(refund))
				cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
			}
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
		cc.mu.Unlock()

		if len(data) > 0 && !didReset {
			if _, err := cs.bufPipe.Write(data); err != nil {
				rl.endStreamError(cs, err)
				return err
			}
		}
	}

	if f.StreamEnded() {
		rl.endStream(cs)
	}
	return nil
}
1890 | |||
// errInvalidTrailers reports malformed trailers.
// NOTE(review): not referenced in this portion of the file.
var errInvalidTrailers = errors.New("http2: invalid trailers")
1892 | |||
// endStream marks cs as cleanly finished (nil error), which also
// delivers any collected trailers to the response.
func (rl *clientConnReadLoop) endStream(cs *clientStream) {
	// TODO: check that any declared content-length matches, like
	// server.go's (*stream).endStream method.
	rl.endStreamError(cs, nil)
}
1898 | |||
// endStreamError closes cs's body pipe with err (io.EOF for a clean
// end, in which case trailers are copied into the response), removes
// the stream from the read loop's active set, and unblocks any
// RoundTrip waiting on the stream's result channel.
func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
	var code func()
	if err == nil {
		err = io.EOF
		code = cs.copyTrailers // runs when the pipe is drained
	}
	cs.bufPipe.closeWithErrorAndCode(err, code)
	delete(rl.activeRes, cs.ID)
	if isConnectionCloseRequest(cs.req) {
		// Request asked for Connection: close; retire this
		// connection once it goes idle.
		rl.closeWhenIdle = true
	}

	// Non-blocking send: RoundTrip may no longer be listening.
	select {
	case cs.resc <- resAndError{err: err}:
	default:
	}
}
1916 | |||
1917 | func (cs *clientStream) copyTrailers() { | ||
1918 | for k, vv := range cs.trailer { | ||
1919 | t := cs.resTrailer | ||
1920 | if *t == nil { | ||
1921 | *t = make(http.Header) | ||
1922 | } | ||
1923 | (*t)[k] = vv | ||
1924 | } | ||
1925 | } | ||
1926 | |||
1927 | func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { | ||
1928 | cc := rl.cc | ||
1929 | cc.t.connPool().MarkDead(cc) | ||
1930 | if f.ErrCode != 0 { | ||
1931 | // TODO: deal with GOAWAY more. particularly the error code | ||
1932 | cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) | ||
1933 | } | ||
1934 | cc.setGoAway(f) | ||
1935 | return nil | ||
1936 | } | ||
1937 | |||
// processSettings applies a SETTINGS frame from the server and then
// writes the required SETTINGS ACK. A SETTINGS ACK from the server is
// only legal while we are waiting for one.
func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
	cc := rl.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()

	if f.IsAck() {
		if cc.wantSettingsAck {
			cc.wantSettingsAck = false
			return nil
		}
		// Unsolicited ACK is a protocol error.
		return ConnectionError(ErrCodeProtocol)
	}

	err := f.ForeachSetting(func(s Setting) error {
		switch s.ID {
		case SettingMaxFrameSize:
			cc.maxFrameSize = s.Val
		case SettingMaxConcurrentStreams:
			cc.maxConcurrentStreams = s.Val
		case SettingMaxHeaderListSize:
			cc.peerMaxHeaderListSize = uint64(s.Val)
		case SettingInitialWindowSize:
			// Values above the maximum flow-control
			// window size of 2^31-1 MUST be treated as a
			// connection error (Section 5.4.1) of type
			// FLOW_CONTROL_ERROR.
			if s.Val > math.MaxInt32 {
				return ConnectionError(ErrCodeFlowControl)
			}

			// Adjust flow control of currently-open
			// frames by the difference of the old initial
			// window size and this one.
			delta := int32(s.Val) - int32(cc.initialWindowSize)
			for _, cs := range cc.streams {
				cs.flow.add(delta)
			}
			// Wake writers blocked on flow control; windows may have grown.
			cc.cond.Broadcast()

			cc.initialWindowSize = s.Val
		default:
			// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
			cc.vlogf("Unhandled Setting: %v", s)
		}
		return nil
	})
	if err != nil {
		return err
	}

	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	cc.fr.WriteSettingsAck()
	cc.bw.Flush()
	return cc.werr
}
1995 | |||
1996 | func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { | ||
1997 | cc := rl.cc | ||
1998 | cs := cc.streamByID(f.StreamID, false) | ||
1999 | if f.StreamID != 0 && cs == nil { | ||
2000 | return nil | ||
2001 | } | ||
2002 | |||
2003 | cc.mu.Lock() | ||
2004 | defer cc.mu.Unlock() | ||
2005 | |||
2006 | fl := &cc.flow | ||
2007 | if cs != nil { | ||
2008 | fl = &cs.flow | ||
2009 | } | ||
2010 | if !fl.add(int32(f.Increment)) { | ||
2011 | return ConnectionError(ErrCodeFlowControl) | ||
2012 | } | ||
2013 | cc.cond.Broadcast() | ||
2014 | return nil | ||
2015 | } | ||
2016 | |||
// processResetStream handles RST_STREAM from the server: it records
// the stream error, closes cs.peerReset exactly once (this read loop
// is the only goroutine that closes it), breaks the body pipe, and
// wakes any writer blocked on flow control.
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
	cs := rl.cc.streamByID(f.StreamID, true)
	if cs == nil {
		// TODO: return error if server tries to RST_STEAM an idle stream
		return nil
	}
	select {
	case <-cs.peerReset:
		// Already reset.
		// This is the only goroutine
		// which closes this, so there
		// isn't a race.
	default:
		err := streamError(cs.ID, f.ErrCode)
		cs.resetErr = err
		close(cs.peerReset)
		cs.bufPipe.CloseWithError(err)
		cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
	}
	delete(rl.activeRes, cs.ID)
	return nil
}
2039 | |||
// Ping sends a PING frame to the server and waits for the ack.
// Public implementation is in go17.go and not_go17.go
func (cc *ClientConn) ping(ctx contextContext) error {
	c := make(chan struct{})
	// Generate a random payload; retry on the (unlikely) chance
	// another in-flight ping already registered the same bytes.
	var p [8]byte
	for {
		if _, err := rand.Read(p[:]); err != nil {
			return err
		}
		cc.mu.Lock()
		// check for dup before insert
		if _, found := cc.pings[p]; !found {
			cc.pings[p] = c
			cc.mu.Unlock()
			break
		}
		cc.mu.Unlock()
	}
	// Write the frame, releasing wmu before blocking below so other
	// frames can still be written while we wait for the ack.
	cc.wmu.Lock()
	if err := cc.fr.WritePing(false, p); err != nil {
		cc.wmu.Unlock()
		return err
	}
	if err := cc.bw.Flush(); err != nil {
		cc.wmu.Unlock()
		return err
	}
	cc.wmu.Unlock()
	select {
	case <-c:
		// Ack received (the read loop closes c in processPing).
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-cc.readerDone:
		// connection closed
		return cc.readerErr
	}
}
2079 | |||
2080 | func (rl *clientConnReadLoop) processPing(f *PingFrame) error { | ||
2081 | if f.IsAck() { | ||
2082 | cc := rl.cc | ||
2083 | cc.mu.Lock() | ||
2084 | defer cc.mu.Unlock() | ||
2085 | // If ack, notify listener if any | ||
2086 | if c, ok := cc.pings[f.Data]; ok { | ||
2087 | close(c) | ||
2088 | delete(cc.pings, f.Data) | ||
2089 | } | ||
2090 | return nil | ||
2091 | } | ||
2092 | cc := rl.cc | ||
2093 | cc.wmu.Lock() | ||
2094 | defer cc.wmu.Unlock() | ||
2095 | if err := cc.fr.WritePing(true, f.Data); err != nil { | ||
2096 | return err | ||
2097 | } | ||
2098 | return cc.bw.Flush() | ||
2099 | } | ||
2100 | |||
// processPushPromise rejects all PUSH_PROMISE frames: this transport
// disables server push, so receiving one is a connection error.
func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
	// We told the peer we don't want them.
	// Spec says:
	// "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
	// setting of the peer endpoint is set to 0. An endpoint that
	// has set this setting and has received acknowledgement MUST
	// treat the receipt of a PUSH_PROMISE frame as a connection
	// error (Section 5.4.1) of type PROTOCOL_ERROR."
	return ConnectionError(ErrCodeProtocol)
}
2111 | |||
2112 | func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { | ||
2113 | // TODO: map err to more interesting error codes, once the | ||
2114 | // HTTP community comes up with some. But currently for | ||
2115 | // RST_STREAM there's no equivalent to GOAWAY frame's debug | ||
2116 | // data, and the error codes are all pretty vague ("cancel"). | ||
2117 | cc.wmu.Lock() | ||
2118 | cc.fr.WriteRSTStream(streamID, code) | ||
2119 | cc.bw.Flush() | ||
2120 | cc.wmu.Unlock() | ||
2121 | } | ||
2122 | |||
// Errors for header-list size limit violations and for pseudo header
// fields appearing in trailers.
var (
	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
	errRequestHeaderListSize  = errors.New("http2: request header list larger than peer's advertised limit")
	errPseudoTrailers         = errors.New("http2: invalid pseudo header in trailers")
)
2128 | |||
// logf logs unconditionally via the connection's Transport logger.
func (cc *ClientConn) logf(format string, args ...interface{}) {
	cc.t.logf(format, args...)
}

// vlogf logs only when verbose logging is enabled (via Transport.vlogf).
func (cc *ClientConn) vlogf(format string, args ...interface{}) {
	cc.t.vlogf(format, args...)
}

// vlogf logs only when the package-level VerboseLogs flag is set.
func (t *Transport) vlogf(format string, args ...interface{}) {
	if VerboseLogs {
		t.logf(format, args...)
	}
}

// logf logs unconditionally to the standard logger.
func (t *Transport) logf(format string, args ...interface{}) {
	log.Printf(format, args...)
}
2146 | |||
// noBody is a reusable empty response body, used for HEAD responses
// and streams that end with their headers.
var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
2148 | |||
// strSliceContains reports whether s appears in ss.
func strSliceContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
2157 | |||
2158 | type erringRoundTripper struct{ err error } | ||
2159 | |||
2160 | func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } | ||
2161 | |||
2162 | // gzipReader wraps a response body so it can lazily | ||
2163 | // call gzip.NewReader on the first call to Read | ||
2164 | type gzipReader struct { | ||
2165 | body io.ReadCloser // underlying Response.Body | ||
2166 | zr *gzip.Reader // lazily-initialized gzip reader | ||
2167 | zerr error // sticky error | ||
2168 | } | ||
2169 | |||
2170 | func (gz *gzipReader) Read(p []byte) (n int, err error) { | ||
2171 | if gz.zerr != nil { | ||
2172 | return 0, gz.zerr | ||
2173 | } | ||
2174 | if gz.zr == nil { | ||
2175 | gz.zr, err = gzip.NewReader(gz.body) | ||
2176 | if err != nil { | ||
2177 | gz.zerr = err | ||
2178 | return 0, err | ||
2179 | } | ||
2180 | } | ||
2181 | return gz.zr.Read(p) | ||
2182 | } | ||
2183 | |||
2184 | func (gz *gzipReader) Close() error { | ||
2185 | return gz.body.Close() | ||
2186 | } | ||
2187 | |||
2188 | type errorReader struct{ err error } | ||
2189 | |||
2190 | func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } | ||
2191 | |||
// bodyWriterState encapsulates various state around the Transport's writing
// of the request body, particularly regarding doing delayed writes of the body
// when the request contains "Expect: 100-continue".
type bodyWriterState struct {
	cs     *clientStream
	timer  *time.Timer   // if non-nil, we're doing a delayed write
	fnonce *sync.Once    // to call fn with; ensures fn runs at most once
	fn     func()        // the code to run in the goroutine, writing the body
	resc   chan error    // result of fn's execution
	delay  time.Duration // how long we should delay a delayed write for
}
2203 | |||
// getBodyWriterState prepares state for writing cs's request body.
// For ordinary requests s.fn writes the body as soon as it is
// scheduled; for "Expect: 100-continue" requests the write is delayed
// until the server responds or the expect-continue timeout elapses.
func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
	s.cs = cs
	if body == nil {
		// Nothing to write; leave fn nil.
		return
	}
	resc := make(chan error, 1)
	s.resc = resc
	s.fn = func() {
		cs.cc.mu.Lock()
		cs.startedWrite = true
		cs.cc.mu.Unlock()
		resc <- cs.writeRequestBody(body, cs.req.Body)
	}
	s.delay = t.expectContinueTimeout()
	if s.delay == 0 ||
		!httplex.HeaderValuesContainsToken(
			cs.req.Header["Expect"],
			"100-continue") {
		return
	}
	// fnonce guarantees the body writer runs at most once, whether
	// triggered by the timer below or by a 100-continue response.
	s.fnonce = new(sync.Once)

	// Arm the timer with a very large duration, which we'll
	// intentionally lower later. It has to be large now because
	// we need a handle to it before writing the headers, but the
	// s.delay value is defined to not start until after the
	// request headers were written.
	const hugeDuration = 365 * 24 * time.Hour
	s.timer = time.AfterFunc(hugeDuration, func() {
		s.fnonce.Do(s.fn)
	})
	return
}
2237 | |||
// cancel stops the delayed body-write timer, if one was armed.
func (s bodyWriterState) cancel() {
	if s.timer != nil {
		s.timer.Stop()
	}
}
2243 | |||
// on100 is called when the server replies 100 Continue: it stops the
// delay timer and starts the body write immediately.
func (s bodyWriterState) on100() {
	if s.timer == nil {
		// If we didn't do a delayed write, ignore the server's
		// bogus 100 continue response.
		return
	}
	s.timer.Stop()
	go func() { s.fnonce.Do(s.fn) }()
}
2252 | } | ||
2253 | |||
// scheduleBodyWrite starts writing the body, either immediately (in
// the common case) or after the delay timeout. It should not be
// called until after the headers have been written.
func (s bodyWriterState) scheduleBodyWrite() {
	if s.timer == nil {
		// We're not doing a delayed write (see
		// getBodyWriterState), so just start the writing
		// goroutine immediately.
		go s.fn()
		return
	}
	traceWait100Continue(s.cs.trace)
	if s.timer.Stop() {
		// Re-arm with the real expect-continue delay now that the
		// headers are out. If Stop returned false the timer already
		// fired, so the body write has been (or will be) started.
		s.timer.Reset(s.delay)
	}
}
2270 | |||
2271 | // isConnectionCloseRequest reports whether req should use its own | ||
2272 | // connection for a single request and then close the connection. | ||
2273 | func isConnectionCloseRequest(req *http.Request) bool { | ||
2274 | return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") | ||
2275 | } | ||
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 0000000..6b0dfae --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go | |||
@@ -0,0 +1,370 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import ( | ||
8 | "bytes" | ||
9 | "fmt" | ||
10 | "log" | ||
11 | "net/http" | ||
12 | "net/url" | ||
13 | "time" | ||
14 | |||
15 | "golang.org/x/net/http2/hpack" | ||
16 | "golang.org/x/net/lex/httplex" | ||
17 | ) | ||
18 | |||
// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
	// writeFrame writes the frame(s) this value represents to ctx's Framer.
	writeFrame(writeContext) error

	// staysWithinBuffer reports whether this writer promises that
	// it will only write less than or equal to size bytes, and it
	// won't Flush the write context.
	staysWithinBuffer(size int) bool
}
28 | |||
// writeContext is the interface needed by the various frame writer
// types below. All the writeFrame methods below are scheduled via the
// frame writing scheduler (see writeScheduler in writesched.go).
//
// This interface is implemented by *serverConn.
//
// TODO: decide whether to a) use this in the client code (which didn't
// end up using this yet, because it has a simpler design, not
// currently implementing priorities), or b) delete this and
// make the server code a bit more concrete.
type writeContext interface {
	Framer() *Framer
	Flush() error
	CloseConn() error
	// HeaderEncoder returns an HPACK encoder that writes to the
	// returned buffer.
	HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}
47 | |||
48 | // writeEndsStream reports whether w writes a frame that will transition | ||
49 | // the stream to a half-closed local state. This returns false for RST_STREAM, | ||
50 | // which closes the entire stream (not just the local half). | ||
51 | func writeEndsStream(w writeFramer) bool { | ||
52 | switch v := w.(type) { | ||
53 | case *writeData: | ||
54 | return v.endStream | ||
55 | case *writeResHeaders: | ||
56 | return v.endStream | ||
57 | case nil: | ||
58 | // This can only happen if the caller reuses w after it's | ||
59 | // been intentionally nil'ed out to prevent use. Keep this | ||
60 | // here to catch future refactoring breaking it. | ||
61 | panic("writeEndsStream called on nil writeFramer") | ||
62 | } | ||
63 | return false | ||
64 | } | ||
65 | |||
// flushFrameWriter is a writeFramer that writes no frame; scheduling
// it just flushes the connection's buffered writer.
type flushFrameWriter struct{}

// writeFrame flushes ctx's underlying writer.
func (flushFrameWriter) writeFrame(ctx writeContext) error {
	return ctx.Flush()
}

// staysWithinBuffer returns false: flushing writes to the wire.
func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
73 | |||
74 | type writeSettings []Setting | ||
75 | |||
76 | func (s writeSettings) staysWithinBuffer(max int) bool { | ||
77 | const settingSize = 6 // uint16 + uint32 | ||
78 | return frameHeaderLen+settingSize*len(s) <= max | ||
79 | |||
80 | } | ||
81 | |||
82 | func (s writeSettings) writeFrame(ctx writeContext) error { | ||
83 | return ctx.Framer().WriteSettings([]Setting(s)...) | ||
84 | } | ||
85 | |||
// writeGoAway writes a GOAWAY frame and, when it carries a non-zero
// error code, hangs up the connection shortly afterwards.
type writeGoAway struct {
	maxStreamID uint32 // highest stream ID this side will process
	code        ErrCode
}

func (p *writeGoAway) writeFrame(ctx writeContext) error {
	err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
	if p.code != 0 {
		ctx.Flush() // ignore error: we're hanging up on them anyway
		// Brief pause to give the peer a chance to receive the
		// GOAWAY before the connection is torn down.
		time.Sleep(50 * time.Millisecond)
		ctx.CloseConn()
	}
	return err
}

func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
102 | |||
// writeData writes a DATA frame for stream streamID with payload p,
// optionally setting the END_STREAM flag.
type writeData struct {
	streamID  uint32
	p         []byte
	endStream bool
}

// String describes the frame for debug logging.
func (w *writeData) String() string {
	return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
}

func (w *writeData) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}

// staysWithinBuffer reports whether the frame header plus payload fits
// within max bytes.
func (w *writeData) staysWithinBuffer(max int) bool {
	return frameHeaderLen+len(w.p) <= max
}
120 | |||
// handlerPanicRST is the message sent from handler goroutines when
// the handler panics. It resets the stream with ErrCodeInternal.
type handlerPanicRST struct {
	StreamID uint32
}

func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
}

// staysWithinBuffer: RST_STREAM has a fixed 4-byte payload.
func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
132 | |||
// writeFrame lets a StreamError be scheduled directly as an RST_STREAM
// frame carrying its error code.
func (se StreamError) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}

// staysWithinBuffer: RST_STREAM has a fixed 4-byte payload.
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
138 | |||
// writePingAck acknowledges a received PING by echoing its payload
// with the ACK flag set.
type writePingAck struct{ pf *PingFrame }

func (w writePingAck) writeFrame(ctx writeContext) error {
	return ctx.Framer().WritePing(true, w.pf.Data)
}

func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
146 | |||
// writeSettingsAck writes an empty SETTINGS frame with the ACK flag set.
type writeSettingsAck struct{}

func (writeSettingsAck) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteSettingsAck()
}

func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
154 | |||
155 | // splitHeaderBlock splits headerBlock into fragments so that each fragment fits | ||
156 | // in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true | ||
157 | // for the first/last fragment, respectively. | ||
158 | func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { | ||
159 | // For now we're lazy and just pick the minimum MAX_FRAME_SIZE | ||
160 | // that all peers must support (16KB). Later we could care | ||
161 | // more and send larger frames if the peer advertised it, but | ||
162 | // there's little point. Most headers are small anyway (so we | ||
163 | // generally won't have CONTINUATION frames), and extra frames | ||
164 | // only waste 9 bytes anyway. | ||
165 | const maxFrameSize = 16384 | ||
166 | |||
167 | first := true | ||
168 | for len(headerBlock) > 0 { | ||
169 | frag := headerBlock | ||
170 | if len(frag) > maxFrameSize { | ||
171 | frag = frag[:maxFrameSize] | ||
172 | } | ||
173 | headerBlock = headerBlock[len(frag):] | ||
174 | if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { | ||
175 | return err | ||
176 | } | ||
177 | first = false | ||
178 | } | ||
179 | return nil | ||
180 | } | ||
181 | |||
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
	streamID    uint32
	httpResCode int         // 0 means no ":status" line
	h           http.Header // may be nil
	trailers    []string    // if non-nil, which keys of h to write. nil means all.
	endStream   bool

	// Common headers encoded directly, after the fields of h.
	date          string
	contentType   string
	contentLength string
}
195 | |||
// encKV HPACK-encodes a single header field, logging it when
// VerboseLogs is enabled.
func encKV(enc *hpack.Encoder, k, v string) {
	if VerboseLogs {
		log.Printf("http2: server encoding header %q = %q", k, v)
	}
	enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}
202 | |||
// staysWithinBuffer conservatively returns false: computing the real
// encoded size up front would cost more than the fast path saves.
func (w *writeResHeaders) staysWithinBuffer(max int) bool {
	// TODO: this is a common one. It'd be nice to return true
	// here and get into the fast path if we could be clever and
	// calculate the size fast enough, or at least a conservative
	// uppper bound that usually fires. (Maybe if w.h and
	// w.trailers are nil, so we don't need to enumerate it.)
	// Otherwise I'm afraid that just calculating the length to
	// answer this question would be slower than the ~2µs benefit.
	return false
}
213 | |||
214 | func (w *writeResHeaders) writeFrame(ctx writeContext) error { | ||
215 | enc, buf := ctx.HeaderEncoder() | ||
216 | buf.Reset() | ||
217 | |||
218 | if w.httpResCode != 0 { | ||
219 | encKV(enc, ":status", httpCodeString(w.httpResCode)) | ||
220 | } | ||
221 | |||
222 | encodeHeaders(enc, w.h, w.trailers) | ||
223 | |||
224 | if w.contentType != "" { | ||
225 | encKV(enc, "content-type", w.contentType) | ||
226 | } | ||
227 | if w.contentLength != "" { | ||
228 | encKV(enc, "content-length", w.contentLength) | ||
229 | } | ||
230 | if w.date != "" { | ||
231 | encKV(enc, "date", w.date) | ||
232 | } | ||
233 | |||
234 | headerBlock := buf.Bytes() | ||
235 | if len(headerBlock) == 0 && w.trailers == nil { | ||
236 | panic("unexpected empty hpack") | ||
237 | } | ||
238 | |||
239 | return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) | ||
240 | } | ||
241 | |||
242 | func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { | ||
243 | if firstFrag { | ||
244 | return ctx.Framer().WriteHeaders(HeadersFrameParam{ | ||
245 | StreamID: w.streamID, | ||
246 | BlockFragment: frag, | ||
247 | EndStream: w.endStream, | ||
248 | EndHeaders: lastFrag, | ||
249 | }) | ||
250 | } else { | ||
251 | return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) | ||
252 | } | ||
253 | } | ||
254 | |||
// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
type writePushPromise struct {
	streamID uint32   // pusher stream
	method   string   // for :method
	url      *url.URL // for :scheme, :authority, :path
	h        http.Header

	// Creates an ID for a pushed stream. This runs on serveG just before
	// the frame is written. The returned ID is copied to promisedID.
	allocatePromisedID func() (uint32, error)
	promisedID         uint32
}
267 | |||
// staysWithinBuffer reports whether this frame is guaranteed to fit in
// a buffer of size max. Conservatively always false, since computing the
// encoded size up front would require enumerating w.h.
func (w *writePushPromise) staysWithinBuffer(max int) bool {
	// TODO: see writeResHeaders.staysWithinBuffer
	return false
}
272 | |||
273 | func (w *writePushPromise) writeFrame(ctx writeContext) error { | ||
274 | enc, buf := ctx.HeaderEncoder() | ||
275 | buf.Reset() | ||
276 | |||
277 | encKV(enc, ":method", w.method) | ||
278 | encKV(enc, ":scheme", w.url.Scheme) | ||
279 | encKV(enc, ":authority", w.url.Host) | ||
280 | encKV(enc, ":path", w.url.RequestURI()) | ||
281 | encodeHeaders(enc, w.h, nil) | ||
282 | |||
283 | headerBlock := buf.Bytes() | ||
284 | if len(headerBlock) == 0 { | ||
285 | panic("unexpected empty hpack") | ||
286 | } | ||
287 | |||
288 | return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) | ||
289 | } | ||
290 | |||
291 | func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { | ||
292 | if firstFrag { | ||
293 | return ctx.Framer().WritePushPromise(PushPromiseParam{ | ||
294 | StreamID: w.streamID, | ||
295 | PromiseID: w.promisedID, | ||
296 | BlockFragment: frag, | ||
297 | EndHeaders: lastFrag, | ||
298 | }) | ||
299 | } else { | ||
300 | return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) | ||
301 | } | ||
302 | } | ||
303 | |||
// write100ContinueHeadersFrame is a request to write an informational
// "100 Continue" HEADERS frame on the given stream.
type write100ContinueHeadersFrame struct {
	streamID uint32
}
307 | |||
308 | func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { | ||
309 | enc, buf := ctx.HeaderEncoder() | ||
310 | buf.Reset() | ||
311 | encKV(enc, ":status", "100") | ||
312 | return ctx.Framer().WriteHeaders(HeadersFrameParam{ | ||
313 | StreamID: w.streamID, | ||
314 | BlockFragment: buf.Bytes(), | ||
315 | EndStream: false, | ||
316 | EndHeaders: true, | ||
317 | }) | ||
318 | } | ||
319 | |||
320 | func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { | ||
321 | // Sloppy but conservative: | ||
322 | return 9+2*(len(":status")+len("100")) <= max | ||
323 | } | ||
324 | |||
// writeWindowUpdate is a request to write a WINDOW_UPDATE frame
// granting n additional flow-control bytes.
type writeWindowUpdate struct {
	streamID uint32 // or 0 for conn-level
	n        uint32 // window size increment
}
329 | |||
330 | func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } | ||
331 | |||
// writeFrame writes the WINDOW_UPDATE frame.
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}
335 | |||
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
// is encoded only if k is in keys. If keys is nil, all of h is encoded,
// in sorted key order.
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
	if keys == nil {
		sorter := sorterPool.Get().(*sorter)
		// Using defer here, since the returned keys from the
		// sorter.Keys method is only valid until the sorter
		// is returned:
		defer sorterPool.Put(sorter)
		keys = sorter.Keys(h)
	}
	for _, k := range keys {
		vv := h[k]
		k = lowerHeader(k) // HTTP/2 header field names are lowercase on the wire
		if !validWireHeaderFieldName(k) {
			// Skip it as backup paranoia. Per
			// golang.org/issue/14048, these should
			// already be rejected at a higher level.
			continue
		}
		isTE := k == "transfer-encoding"
		for _, v := range vv {
			if !httplex.ValidHeaderFieldValue(v) {
				// TODO: return an error? golang.org/issue/14048
				// For now just omit it.
				continue
			}
			// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
			// Only "transfer-encoding: trailers" is allowed through.
			if isTE && v != "trailers" {
				continue
			}
			encKV(enc, k, v)
		}
	}
}
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 0000000..4fe3073 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go | |||
@@ -0,0 +1,242 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import "fmt" | ||
8 | |||
// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
// Methods are never called concurrently, so implementations need no locking.
type WriteScheduler interface {
	// OpenStream opens a new stream in the write scheduler.
	// It is illegal to call this with streamID=0 or with a streamID that is
	// already open -- the call may panic.
	OpenStream(streamID uint32, options OpenStreamOptions)

	// CloseStream closes a stream in the write scheduler. Any frames queued on
	// this stream should be discarded. It is illegal to call this on a stream
	// that is not open -- the call may panic.
	CloseStream(streamID uint32)

	// AdjustStream adjusts the priority of the given stream. This may be called
	// on a stream that has not yet been opened or has been closed. Note that
	// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
	// https://tools.ietf.org/html/rfc7540#section-5.1
	AdjustStream(streamID uint32, priority PriorityParam)

	// Push queues a frame in the scheduler. In most cases, this will not be
	// called with wr.StreamID()!=0 unless that stream is currently open. The one
	// exception is RST_STREAM frames, which may be sent on idle or closed streams.
	Push(wr FrameWriteRequest)

	// Pop dequeues the next frame to write. Returns false if no frames can
	// be written. Frames with a given wr.StreamID() are Pop'd in the same
	// order they are Push'd.
	Pop() (wr FrameWriteRequest, ok bool)
}
38 | |||
// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
type OpenStreamOptions struct {
	// PusherID is zero if the stream was initiated by the client. Otherwise,
	// PusherID names the stream that pushed the newly opened stream.
	PusherID uint32
}
45 | |||
// FrameWriteRequest is a request to write a frame. It pairs the frame
// writer with the stream it belongs to and an optional completion channel.
type FrameWriteRequest struct {
	// write is the interface value that does the writing, once the
	// WriteScheduler has selected this frame to write. The write
	// functions are all defined in write.go.
	write writeFramer

	// stream is the stream on which this frame will be written.
	// nil for non-stream frames like PING and SETTINGS.
	stream *stream

	// done, if non-nil, must be a buffered channel with space for
	// 1 message and is sent the return value from write (or an
	// earlier error) when the frame has been written.
	done chan error
}
62 | |||
63 | // StreamID returns the id of the stream this frame will be written to. | ||
64 | // 0 is used for non-stream frames such as PING and SETTINGS. | ||
65 | func (wr FrameWriteRequest) StreamID() uint32 { | ||
66 | if wr.stream == nil { | ||
67 | if se, ok := wr.write.(StreamError); ok { | ||
68 | // (*serverConn).resetStream doesn't set | ||
69 | // stream because it doesn't necessarily have | ||
70 | // one. So special case this type of write | ||
71 | // message. | ||
72 | return se.StreamID | ||
73 | } | ||
74 | return 0 | ||
75 | } | ||
76 | return wr.stream.id | ||
77 | } | ||
78 | |||
79 | // DataSize returns the number of flow control bytes that must be consumed | ||
80 | // to write this entire frame. This is 0 for non-DATA frames. | ||
81 | func (wr FrameWriteRequest) DataSize() int { | ||
82 | if wd, ok := wr.write.(*writeData); ok { | ||
83 | return len(wd.p) | ||
84 | } | ||
85 | return 0 | ||
86 | } | ||
87 | |||
// Consume consumes min(n, available) bytes from this frame, where available
// is the number of flow control bytes available on the stream. Consume returns
// 0, 1, or 2 frames, where the integer return value gives the number of frames
// returned.
//
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
// underlying stream's flow control budget.
func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
	var empty FrameWriteRequest

	// Non-DATA frames are always consumed whole.
	wd, ok := wr.write.(*writeData)
	if !ok || len(wd.p) == 0 {
		return wr, empty, 1
	}

	// Might need to split after applying limits: the effective limit is
	// the smallest of the caller's n, the stream's flow-control window,
	// and the connection's maximum frame size.
	allowed := wr.stream.flow.available()
	if n < allowed {
		allowed = n
	}
	if wr.stream.sc.maxFrameSize < allowed {
		allowed = wr.stream.sc.maxFrameSize
	}
	if allowed <= 0 {
		return empty, empty, 0
	}
	if len(wd.p) > int(allowed) {
		wr.stream.flow.take(allowed)
		consumed := FrameWriteRequest{
			stream: wr.stream,
			write: &writeData{
				streamID: wd.streamID,
				p:        wd.p[:allowed],
				// Even if the original had endStream set, there
				// are bytes remaining because len(wd.p) > allowed,
				// so we know endStream is false.
				endStream: false,
			},
			// Our caller is blocking on the final DATA frame, not
			// this intermediate frame, so no need to wait.
			done: nil,
		}
		rest := FrameWriteRequest{
			stream: wr.stream,
			write: &writeData{
				streamID:  wd.streamID,
				p:         wd.p[allowed:],
				endStream: wd.endStream,
			},
			done: wr.done,
		}
		return consumed, rest, 2
	}

	// The frame is consumed whole.
	// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
	wr.stream.flow.take(int32(len(wd.p)))
	return wr, empty, 1
}
151 | |||
152 | // String is for debugging only. | ||
153 | func (wr FrameWriteRequest) String() string { | ||
154 | var des string | ||
155 | if s, ok := wr.write.(fmt.Stringer); ok { | ||
156 | des = s.String() | ||
157 | } else { | ||
158 | des = fmt.Sprintf("%T", wr.write) | ||
159 | } | ||
160 | return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) | ||
161 | } | ||
162 | |||
163 | // replyToWriter sends err to wr.done and panics if the send must block | ||
164 | // This does nothing if wr.done is nil. | ||
165 | func (wr *FrameWriteRequest) replyToWriter(err error) { | ||
166 | if wr.done == nil { | ||
167 | return | ||
168 | } | ||
169 | select { | ||
170 | case wr.done <- err: | ||
171 | default: | ||
172 | panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) | ||
173 | } | ||
174 | wr.write = nil // prevent use (assume it's tainted after wr.done send) | ||
175 | } | ||
176 | |||
// writeQueue is a FIFO of FrameWriteRequests, used by implementations
// of WriteScheduler.
type writeQueue struct {
	s []FrameWriteRequest
}
181 | |||
182 | func (q *writeQueue) empty() bool { return len(q.s) == 0 } | ||
183 | |||
// push appends wr to the tail of the queue.
func (q *writeQueue) push(wr FrameWriteRequest) {
	q.s = append(q.s, wr)
}
187 | |||
188 | func (q *writeQueue) shift() FrameWriteRequest { | ||
189 | if len(q.s) == 0 { | ||
190 | panic("invalid use of queue") | ||
191 | } | ||
192 | wr := q.s[0] | ||
193 | // TODO: less copy-happy queue. | ||
194 | copy(q.s, q.s[1:]) | ||
195 | q.s[len(q.s)-1] = FrameWriteRequest{} | ||
196 | q.s = q.s[:len(q.s)-1] | ||
197 | return wr | ||
198 | } | ||
199 | |||
200 | // consume consumes up to n bytes from q.s[0]. If the frame is | ||
201 | // entirely consumed, it is removed from the queue. If the frame | ||
202 | // is partially consumed, the frame is kept with the consumed | ||
203 | // bytes removed. Returns true iff any bytes were consumed. | ||
204 | func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { | ||
205 | if len(q.s) == 0 { | ||
206 | return FrameWriteRequest{}, false | ||
207 | } | ||
208 | consumed, rest, numresult := q.s[0].Consume(n) | ||
209 | switch numresult { | ||
210 | case 0: | ||
211 | return FrameWriteRequest{}, false | ||
212 | case 1: | ||
213 | q.shift() | ||
214 | case 2: | ||
215 | q.s[0] = rest | ||
216 | } | ||
217 | return consumed, true | ||
218 | } | ||
219 | |||
220 | type writeQueuePool []*writeQueue | ||
221 | |||
// put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) {
	// Zero each element so the pooled queue doesn't retain references
	// to old FrameWriteRequests.
	for i := range q.s {
		q.s[i] = FrameWriteRequest{}
	}
	q.s = q.s[:0] // keep the backing array for reuse
	*p = append(*p, q)
}
230 | |||
231 | // get returns an empty writeQueue. | ||
232 | func (p *writeQueuePool) get() *writeQueue { | ||
233 | ln := len(*p) | ||
234 | if ln == 0 { | ||
235 | return new(writeQueue) | ||
236 | } | ||
237 | x := ln - 1 | ||
238 | q := (*p)[x] | ||
239 | (*p)[x] = nil | ||
240 | *p = (*p)[:x] | ||
241 | return q | ||
242 | } | ||
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 0000000..848fed6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go | |||
@@ -0,0 +1,452 @@ | |||
1 | // Copyright 2016 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import ( | ||
8 | "fmt" | ||
9 | "math" | ||
10 | "sort" | ||
11 | ) | ||
12 | |||
// RFC 7540, Section 5.3.5: the default weight is 16.
// Weights are stored off-by-one (actual weight = stored + 1).
const priorityDefaultWeight = 15 // 16 = 15 + 1
15 | |||
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
	// MaxClosedNodesInTree controls the maximum number of closed streams to
	// retain in the priority tree. Setting this to zero saves a small amount
	// of memory at the cost of performance.
	//
	// See RFC 7540, Section 5.3.4:
	//   "It is possible for a stream to become closed while prioritization
	//   information ... is in transit. ... This potentially creates suboptimal
	//   prioritization, since the stream could be given a priority that is
	//   different from what is intended. To avoid these problems, an endpoint
	//   SHOULD retain stream prioritization state for a period after streams
	//   become closed. The longer state is retained, the lower the chance that
	//   streams are assigned incorrect or default priority values."
	MaxClosedNodesInTree int

	// MaxIdleNodesInTree controls the maximum number of idle streams to
	// retain in the priority tree. Setting this to zero saves a small amount
	// of memory at the cost of performance.
	//
	// See RFC 7540, Section 5.3.4:
	//   Similarly, streams that are in the "idle" state can be assigned
	//   priority or become a parent of other streams. This allows for the
	//   creation of a grouping node in the dependency tree, which enables
	//   more flexible expressions of priority. Idle streams begin with a
	//   default priority (Section 5.3.5).
	MaxIdleNodesInTree int

	// ThrottleOutOfOrderWrites enables write throttling to help ensure that
	// data is delivered in priority order. This works around a race where
	// stream B depends on stream A and both streams are about to call Write
	// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
	// write as much data from B as possible, but this is suboptimal because A
	// is a higher-priority stream. With throttling enabled, we write a small
	// amount of data from B to minimize the amount of bandwidth that B can
	// steal from A.
	ThrottleOutOfOrderWrites bool
}
54 | |||
55 | // NewPriorityWriteScheduler constructs a WriteScheduler that schedules | ||
56 | // frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. | ||
57 | // If cfg is nil, default options are used. | ||
58 | func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { | ||
59 | if cfg == nil { | ||
60 | // For justification of these defaults, see: | ||
61 | // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY | ||
62 | cfg = &PriorityWriteSchedulerConfig{ | ||
63 | MaxClosedNodesInTree: 10, | ||
64 | MaxIdleNodesInTree: 10, | ||
65 | ThrottleOutOfOrderWrites: false, | ||
66 | } | ||
67 | } | ||
68 | |||
69 | ws := &priorityWriteScheduler{ | ||
70 | nodes: make(map[uint32]*priorityNode), | ||
71 | maxClosedNodesInTree: cfg.MaxClosedNodesInTree, | ||
72 | maxIdleNodesInTree: cfg.MaxIdleNodesInTree, | ||
73 | enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, | ||
74 | } | ||
75 | ws.nodes[0] = &ws.root | ||
76 | if cfg.ThrottleOutOfOrderWrites { | ||
77 | ws.writeThrottleLimit = 1024 | ||
78 | } else { | ||
79 | ws.writeThrottleLimit = math.MaxInt32 | ||
80 | } | ||
81 | return ws | ||
82 | } | ||
83 | |||
// priorityNodeState is the lifecycle state of a priorityNode.
type priorityNodeState int

const (
	priorityNodeOpen priorityNodeState = iota
	priorityNodeClosed
	priorityNodeIdle
)
91 | |||
// priorityNode is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
type priorityNode struct {
	q            writeQueue        // queue of pending frames to write
	id           uint32            // id of the stream, or 0 for the root of the tree
	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
	state        priorityNodeState // open | closed | idle
	bytes        int64             // number of bytes written by this node, or 0 if closed
	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree

	// These links form the priority tree.
	parent     *priorityNode
	kids       *priorityNode // start of the kids list
	prev, next *priorityNode // doubly-linked list of siblings
}
108 | |||
// setParent moves n under parent, first unlinking it from its current
// parent's kid list. If parent is nil, n is removed from the tree.
// It is a no-op when n already has the requested parent.
func (n *priorityNode) setParent(parent *priorityNode) {
	if n == parent {
		panic("setParent to self")
	}
	if n.parent == parent {
		return
	}
	// Unlink from current parent.
	// (The inner 'parent' deliberately shadows the argument.)
	if parent := n.parent; parent != nil {
		if n.prev == nil {
			parent.kids = n.next // n was the head of the kid list
		} else {
			n.prev.next = n.next
		}
		if n.next != nil {
			n.next.prev = n.prev
		}
	}
	// Link to new parent.
	// If parent=nil, remove n from the tree.
	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
	n.parent = parent
	if parent == nil {
		n.next = nil
		n.prev = nil
	} else {
		n.next = parent.kids
		n.prev = nil
		if n.next != nil {
			n.next.prev = n
		}
		parent.kids = n
	}
}
143 | |||
144 | func (n *priorityNode) addBytes(b int64) { | ||
145 | n.bytes += b | ||
146 | for ; n != nil; n = n.parent { | ||
147 | n.subtreeBytes += b | ||
148 | } | ||
149 | } | ||
150 | |||
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
// with a non-empty write queue. When f returns true, this function returns true and the
// walk halts. tmp is used as scratch space for sorting.
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
	if !n.q.empty() && f(n, openParent) {
		return true
	}
	if n.kids == nil {
		return false
	}

	// Don't consider the root "open" when updating openParent since
	// we can't send data frames on the root stream (only control frames).
	if n.id != 0 {
		openParent = openParent || (n.state == priorityNodeOpen)
	}

	// Common case: only one kid or all kids have the same weight.
	// Some clients don't use weights; other clients (like web browsers)
	// use mostly-linear priority trees.
	w := n.kids.weight
	needSort := false
	for k := n.kids.next; k != nil; k = k.next {
		if k.weight != w {
			needSort = true
			break
		}
	}
	if !needSort {
		// Visit kids in list order (most recently inserted first).
		for k := n.kids; k != nil; k = k.next {
			if k.walkReadyInOrder(openParent, tmp, f) {
				return true
			}
		}
		return false
	}

	// Uncommon case: sort the child nodes. We remove the kids from the parent,
	// then re-insert after sorting so we can reuse tmp for future sort calls.
	*tmp = (*tmp)[:0]
	for n.kids != nil {
		*tmp = append(*tmp, n.kids)
		n.kids.setParent(nil)
	}
	sort.Sort(sortPriorityNodeSiblings(*tmp))
	// Re-insert in reverse so the highest-priority kid ends up at the head.
	for i := len(*tmp) - 1; i >= 0; i-- {
		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
	}
	for k := n.kids; k != nil; k = k.next {
		if k.walkReadyInOrder(openParent, tmp, f) {
			return true
		}
	}
	return false
}
209 | |||
// sortPriorityNodeSiblings sorts sibling nodes by scheduling preference.
type sortPriorityNodeSiblings []*priorityNode

func (z sortPriorityNodeSiblings) Len() int      { return len(z) }
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblings) Less(i, k int) bool {
	// Prefer the subtree that has sent fewer bytes relative to its weight.
	// See sections 5.3.2 and 5.3.4.
	wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
	wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
	if bi == 0 && bk == 0 {
		// Neither subtree has sent anything: heavier weight wins.
		return wi >= wk
	}
	if bk == 0 {
		// k has sent nothing yet, so it is at least as preferred as i.
		return false
	}
	return bi/bk <= wi/wk
}
227 | |||
// priorityWriteScheduler is a WriteScheduler that obeys RFC 7540
// stream priorities, maintained as a dependency tree.
type priorityWriteScheduler struct {
	// root is the root of the priority tree, where root.id = 0.
	// The root queues control frames that are not associated with any stream.
	root priorityNode

	// nodes maps stream ids to priority tree nodes.
	nodes map[uint32]*priorityNode

	// maxID is the maximum stream id in nodes.
	maxID uint32

	// lists of nodes that have been closed or are idle, but are kept in
	// the tree for improved prioritization. When the lengths exceed either
	// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
	closedNodes, idleNodes []*priorityNode

	// From the config.
	maxClosedNodesInTree int
	maxIdleNodesInTree   int
	writeThrottleLimit   int32
	enableWriteThrottle  bool

	// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
	tmp []*priorityNode

	// pool of empty queues for reuse.
	queuePool writeQueuePool
}
256 | |||
257 | func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { | ||
258 | // The stream may be currently idle but cannot be opened or closed. | ||
259 | if curr := ws.nodes[streamID]; curr != nil { | ||
260 | if curr.state != priorityNodeIdle { | ||
261 | panic(fmt.Sprintf("stream %d already opened", streamID)) | ||
262 | } | ||
263 | curr.state = priorityNodeOpen | ||
264 | return | ||
265 | } | ||
266 | |||
267 | // RFC 7540, Section 5.3.5: | ||
268 | // "All streams are initially assigned a non-exclusive dependency on stream 0x0. | ||
269 | // Pushed streams initially depend on their associated stream. In both cases, | ||
270 | // streams are assigned a default weight of 16." | ||
271 | parent := ws.nodes[options.PusherID] | ||
272 | if parent == nil { | ||
273 | parent = &ws.root | ||
274 | } | ||
275 | n := &priorityNode{ | ||
276 | q: *ws.queuePool.get(), | ||
277 | id: streamID, | ||
278 | weight: priorityDefaultWeight, | ||
279 | state: priorityNodeOpen, | ||
280 | } | ||
281 | n.setParent(parent) | ||
282 | ws.nodes[streamID] = n | ||
283 | if streamID > ws.maxID { | ||
284 | ws.maxID = streamID | ||
285 | } | ||
286 | } | ||
287 | |||
// CloseStream marks the stream closed, recycles its write queue, and
// either retains the node in the tree (bounded by maxClosedNodesInTree,
// per RFC 7540 Section 5.3.4) or removes it entirely.
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
	if streamID == 0 {
		panic("violation of WriteScheduler interface: cannot close stream 0")
	}
	if ws.nodes[streamID] == nil {
		panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
	}
	if ws.nodes[streamID].state != priorityNodeOpen {
		panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
	}

	n := ws.nodes[streamID]
	n.state = priorityNodeClosed
	// Closed nodes count as 0 bytes: undo this node's contribution to
	// every ancestor's subtreeBytes.
	n.addBytes(-n.bytes)

	// Return the queue's storage to the pool via a copy, then clear n.q.s
	// so a node kept in the tree doesn't alias the pooled slice.
	q := n.q
	ws.queuePool.put(&q)
	n.q.s = nil
	if ws.maxClosedNodesInTree > 0 {
		ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
	} else {
		ws.removeNode(n)
	}
}
312 | |||
// AdjustStream reprioritizes streamID according to priority, creating an
// idle grouping node if the stream is unknown (RFC 7540, Section 5.3.4)
// and applying the dependency/exclusivity rules of Section 5.3.3.
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
	if streamID == 0 {
		panic("adjustPriority on root")
	}

	// If streamID does not exist, there are two cases:
	// - A closed stream that has been removed (this will have ID <= maxID)
	// - An idle stream that is being used for "grouping" (this will have ID > maxID)
	n := ws.nodes[streamID]
	if n == nil {
		if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
			return
		}
		ws.maxID = streamID
		n = &priorityNode{
			q:      *ws.queuePool.get(),
			id:     streamID,
			weight: priorityDefaultWeight,
			state:  priorityNodeIdle,
		}
		n.setParent(&ws.root)
		ws.nodes[streamID] = n
		ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
	}

	// Section 5.3.1: A dependency on a stream that is not currently in the tree
	// results in that stream being given a default priority (Section 5.3.5).
	parent := ws.nodes[priority.StreamDep]
	if parent == nil {
		n.setParent(&ws.root)
		n.weight = priorityDefaultWeight
		return
	}

	// Ignore if the client tries to make a node its own parent.
	if n == parent {
		return
	}

	// Section 5.3.3:
	//   "If a stream is made dependent on one of its own dependencies, the
	//   formerly dependent stream is first moved to be dependent on the
	//   reprioritized stream's previous parent. The moved dependency retains
	//   its weight."
	//
	// That is: if parent depends on n, move parent to depend on n.parent.
	for x := parent.parent; x != nil; x = x.parent {
		if x == n {
			parent.setParent(n.parent)
			break
		}
	}

	// Section 5.3.3: The exclusive flag causes the stream to become the sole
	// dependency of its parent stream, causing other dependencies to become
	// dependent on the exclusive stream.
	if priority.Exclusive {
		k := parent.kids
		for k != nil {
			next := k.next // save: setParent relinks k out of this list
			if k != n {
				k.setParent(n)
			}
			k = next
		}
	}

	n.setParent(parent)
	n.weight = priority.Weight
}
383 | |||
384 | func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { | ||
385 | var n *priorityNode | ||
386 | if id := wr.StreamID(); id == 0 { | ||
387 | n = &ws.root | ||
388 | } else { | ||
389 | n = ws.nodes[id] | ||
390 | if n == nil { | ||
391 | // id is an idle or closed stream. wr should not be a HEADERS or | ||
392 | // DATA frame. However, wr can be a RST_STREAM. In this case, we | ||
393 | // push wr onto the root, rather than creating a new priorityNode, | ||
394 | // since RST_STREAM is tiny and the stream's priority is unknown | ||
395 | // anyway. See issue #17919. | ||
396 | if wr.DataSize() > 0 { | ||
397 | panic("add DATA on non-open stream") | ||
398 | } | ||
399 | n = &ws.root | ||
400 | } | ||
401 | } | ||
402 | n.q.push(wr) | ||
403 | } | ||
404 | |||
// Pop selects the next frame in priority order, consuming flow-control
// budget and adapting the out-of-order write throttle as it goes.
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
		// Only throttle nodes that have an open ancestor (i.e. a
		// higher-priority stream that may still produce data).
		limit := int32(math.MaxInt32)
		if openParent {
			limit = ws.writeThrottleLimit
		}
		wr, ok = n.q.consume(limit)
		if !ok {
			return false
		}
		n.addBytes(int64(wr.DataSize()))
		// If B depends on A and B continuously has data available but A
		// does not, gradually increase the throttling limit to allow B to
		// steal more and more bandwidth from A.
		if openParent {
			ws.writeThrottleLimit += 1024
			if ws.writeThrottleLimit < 0 { // overflow: clamp
				ws.writeThrottleLimit = math.MaxInt32
			}
		} else if ws.enableWriteThrottle {
			// Reset the limit once a non-throttled node gets to write.
			ws.writeThrottleLimit = 1024
		}
		return true
	})
	return wr, ok
}
431 | |||
432 | func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { | ||
433 | if maxSize == 0 { | ||
434 | return | ||
435 | } | ||
436 | if len(*list) == maxSize { | ||
437 | // Remove the oldest node, then shift left. | ||
438 | ws.removeNode((*list)[0]) | ||
439 | x := (*list)[1:] | ||
440 | copy(*list, x) | ||
441 | *list = (*list)[:len(x)] | ||
442 | } | ||
443 | *list = append(*list, n) | ||
444 | } | ||
445 | |||
446 | func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { | ||
447 | for k := n.kids; k != nil; k = k.next { | ||
448 | k.setParent(n.parent) | ||
449 | } | ||
450 | n.setParent(nil) | ||
451 | delete(ws.nodes, n.id) | ||
452 | } | ||
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 0000000..36d7919 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go | |||
@@ -0,0 +1,72 @@ | |||
1 | // Copyright 2014 The Go Authors. All rights reserved. | ||
2 | // Use of this source code is governed by a BSD-style | ||
3 | // license that can be found in the LICENSE file. | ||
4 | |||
5 | package http2 | ||
6 | |||
7 | import "math" | ||
8 | |||
9 | // NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 | ||
10 | // priorities. Control frames like SETTINGS and PING are written before DATA | ||
11 | // frames, but if no control frames are queued and multiple streams have queued | ||
12 | // HEADERS or DATA frames, Pop selects a ready stream arbitrarily. | ||
13 | func NewRandomWriteScheduler() WriteScheduler { | ||
14 | return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} | ||
15 | } | ||
16 | |||
// randomWriteScheduler is the WriteScheduler returned by
// NewRandomWriteScheduler: control frames first, then any ready stream
// in unspecified (map-iteration) order.
type randomWriteScheduler struct {
	// zero are frames not associated with a specific stream.
	// These are control frames (stream ID 0) and always take priority.
	zero writeQueue

	// sq contains the stream-specific queues, keyed by stream ID.
	// When a stream is idle or closed, it's deleted from the map.
	sq map[uint32]*writeQueue

	// pool of empty queues for reuse.
	queuePool writeQueuePool
}
28 | |||
29 | func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { | ||
30 | // no-op: idle streams are not tracked | ||
31 | } | ||
32 | |||
33 | func (ws *randomWriteScheduler) CloseStream(streamID uint32) { | ||
34 | q, ok := ws.sq[streamID] | ||
35 | if !ok { | ||
36 | return | ||
37 | } | ||
38 | delete(ws.sq, streamID) | ||
39 | ws.queuePool.put(q) | ||
40 | } | ||
41 | |||
42 | func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { | ||
43 | // no-op: priorities are ignored | ||
44 | } | ||
45 | |||
46 | func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { | ||
47 | id := wr.StreamID() | ||
48 | if id == 0 { | ||
49 | ws.zero.push(wr) | ||
50 | return | ||
51 | } | ||
52 | q, ok := ws.sq[id] | ||
53 | if !ok { | ||
54 | q = ws.queuePool.get() | ||
55 | ws.sq[id] = q | ||
56 | } | ||
57 | q.push(wr) | ||
58 | } | ||
59 | |||
60 | func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { | ||
61 | // Control frames first. | ||
62 | if !ws.zero.empty() { | ||
63 | return ws.zero.shift(), true | ||
64 | } | ||
65 | // Iterate over all non-idle streams until finding one that can be consumed. | ||
66 | for _, q := range ws.sq { | ||
67 | if wr, ok := q.consume(math.MaxInt32); ok { | ||
68 | return wr, true | ||
69 | } | ||
70 | } | ||
71 | return FrameWriteRequest{}, false | ||
72 | } | ||