blob: 76a92e0ca6b6b8610e89a69e8b04191529e82240 [file] [log] [blame]
Matteo Scandoloa4285862020-12-01 18:10:10 -08001// Copyright 2015 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Transport code.
6
7package http2
8
9import (
10 "bufio"
11 "bytes"
12 "compress/gzip"
13 "context"
14 "crypto/rand"
15 "crypto/tls"
16 "errors"
17 "fmt"
18 "io"
19 "io/ioutil"
20 "log"
21 "math"
22 mathrand "math/rand"
23 "net"
24 "net/http"
25 "net/http/httptrace"
26 "net/textproto"
27 "sort"
28 "strconv"
29 "strings"
30 "sync"
31 "sync/atomic"
32 "time"
33
34 "golang.org/x/net/http/httpguts"
35 "golang.org/x/net/http2/hpack"
36 "golang.org/x/net/idna"
37)
38
const (
	// transportDefaultConnFlow is how many connection-level flow control
	// tokens we give the server at start-up, past the default 64k.
	transportDefaultConnFlow = 1 << 30

	// transportDefaultStreamFlow is how many stream-level flow
	// control tokens we announce to the peer, and how many bytes
	// we buffer per stream.
	transportDefaultStreamFlow = 4 << 20

	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
	// a stream-level WINDOW_UPDATE for at a time.
	transportDefaultStreamMinRefresh = 4 << 10

	// defaultUserAgent is the Transport's default User-Agent string.
	defaultUserAgent = "Go-http-client/2.0"
)
55
// Transport is an HTTP/2 Transport.
//
// A Transport internally caches connections to servers. It is safe
// for concurrent use by multiple goroutines.
type Transport struct {
	// DialTLS specifies an optional dial function for creating
	// TLS connections for requests.
	//
	// If DialTLS is nil, tls.Dial is used.
	//
	// If the returned net.Conn has a ConnectionState method like tls.Conn,
	// it will be used to set http.Response.TLS.
	DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)

	// TLSClientConfig specifies the TLS configuration to use with
	// tls.Client. If nil, the default configuration is used.
	TLSClientConfig *tls.Config

	// ConnPool optionally specifies an alternate connection pool to use.
	// If nil, the default is used.
	ConnPool ClientConnPool

	// DisableCompression, if true, prevents the Transport from
	// requesting compression with an "Accept-Encoding: gzip"
	// request header when the Request contains no existing
	// Accept-Encoding value. If the Transport requests gzip on
	// its own and gets a gzipped response, it's transparently
	// decoded in the Response.Body. However, if the user
	// explicitly requested gzip it is not automatically
	// uncompressed.
	DisableCompression bool

	// AllowHTTP, if true, permits HTTP/2 requests using the insecure,
	// plain-text "http" scheme. Note that this does not enable h2c support.
	AllowHTTP bool

	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
	// send in the initial settings frame. It is how many bytes
	// of response headers are allowed. Unlike the http2 spec, zero here
	// means to use a default limit (currently 10MB). If you actually
	// want to advertise an unlimited value to the peer, Transport
	// interprets the highest possible value here (0xffffffff or 1<<32-1)
	// to mean no limit.
	MaxHeaderListSize uint32

	// StrictMaxConcurrentStreams controls whether the server's
	// SETTINGS_MAX_CONCURRENT_STREAMS should be respected
	// globally. If false, new TCP connections are created to the
	// server as needed to keep each under the per-connection
	// SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
	// server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
	// a global limit and callers of RoundTrip block when needed,
	// waiting for their turn.
	StrictMaxConcurrentStreams bool

	// ReadIdleTimeout is the timeout after which a health check using ping
	// frame will be carried out if no frame is received on the connection.
	// Note that a ping response is considered a received frame, so if
	// there is no other traffic on the connection, the health check will
	// be performed every ReadIdleTimeout interval.
	// If zero, no health check is performed.
	ReadIdleTimeout time.Duration

	// PingTimeout is the timeout after which the connection will be closed
	// if a response to Ping is not received.
	// Defaults to 15s.
	PingTimeout time.Duration

	// t1, if non-nil, is the standard library Transport using
	// this transport. Its settings are used (but not its
	// RoundTrip method, etc).
	t1 *http.Transport

	connPoolOnce  sync.Once      // guards lazy init of connPoolOrDef
	connPoolOrDef ClientConnPool // non-nil version of ConnPool
}
132
133func (t *Transport) maxHeaderListSize() uint32 {
134 if t.MaxHeaderListSize == 0 {
135 return 10 << 20
136 }
137 if t.MaxHeaderListSize == 0xffffffff {
138 return 0
139 }
140 return t.MaxHeaderListSize
141}
142
143func (t *Transport) disableCompression() bool {
144 return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
145}
146
147func (t *Transport) pingTimeout() time.Duration {
148 if t.PingTimeout == 0 {
149 return 15 * time.Second
150 }
151 return t.PingTimeout
152
153}
154
155// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
156// It returns an error if t1 has already been HTTP/2-enabled.
157func ConfigureTransport(t1 *http.Transport) error {
158 _, err := configureTransport(t1)
159 return err
160}
161
// configureTransport wires an HTTP/2 Transport on top of t1: it
// registers the new Transport for the "https" scheme, makes sure t1's
// TLS config advertises "h2" (and "http/1.1") via ALPN, and installs
// an upgrade function that hands h2-negotiated TLS connections to the
// shared connection pool.
func configureTransport(t1 *http.Transport) (*Transport, error) {
	connPool := new(clientConnPool)
	t2 := &Transport{
		ConnPool: noDialClientConnPool{connPool},
		t1:       t1,
	}
	connPool.t = t2
	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
		return nil, err
	}
	if t1.TLSClientConfig == nil {
		t1.TLSClientConfig = new(tls.Config)
	}
	// "h2" goes first so it is preferred during ALPN negotiation.
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
		t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
	}
	// upgradeFn is invoked by net/http when a TLS connection
	// negotiates "h2"; it offers the connection to the pool.
	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
		addr := authorityAddr("https", authority)
		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
			go c.Close()
			return erringRoundTripper{err}
		} else if !used {
			// Turns out we don't need this c.
			// For example, two goroutines made requests to the same host
			// at the same time, both kicking off TCP dials. (since protocol
			// was unknown)
			go c.Close()
		}
		return t2
	}
	if m := t1.TLSNextProto; len(m) == 0 {
		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
			"h2": upgradeFn,
		}
	} else {
		m["h2"] = upgradeFn
	}
	return t2, nil
}
204
// connPool returns the connection pool in use, lazily initializing it
// exactly once (to ConnPool if set, else a default clientConnPool).
func (t *Transport) connPool() ClientConnPool {
	t.connPoolOnce.Do(t.initConnPool)
	return t.connPoolOrDef
}
209
210func (t *Transport) initConnPool() {
211 if t.ConnPool != nil {
212 t.connPoolOrDef = t.ConnPool
213 } else {
214 t.connPoolOrDef = &clientConnPool{t: t}
215 }
216}
217
// ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server.
type ClientConn struct {
	t        *Transport
	tconn    net.Conn             // usually *tls.Conn, except specialized impls
	tlsState *tls.ConnectionState // nil only for specialized impls
	reused   uint32               // whether conn is being reused; atomic
	singleUse bool                // whether being used for a single http.Request

	// readLoop goroutine fields:
	readerDone chan struct{} // closed on error
	readerErr  error         // set before readerDone is closed

	idleTimeout time.Duration // or 0 for never
	idleTimer   *time.Timer

	mu              sync.Mutex // guards following
	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
	inflow          flow       // peer's conn-level flow control
	closing         bool       // GOAWAY sent; no new requests (see sendGoAway)
	closed          bool
	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
	goAwayDebug     string                   // goAway frame's debug data, retained as a string
	streams         map[uint32]*clientStream // client-initiated
	nextStreamID    uint32
	pendingRequests int                       // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
	bw              *bufio.Writer
	br              *bufio.Reader
	fr              *Framer
	lastActive      time.Time
	lastIdle        time.Time // time last idle
	// Settings from peer: (also guarded by mu)
	maxFrameSize          uint32
	maxConcurrentStreams  uint32
	peerMaxHeaderListSize uint64
	initialWindowSize     uint32

	hbuf    bytes.Buffer // HPACK encoder writes into this
	henc    *hpack.Encoder
	freeBuf [][]byte // scratch-buffer free list; see frameScratchBuffer

	wmu  sync.Mutex // held while writing; acquire AFTER mu if holding both
	werr error      // first write error that has occurred
}
265
// clientStream is the state for a single HTTP/2 stream. One of these
// is created for each Transport.RoundTrip call.
type clientStream struct {
	cc            *ClientConn
	req           *http.Request
	trace         *httptrace.ClientTrace // or nil
	ID            uint32
	resc          chan resAndError // response (or error) delivery channel
	bufPipe       pipe             // buffered pipe with the flow-controlled response payload
	startedWrite  bool             // started request body write; guarded by cc.mu
	requestedGzip bool
	on100         func() // optional code to run if get a 100 continue response

	flow        flow  // guarded by cc.mu
	inflow      flow  // guarded by cc.mu
	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
	readErr     error // sticky read error; owned by transportResponseBody.Read
	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu

	peerReset chan struct{} // closed on peer reset
	resetErr  error         // populated before peerReset is closed

	done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu

	// owned by clientConnReadLoop:
	firstByte    bool  // got the first response byte
	pastHeaders  bool  // got first MetaHeadersFrame (actual headers)
	pastTrailers bool  // got optional second MetaHeadersFrame (trailers)
	num1xx       uint8 // number of 1xx responses seen

	trailer    http.Header  // accumulated trailers
	resTrailer *http.Header // client's Response.Trailer
}
300
301// awaitRequestCancel waits for the user to cancel a request or for the done
302// channel to be signaled. A non-nil error is returned only if the request was
303// canceled.
304func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
305 ctx := req.Context()
306 if req.Cancel == nil && ctx.Done() == nil {
307 return nil
308 }
309 select {
310 case <-req.Cancel:
311 return errRequestCanceled
312 case <-ctx.Done():
313 return ctx.Err()
314 case <-done:
315 return nil
316 }
317}
318
319var got1xxFuncForTests func(int, textproto.MIMEHeader) error
320
321// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
322// if any. It returns nil if not set or if the Go version is too old.
323func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
324 if fn := got1xxFuncForTests; fn != nil {
325 return fn
326 }
327 return traceGot1xxResponseFunc(cs.trace)
328}
329
330// awaitRequestCancel waits for the user to cancel a request, its context to
331// expire, or for the request to be done (any way it might be removed from the
332// cc.streams map: peer reset, successful completion, TCP connection breakage,
333// etc). If the request is canceled, then cs will be canceled and closed.
334func (cs *clientStream) awaitRequestCancel(req *http.Request) {
335 if err := awaitRequestCancel(req, cs.done); err != nil {
336 cs.cancelStream()
337 cs.bufPipe.CloseWithError(err)
338 }
339}
340
341func (cs *clientStream) cancelStream() {
342 cc := cs.cc
343 cc.mu.Lock()
344 didReset := cs.didReset
345 cs.didReset = true
346 cc.mu.Unlock()
347
348 if !didReset {
349 cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
350 cc.forgetStreamID(cs.ID)
351 }
352}
353
354// checkResetOrDone reports any error sent in a RST_STREAM frame by the
355// server, or errStreamClosed if the stream is complete.
356func (cs *clientStream) checkResetOrDone() error {
357 select {
358 case <-cs.peerReset:
359 return cs.resetErr
360 case <-cs.done:
361 return errStreamClosed
362 default:
363 return nil
364 }
365}
366
367func (cs *clientStream) getStartedWrite() bool {
368 cc := cs.cc
369 cc.mu.Lock()
370 defer cc.mu.Unlock()
371 return cs.startedWrite
372}
373
374func (cs *clientStream) abortRequestBodyWrite(err error) {
375 if err == nil {
376 panic("nil error")
377 }
378 cc := cs.cc
379 cc.mu.Lock()
380 cs.stopReqBody = err
381 cc.cond.Broadcast()
382 cc.mu.Unlock()
383}
384
385type stickyErrWriter struct {
386 w io.Writer
387 err *error
388}
389
390func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
391 if *sew.err != nil {
392 return 0, *sew.err
393 }
394 n, err = sew.w.Write(p)
395 *sew.err = err
396 return
397}
398
399// noCachedConnError is the concrete type of ErrNoCachedConn, which
400// needs to be detected by net/http regardless of whether it's its
401// bundled version (in h2_bundle.go with a rewritten type name) or
402// from a user's x/net/http2. As such, as it has a unique method name
403// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
404// isNoCachedConnError.
405type noCachedConnError struct{}
406
407func (noCachedConnError) IsHTTP2NoCachedConnError() {}
408func (noCachedConnError) Error() string { return "http2: no cached connection was available" }
409
410// isNoCachedConnError reports whether err is of type noCachedConnError
411// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
412// may coexist in the same running program.
413func isNoCachedConnError(err error) bool {
414 _, ok := err.(interface{ IsHTTP2NoCachedConnError() })
415 return ok
416}
417
418var ErrNoCachedConn error = noCachedConnError{}
419
// RoundTripOpt are options for the Transport.RoundTripOpt method.
type RoundTripOpt struct {
	// OnlyCachedConn controls whether RoundTripOpt may
	// create a new TCP connection. If set true and
	// no cached connection is available, RoundTripOpt
	// will return ErrNoCachedConn.
	OnlyCachedConn bool
}
428
// RoundTrip implements http.RoundTripper, using default RoundTripOpt options.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	return t.RoundTripOpt(req, RoundTripOpt{})
}
432
433// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
434// and returns a host:port. The port 443 is added if needed.
435func authorityAddr(scheme string, authority string) (addr string) {
436 host, port, err := net.SplitHostPort(authority)
437 if err != nil { // authority didn't have a port
438 port = "443"
439 if scheme == "http" {
440 port = "80"
441 }
442 host = authority
443 }
444 if a, err := idna.ToASCII(host); err == nil {
445 host = a
446 }
447 // IPv6 address literal, without a port:
448 if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
449 return host + ":" + port
450 }
451 return net.JoinHostPort(host, port)
452}
453
// RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
		return nil, errors.New("http2: unsupported scheme")
	}

	addr := authorityAddr(req.URL.Scheme, req.URL.Host)
	// Retry loop: replayable failures (see shouldRetryRequest) are
	// retried up to 6 times.
	for retry := 0; ; retry++ {
		cc, err := t.connPool().GetClientConn(req, addr)
		if err != nil {
			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
			return nil, err
		}
		// reused is false only for the very first request on cc.
		reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
		traceGotConn(req, cc, reused)
		res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
		if err != nil && retry <= 6 {
			if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
				// After the first retry, do exponential backoff with 10% jitter.
				if retry == 0 {
					continue
				}
				backoff := float64(uint(1) << (uint(retry) - 1))
				backoff += backoff * (0.1 * mathrand.Float64())
				select {
				case <-time.After(time.Second * time.Duration(backoff)):
					continue
				case <-req.Context().Done():
					return nil, req.Context().Err()
				}
			}
		}
		if err != nil {
			t.vlogf("RoundTrip failure: %v", err)
			return nil, err
		}
		return res, nil
	}
}
493
494// CloseIdleConnections closes any connections which were previously
495// connected from previous requests but are now sitting idle.
496// It does not interrupt any connections currently in use.
497func (t *Transport) CloseIdleConnections() {
498 if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
499 cp.closeIdleConnections()
500 }
501}
502
// Sentinel errors describing why a client connection could not serve a
// request; some of these are treated as retryable (see canRetryError).
var (
	errClientConnClosed    = errors.New("http2: client conn is closed")
	errClientConnUnusable  = errors.New("http2: client conn not usable")
	errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)
508
509// shouldRetryRequest is called by RoundTrip when a request fails to get
510// response headers. It is always called with a non-nil error.
511// It returns either a request to retry (either the same request, or a
512// modified clone), or an error if the request can't be replayed.
513func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
514 if !canRetryError(err) {
515 return nil, err
516 }
517 // If the Body is nil (or http.NoBody), it's safe to reuse
518 // this request and its Body.
519 if req.Body == nil || req.Body == http.NoBody {
520 return req, nil
521 }
522
523 // If the request body can be reset back to its original
524 // state via the optional req.GetBody, do that.
525 if req.GetBody != nil {
526 // TODO: consider a req.Body.Close here? or audit that all caller paths do?
527 body, err := req.GetBody()
528 if err != nil {
529 return nil, err
530 }
531 newReq := *req
532 newReq.Body = body
533 return &newReq, nil
534 }
535
536 // The Request.Body can't reset back to the beginning, but we
537 // don't seem to have started to read from it yet, so reuse
538 // the request directly. The "afterBodyWrite" means the
539 // bodyWrite process has started, which becomes true before
540 // the first Read.
541 if !afterBodyWrite {
542 return req, nil
543 }
544
545 return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
546}
547
548func canRetryError(err error) bool {
549 if err == errClientConnUnusable || err == errClientConnGotGoAway {
550 return true
551 }
552 if se, ok := err.(StreamError); ok {
553 return se.Code == ErrCodeRefusedStream
554 }
555 return false
556}
557
558func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
559 host, _, err := net.SplitHostPort(addr)
560 if err != nil {
561 return nil, err
562 }
563 tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
564 if err != nil {
565 return nil, err
566 }
567 return t.newClientConn(tconn, singleUse)
568}
569
570func (t *Transport) newTLSConfig(host string) *tls.Config {
571 cfg := new(tls.Config)
572 if t.TLSClientConfig != nil {
573 *cfg = *t.TLSClientConfig.Clone()
574 }
575 if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
576 cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
577 }
578 if cfg.ServerName == "" {
579 cfg.ServerName = host
580 }
581 return cfg
582}
583
584func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
585 if t.DialTLS != nil {
586 return t.DialTLS
587 }
588 return t.dialTLSDefault
589}
590
591func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
592 cn, err := tls.Dial(network, addr, cfg)
593 if err != nil {
594 return nil, err
595 }
596 if err := cn.Handshake(); err != nil {
597 return nil, err
598 }
599 if !cfg.InsecureSkipVerify {
600 if err := cn.VerifyHostname(cfg.ServerName); err != nil {
601 return nil, err
602 }
603 }
604 state := cn.ConnectionState()
605 if p := state.NegotiatedProtocol; p != NextProtoTLS {
606 return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
607 }
608 if !state.NegotiatedProtocolIsMutual {
609 return nil, errors.New("http2: could not negotiate protocol mutually")
610 }
611 return cn, nil
612}
613
614// disableKeepAlives reports whether connections should be closed as
615// soon as possible after handling the first request.
616func (t *Transport) disableKeepAlives() bool {
617 return t.t1 != nil && t.t1.DisableKeepAlives
618}
619
620func (t *Transport) expectContinueTimeout() time.Duration {
621 if t.t1 == nil {
622 return 0
623 }
624 return t.t1.ExpectContinueTimeout
625}
626
// NewClientConn creates a ClientConn on an existing connection c.
// The connection is single-use when the wrapped HTTP/1 transport
// (if any) has DisableKeepAlives set.
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
	return t.newClientConn(c, t.disableKeepAlives())
}
630
// newClientConn builds a ClientConn on c, writes the client preface
// plus the initial SETTINGS and connection WINDOW_UPDATE frames, and
// starts the read loop. Any buffered write error aborts construction.
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
	cc := &ClientConn{
		t:                     t,
		tconn:                 c,
		readerDone:            make(chan struct{}),
		nextStreamID:          1,
		maxFrameSize:          16 << 10,           // spec default
		initialWindowSize:     65535,              // spec default
		maxConcurrentStreams:  1000,               // "infinite", per spec. 1000 seems good enough.
		peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
		streams:               make(map[uint32]*clientStream),
		singleUse:             singleUse,
		wantSettingsAck:       true,
		pings:                 make(map[[8]byte]chan struct{}),
	}
	if d := t.idleConnTimeout(); d != 0 {
		cc.idleTimeout = d
		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
	}
	if VerboseLogs {
		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
	}

	cc.cond = sync.NewCond(&cc.mu)
	cc.flow.add(int32(initialWindowSize))

	// TODO: adjust this writer size to account for frame size +
	// MTU + crypto/tls record padding.
	cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
	cc.br = bufio.NewReader(c)
	cc.fr = NewFramer(cc.bw, cc.br)
	cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()

	// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
	// henc in response to SETTINGS frames?
	cc.henc = hpack.NewEncoder(&cc.hbuf)

	// Plain-text HTTP/2: start client streams at 3 rather than 1
	// (NOTE(review): presumably because stream 1 is taken by the
	// h2c upgrade flow — confirm against the h2c code).
	if t.AllowHTTP {
		cc.nextStreamID = 3
	}

	// Capture TLS state when the conn exposes it (e.g. *tls.Conn).
	if cs, ok := c.(connectionStater); ok {
		state := cs.ConnectionState()
		cc.tlsState = &state
	}

	initialSettings := []Setting{
		{ID: SettingEnablePush, Val: 0},
		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
	}
	if max := t.maxHeaderListSize(); max != 0 {
		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
	}

	// Connection preamble: preface, SETTINGS, then grow the
	// connection-level receive window past the 64k default.
	cc.bw.Write(clientPreface)
	cc.fr.WriteSettings(initialSettings...)
	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
	cc.bw.Flush()
	if cc.werr != nil {
		return nil, cc.werr
	}

	go cc.readLoop()
	return cc, nil
}
698
699func (cc *ClientConn) healthCheck() {
700 pingTimeout := cc.t.pingTimeout()
701 // We don't need to periodically ping in the health check, because the readLoop of ClientConn will
702 // trigger the healthCheck again if there is no frame received.
703 ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
704 defer cancel()
705 err := cc.Ping(ctx)
706 if err != nil {
707 cc.closeForLostPing()
708 cc.t.connPool().MarkDead(cc)
709 return
710 }
711}
712
// setGoAway records a received GOAWAY frame and delivers an error to
// any stream whose ID exceeds the frame's LastStreamID (the server
// will not process those streams).
func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
	cc.mu.Lock()
	defer cc.mu.Unlock()

	old := cc.goAway
	cc.goAway = f

	// Merge the previous and current GoAway error frames.
	if cc.goAwayDebug == "" {
		cc.goAwayDebug = string(f.DebugData())
	}
	if old != nil && old.ErrCode != ErrCodeNo {
		cc.goAway.ErrCode = old.ErrCode
	}
	last := f.LastStreamID
	for streamID, cs := range cc.streams {
		if streamID > last {
			// Non-blocking send: the stream may already have a
			// result pending, in which case it's left alone.
			select {
			case cs.resc <- resAndError{err: errClientConnGotGoAway}:
			default:
			}
		}
	}
}
737
// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed or received or sent a GOAWAY.
// It acquires cc.mu.
func (cc *ClientConn) CanTakeNewRequest() bool {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.canTakeNewRequestLocked()
}
745
// clientConnIdleState describes the suitability of a client
// connection to initiate a new RoundTrip request.
type clientConnIdleState struct {
	canTakeNewRequest bool
	freshConn         bool // whether it's unused by any previous request
}
752
// idleState returns the connection's suitability for a new request,
// acquiring cc.mu around idleStateLocked.
func (cc *ClientConn) idleState() clientConnIdleState {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.idleStateLocked()
}
758
// idleStateLocked computes whether this connection can accept a new
// request. Requires cc.mu be held.
func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
	// A single-use connection that already issued its stream can
	// never take another request.
	if cc.singleUse && cc.nextStreamID > 1 {
		return
	}
	var maxConcurrentOkay bool
	if cc.t.StrictMaxConcurrentStreams {
		// We'll tell the caller we can take a new request to
		// prevent the caller from dialing a new TCP
		// connection, but then we'll block later before
		// writing it.
		maxConcurrentOkay = true
	} else {
		maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams)
	}

	// Beyond concurrency, require: no GOAWAY, not closed/closing,
	// stream IDs not exhausted (each stream advances the ID by 2,
	// and pending requests are accounted for), and not idle past
	// the idle timeout.
	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
		!cc.tooIdleLocked()
	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
	return
}
780
781func (cc *ClientConn) canTakeNewRequestLocked() bool {
782 st := cc.idleStateLocked()
783 return st.canTakeNewRequest
784}
785
786// tooIdleLocked reports whether this connection has been been sitting idle
787// for too much wall time.
788func (cc *ClientConn) tooIdleLocked() bool {
789 // The Round(0) strips the monontonic clock reading so the
790 // times are compared based on their wall time. We don't want
791 // to reuse a connection that's been sitting idle during
792 // VM/laptop suspend if monotonic time was also frozen.
793 return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
794}
795
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
// connection. The timer could just call closeIfIdle, but this is more
// clear.
func (cc *ClientConn) onIdleTimeout() {
	cc.closeIfIdle()
}
805
// closeIfIdle closes the connection, but only if it carries no active
// streams; otherwise it is a no-op. Safe to call concurrently with new
// requests (the check and the closed flag are set under cc.mu).
func (cc *ClientConn) closeIfIdle() {
	cc.mu.Lock()
	if len(cc.streams) > 0 {
		cc.mu.Unlock()
		return
	}
	cc.closed = true
	nextID := cc.nextStreamID
	// TODO: do clients send GOAWAY too? maybe? Just Close:
	cc.mu.Unlock()

	if VerboseLogs {
		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
	}
	cc.tconn.Close()
}
822
// shutdownEnterWaitStateHook is a no-op hook invoked just before
// Shutdown starts waiting for streams to drain; replaceable
// (presumably by tests — it has no other callers visible here).
var shutdownEnterWaitStateHook = func() {}

// Shutdown gracefully closes the client connection, waiting for running streams to complete.
func (cc *ClientConn) Shutdown(ctx context.Context) error {
	if err := cc.sendGoAway(); err != nil {
		return err
	}
	// Wait for all in-flight streams to complete or connection to close
	done := make(chan error, 1)
	cancelled := false // guarded by cc.mu
	go func() {
		cc.mu.Lock()
		defer cc.mu.Unlock()
		for {
			if len(cc.streams) == 0 || cc.closed {
				cc.closed = true
				done <- cc.tconn.Close()
				break
			}
			if cancelled {
				break
			}
			// Re-woken by cc.cond.Broadcast on stream completion,
			// connection state changes, or the cancellation below.
			cc.cond.Wait()
		}
	}()
	shutdownEnterWaitStateHook()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		cc.mu.Lock()
		// Free the goroutine above
		cancelled = true
		cc.cond.Broadcast()
		cc.mu.Unlock()
		return ctx.Err()
	}
}
861
// sendGoAway writes a graceful-shutdown GOAWAY frame to the server (at
// most once) and marks the connection closing so no new requests start.
// It acquires cc.mu and then cc.wmu — that order is required; see the
// wmu field comment on ClientConn.
func (cc *ClientConn) sendGoAway() error {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cc.wmu.Lock()
	defer cc.wmu.Unlock()
	if cc.closing {
		// GOAWAY sent already
		return nil
	}
	// Send a graceful shutdown frame to server
	maxStreamID := cc.nextStreamID
	if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
		return err
	}
	if err := cc.bw.Flush(); err != nil {
		return err
	}
	// Prevent new requests
	cc.closing = true
	return nil
}
883
// closeForError closes the client connection immediately. In-flight
// requests are interrupted and err is delivered to their streams.
func (cc *ClientConn) closeForError(err error) error {
	cc.mu.Lock()
	// Deferred calls run LIFO: mu.Unlock executes first, then
	// cond.Broadcast wakes anyone blocked on cc.cond.
	defer cc.cond.Broadcast()
	defer cc.mu.Unlock()
	for id, cs := range cc.streams {
		// Non-blocking send: the stream may not be waiting on resc.
		select {
		case cs.resc <- resAndError{err: err}:
		default:
		}
		cs.bufPipe.CloseWithError(err)
		delete(cc.streams, id)
	}
	cc.closed = true
	return cc.tconn.Close()
}
901
902// Close closes the client connection immediately.
903//
904// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
905func (cc *ClientConn) Close() error {
906 err := errors.New("http2: client connection force closed via ClientConn.Close")
907 return cc.closeForError(err)
908}
909
910// closes the client connection immediately. In-flight requests are interrupted.
911func (cc *ClientConn) closeForLostPing() error {
912 err := errors.New("http2: client connection lost")
913 return cc.closeForError(err)
914}
915
916const maxAllocFrameSize = 512 << 10
917
918// frameBuffer returns a scratch buffer suitable for writing DATA frames.
919// They're capped at the min of the peer's max frame size or 512KB
920// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
921// bufers.
922func (cc *ClientConn) frameScratchBuffer() []byte {
923 cc.mu.Lock()
924 size := cc.maxFrameSize
925 if size > maxAllocFrameSize {
926 size = maxAllocFrameSize
927 }
928 for i, buf := range cc.freeBuf {
929 if len(buf) >= int(size) {
930 cc.freeBuf[i] = nil
931 cc.mu.Unlock()
932 return buf[:size]
933 }
934 }
935 cc.mu.Unlock()
936 return make([]byte, size)
937}
938
939func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
940 cc.mu.Lock()
941 defer cc.mu.Unlock()
942 const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
943 if len(cc.freeBuf) < maxBufs {
944 cc.freeBuf = append(cc.freeBuf, buf)
945 return
946 }
947 for i, old := range cc.freeBuf {
948 if old == nil {
949 cc.freeBuf[i] = buf
950 return
951 }
952 }
953 // forget about it.
954}
955
// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
var errRequestCanceled = errors.New("net/http: request canceled")
959
960func commaSeparatedTrailers(req *http.Request) (string, error) {
961 keys := make([]string, 0, len(req.Trailer))
962 for k := range req.Trailer {
963 k = http.CanonicalHeaderKey(k)
964 switch k {
965 case "Transfer-Encoding", "Trailer", "Content-Length":
966 return "", fmt.Errorf("invalid Trailer key %q", k)
967 }
968 keys = append(keys, k)
969 }
970 if len(keys) > 0 {
971 sort.Strings(keys)
972 return strings.Join(keys, ","), nil
973 }
974 return "", nil
975}
976
// responseHeaderTimeout returns how long to wait for response headers:
// the wrapped net/http Transport's ResponseHeaderTimeout when we're
// doing transparent http2 on top of an http1 Transport, or zero (no
// timeout) when used as a standalone http2.Transport.
func (cc *ClientConn) responseHeaderTimeout() time.Duration {
	if cc.t.t1 != nil {
		return cc.t.t1.ResponseHeaderTimeout
	}
	// No way to do this (yet?) with just an http2.Transport. Probably
	// no need. Request.Cancel this is the new way. We only need to support
	// this for compatibility with the old http.Transport fields when
	// we're doing transparent http2.
	return 0
}
987
988// checkConnHeaders checks whether req has any invalid connection-level headers.
989// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
990// Certain headers are special-cased as okay but not transmitted later.
991func checkConnHeaders(req *http.Request) error {
992 if v := req.Header.Get("Upgrade"); v != "" {
993 return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
994 }
995 if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
996 return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
997 }
998 if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
999 return fmt.Errorf("http2: invalid Connection request header: %q", vv)
1000 }
1001 return nil
1002}
1003
1004// actualContentLength returns a sanitized version of
1005// req.ContentLength, where 0 actually means zero (not unknown) and -1
1006// means unknown.
1007func actualContentLength(req *http.Request) int64 {
1008 if req.Body == nil || req.Body == http.NoBody {
1009 return 0
1010 }
1011 if req.ContentLength != 0 {
1012 return req.ContentLength
1013 }
1014 return -1
1015}
1016
// RoundTrip sends req on this connection and returns the response.
// It wraps roundTrip, discarding the gotErrAfterReqBodyWrite result
// that external callers don't need.
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, _, err := cc.roundTrip(req)
	return resp, err
}
1021
// roundTrip sends req on the connection and waits for the response,
// a timeout, a cancellation, or a peer reset. The extra
// gotErrAfterReqBodyWrite result reports whether the returned error
// occurred after the request body write had already started (used by
// callers to decide whether retrying is safe).
func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) {
	if err := checkConnHeaders(req); err != nil {
		return nil, false, err
	}
	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	trailers, err := commaSeparatedTrailers(req)
	if err != nil {
		return nil, false, err
	}
	hasTrailers := trailers != ""

	// cc.mu is held from here until after the HEADERS frame is written
	// below, so slot reservation, stream creation, and header encoding
	// happen atomically with respect to other requests on this conn.
	cc.mu.Lock()
	if err := cc.awaitOpenSlotForRequest(req); err != nil {
		cc.mu.Unlock()
		return nil, false, err
	}

	body := req.Body
	contentLen := actualContentLength(req)
	hasBody := contentLen != 0

	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
	var requestedGzip bool
	if !cc.t.disableCompression() &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != "HEAD" {
		// Request gzip only, not deflate. Deflate is ambiguous and
		// not as universally supported anyway.
		// See: https://zlib.net/zlib_faq.html#faq39
		//
		// Note that we don't request this for HEAD requests,
		// due to a bug in nginx:
		// http://trac.nginx.org/nginx/ticket/358
		// https://golang.org/issue/5522
		//
		// We don't request gzip if the request is for a range, since
		// auto-decoding a portion of a gzipped document will just fail
		// anyway. See https://golang.org/issue/8923
		requestedGzip = true
	}

	// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
	// sent by writeRequestBody below, along with any Trailers,
	// again in form HEADERS{1}, CONTINUATION{0,})
	hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
	if err != nil {
		cc.mu.Unlock()
		return nil, false, err
	}

	cs := cc.newStream()
	cs.req = req
	cs.trace = httptrace.ContextClientTrace(req.Context())
	cs.requestedGzip = requestedGzip
	bodyWriter := cc.t.getBodyWriterState(cs, body)
	cs.on100 = bodyWriter.on100

	cc.wmu.Lock()
	endStream := !hasBody && !hasTrailers
	werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
	cc.wmu.Unlock()
	traceWroteHeaders(cs.trace)
	cc.mu.Unlock()

	if werr != nil {
		if hasBody {
			req.Body.Close() // per RoundTripper contract
			bodyWriter.cancel()
		}
		cc.forgetStreamID(cs.ID)
		// Don't bother sending a RST_STREAM (our write already failed;
		// no need to keep writing)
		traceWroteRequest(cs.trace, werr)
		return nil, false, werr
	}

	var respHeaderTimer <-chan time.Time
	if hasBody {
		bodyWriter.scheduleBodyWrite()
	} else {
		// No body: the request is fully written, so the response
		// header timer (if any) starts now.
		traceWroteRequest(cs.trace, nil)
		if d := cc.responseHeaderTimeout(); d != 0 {
			timer := time.NewTimer(d)
			defer timer.Stop()
			respHeaderTimer = timer.C
		}
	}

	readLoopResCh := cs.resc
	bodyWritten := false
	ctx := req.Context()

	// handleReadLoopResponse finishes the round trip once the read loop
	// delivers either a response or an error for this stream.
	handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) {
		res := re.res
		if re.err != nil || res.StatusCode > 299 {
			// On error or status code 3xx, 4xx, 5xx, etc abort any
			// ongoing write, assuming that the server doesn't care
			// about our request body. If the server replied with 1xx or
			// 2xx, however, then assume the server DOES potentially
			// want our body (e.g. full-duplex streaming:
			// golang.org/issue/13444). If it turns out the server
			// doesn't, they'll RST_STREAM us soon enough. This is a
			// heuristic to avoid adding knobs to Transport. Hopefully
			// we can keep it.
			bodyWriter.cancel()
			cs.abortRequestBodyWrite(errStopReqBodyWrite)
		}
		if re.err != nil {
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), re.err
		}
		res.Request = req
		res.TLS = cc.tlsState
		return res, false, nil
	}

	// Wait for whichever happens first: a response, the response-header
	// timeout, context/Request.Cancel cancellation, a peer RST_STREAM,
	// or completion of the body write.
	for {
		select {
		case re := <-readLoopResCh:
			return handleReadLoopResponse(re)
		case <-respHeaderTimer:
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), errTimeout
		case <-ctx.Done():
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), ctx.Err()
		case <-req.Cancel:
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), errRequestCanceled
		case <-cs.peerReset:
			// processResetStream already removed the
			// stream from the streams map; no need for
			// forgetStreamID.
			return nil, cs.getStartedWrite(), cs.resetErr
		case err := <-bodyWriter.resc:
			// Prefer the read loop's response, if available. Issue 16102.
			select {
			case re := <-readLoopResCh:
				return handleReadLoopResponse(re)
			default:
			}
			if err != nil {
				cc.forgetStreamID(cs.ID)
				return nil, cs.getStartedWrite(), err
			}
			// Body fully written; now start the response-header timer.
			bodyWritten = true
			if d := cc.responseHeaderTimeout(); d != 0 {
				timer := time.NewTimer(d)
				defer timer.Stop()
				respHeaderTimer = timer.C
			}
		}
	}
}
1198
// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu. It returns errClientConnUnusable if the connection
// closes or stops accepting new requests, or the cancellation error if
// req is canceled while waiting.
func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
	var waitingForConn chan struct{}
	var waitingForConnErr error // guarded by cc.mu
	for {
		cc.lastActive = time.Now()
		if cc.closed || !cc.canTakeNewRequestLocked() {
			if waitingForConn != nil {
				// Signal the cancel-watching goroutine (below) to exit.
				close(waitingForConn)
			}
			return errClientConnUnusable
		}
		cc.lastIdle = time.Time{}
		if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
			if waitingForConn != nil {
				close(waitingForConn)
			}
			return nil
		}
		// Unfortunately, we cannot wait on a condition variable and channel at
		// the same time, so instead, we spin up a goroutine to check if the
		// request is canceled while we wait for a slot to open in the connection.
		if waitingForConn == nil {
			waitingForConn = make(chan struct{})
			go func() {
				if err := awaitRequestCancel(req, waitingForConn); err != nil {
					cc.mu.Lock()
					waitingForConnErr = err
					// Wake the cc.cond.Wait below so we notice the error.
					cc.cond.Broadcast()
					cc.mu.Unlock()
				}
			}()
		}
		cc.pendingRequests++
		cc.cond.Wait() // releases cc.mu while blocked; reacquires before returning
		cc.pendingRequests--
		if waitingForConnErr != nil {
			return waitingForConnErr
		}
	}
}
1241
// writeHeaders writes an encoded header block for streamID as one
// HEADERS frame followed by zero or more CONTINUATION frames, each at
// most maxFrameSize bytes, then flushes the connection's write buffer.
// It returns the connection's sticky write error, if any.
// requires cc.wmu be held
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
	first := true // first frame written (HEADERS is first, then CONTINUATION)
	for len(hdrs) > 0 && cc.werr == nil {
		chunk := hdrs
		if len(chunk) > maxFrameSize {
			chunk = chunk[:maxFrameSize]
		}
		hdrs = hdrs[len(chunk):]
		endHeaders := len(hdrs) == 0 // last chunk carries END_HEADERS
		if first {
			cc.fr.WriteHeaders(HeadersFrameParam{
				StreamID:      streamID,
				BlockFragment: chunk,
				EndStream:     endStream,
				EndHeaders:    endHeaders,
			})
			first = false
		} else {
			cc.fr.WriteContinuation(streamID, endHeaders, chunk)
		}
	}
	// TODO(bradfitz): this Flush could potentially block (as
	// could the WriteHeaders call(s) above), which means they
	// wouldn't respond to Request.Cancel being readable. That's
	// rare, but this should probably be in a goroutine.
	cc.bw.Flush()
	return cc.werr
}
1271
// internal error values; they don't escape to callers
var (
	// abort request body write; don't send cancel
	errStopReqBodyWrite = errors.New("http2: aborting request body write")

	// abort request body write, but also send a stream reset (cancel) to the peer.
	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")

	// the request body produced more bytes than its declared Content-Length.
	errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
)
1282
// writeRequestBody reads body and writes it to the stream as DATA
// frames, obeying stream/connection flow control, then sends any
// trailers (or an empty DATA frame) with END_STREAM. bodyCloser is
// always closed before returning. Runs in the body writer goroutine
// scheduled by roundTrip.
func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
	cc := cs.cc
	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
	buf := cc.frameScratchBuffer()
	defer cc.putFrameScratchBuffer(buf)

	defer func() {
		traceWroteRequest(cs.trace, err)
		// TODO: write h12Compare test showing whether
		// Request.Body is closed by the Transport,
		// and in multiple cases: server replies <=299 and >299
		// while still writing request body
		cerr := bodyCloser.Close()
		if err == nil {
			err = cerr
		}
	}()

	req := cs.req
	hasTrailers := req.Trailer != nil
	remainLen := actualContentLength(req)
	hasContentLen := remainLen != -1

	var sawEOF bool
	for !sawEOF {
		// Read into all but the last byte of buf so the EOF
		// double-check below has room to read into buf[n:].
		n, err := body.Read(buf[:len(buf)-1])
		if hasContentLen {
			remainLen -= int64(n)
			if remainLen == 0 && err == nil {
				// The request body's Content-Length was predeclared and
				// we just finished reading it all, but the underlying io.Reader
				// returned the final chunk with a nil error (which is one of
				// the two valid things a Reader can do at EOF). Because we'd prefer
				// to send the END_STREAM bit early, double-check that we're actually
				// at EOF. Subsequent reads should return (0, EOF) at this point.
				// If either value is different, we return an error in one of two ways below.
				var n1 int
				n1, err = body.Read(buf[n:])
				remainLen -= int64(n1)
			}
			if remainLen < 0 {
				err = errReqBodyTooLong
				cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
				return err
			}
		}
		if err == io.EOF {
			sawEOF = true
			err = nil
		} else if err != nil {
			cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
			return err
		}

		// Write the chunk out in pieces as flow control allows.
		remain := buf[:n]
		for len(remain) > 0 && err == nil {
			var allowed int32
			allowed, err = cs.awaitFlowControl(len(remain))
			switch {
			case err == errStopReqBodyWrite:
				return err
			case err == errStopReqBodyWriteAndCancel:
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
				return err
			case err != nil:
				return err
			}
			cc.wmu.Lock()
			data := remain[:allowed]
			remain = remain[allowed:]
			// Set END_STREAM on the last DATA frame only if there
			// are no trailers to follow.
			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
			err = cc.fr.WriteData(cs.ID, sentEnd, data)
			if err == nil {
				// TODO(bradfitz): this flush is for latency, not bandwidth.
				// Most requests won't need this. Make this opt-in or
				// opt-out? Use some heuristic on the body type? Nagel-like
				// timers? Based on 'n'? Only last chunk of this for loop,
				// unless flow control tokens are low? For now, always.
				// If we change this, see comment below.
				err = cc.bw.Flush()
			}
			cc.wmu.Unlock()
		}
		if err != nil {
			return err
		}
	}

	if sentEnd {
		// Already sent END_STREAM (which implies we have no
		// trailers) and flushed, because currently all
		// WriteData frames above get a flush. So we're done.
		return nil
	}

	var trls []byte
	if hasTrailers {
		cc.mu.Lock()
		trls, err = cc.encodeTrailers(req)
		cc.mu.Unlock()
		if err != nil {
			cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
			cc.forgetStreamID(cs.ID)
			return err
		}
	}

	cc.mu.Lock()
	maxFrameSize := int(cc.maxFrameSize)
	cc.mu.Unlock()

	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	// Two ways to send END_STREAM: either with trailers, or
	// with an empty DATA frame.
	if len(trls) > 0 {
		err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
	} else {
		err = cc.fr.WriteData(cs.ID, true, nil)
	}
	if ferr := cc.bw.Flush(); ferr != nil && err == nil {
		err = ferr
	}
	return err
}
1409
// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
// control tokens from the server.
// It returns either the non-zero number of tokens taken or an error
// if the stream is dead.
func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
	cc := cs.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()
	for {
		if cc.closed {
			return 0, errClientConnClosed
		}
		if cs.stopReqBody != nil {
			// The body write was aborted (see abortRequestBodyWrite).
			return 0, cs.stopReqBody
		}
		if err := cs.checkResetOrDone(); err != nil {
			return 0, err
		}
		if a := cs.flow.available(); a > 0 {
			take := a
			if int(take) > maxBytes {

				take = int32(maxBytes) // can't truncate int; take is int32
			}
			// Also clamp to the peer's advertised max frame size.
			if take > int32(cc.maxFrameSize) {
				take = int32(cc.maxFrameSize)
			}
			cs.flow.take(take)
			return take, nil
		}
		// No tokens available; wait for a WINDOW_UPDATE (or state
		// change) to broadcast on cc.cond.
		cc.cond.Wait()
	}
}
1443
// encodeHeaders HPACK-encodes the request's pseudo-headers and regular
// headers into cc.hbuf and returns the encoded block. It validates
// header names/values and the total header list size against
// cc.peerMaxHeaderListSize BEFORE touching the HPACK encoder, so a
// rejected request doesn't pollute shared encoder state.
// requires cc.mu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
	cc.hbuf.Reset()

	host := req.Host
	if host == "" {
		host = req.URL.Host
	}
	host, err := httpguts.PunycodeHostPort(host)
	if err != nil {
		return nil, err
	}

	var path string
	if req.Method != "CONNECT" {
		path = req.URL.RequestURI()
		if !validPseudoPath(path) {
			orig := path
			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
			if !validPseudoPath(path) {
				if req.URL.Opaque != "" {
					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
				} else {
					return nil, fmt.Errorf("invalid request :path %q", orig)
				}
			}
		}
	}

	// Check for any invalid headers and return an error before we
	// potentially pollute our hpack state. (We want to be able to
	// continue to reuse the hpack encoder for future requests)
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, fmt.Errorf("invalid HTTP header name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
			}
		}
	}

	// enumerateHeaders yields every (name, value) pair to be encoded,
	// in order. It's called twice: once to size the header list, once
	// to actually encode it.
	enumerateHeaders := func(f func(name, value string)) {
		// 8.1.2.3 Request Pseudo-Header Fields
		// The :path pseudo-header field includes the path and query parts of the
		// target URI (the path-absolute production and optionally a '?' character
		// followed by the query production (see Sections 3.3 and 3.4 of
		// [RFC3986]).
		f(":authority", host)
		m := req.Method
		if m == "" {
			m = http.MethodGet
		}
		f(":method", m)
		if req.Method != "CONNECT" {
			f(":path", path)
			f(":scheme", req.URL.Scheme)
		}
		if trailers != "" {
			f("trailer", trailers)
		}

		var didUA bool
		for k, vv := range req.Header {
			if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") {
				// Host is :authority, already sent.
				// Content-Length is automatic, set below.
				continue
			} else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") ||
				strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") ||
				strings.EqualFold(k, "keep-alive") {
				// Per 8.1.2.2 Connection-Specific Header
				// Fields, don't send connection-specific
				// fields. We have already checked if any
				// are error-worthy so just ignore the rest.
				continue
			} else if strings.EqualFold(k, "user-agent") {
				// Match Go's http1 behavior: at most one
				// User-Agent. If set to nil or empty string,
				// then omit it. Otherwise if not mentioned,
				// include the default (below).
				didUA = true
				if len(vv) < 1 {
					continue
				}
				vv = vv[:1]
				if vv[0] == "" {
					continue
				}
			} else if strings.EqualFold(k, "cookie") {
				// Per 8.1.2.5 To allow for better compression efficiency, the
				// Cookie header field MAY be split into separate header fields,
				// each with one or more cookie-pairs.
				for _, v := range vv {
					for {
						p := strings.IndexByte(v, ';')
						if p < 0 {
							break
						}
						f("cookie", v[:p])
						p++
						// strip space after semicolon if any.
						for p+1 <= len(v) && v[p] == ' ' {
							p++
						}
						v = v[p:]
					}
					if len(v) > 0 {
						f("cookie", v)
					}
				}
				continue
			}

			for _, v := range vv {
				f(k, v)
			}
		}
		if shouldSendReqContentLength(req.Method, contentLength) {
			f("content-length", strconv.FormatInt(contentLength, 10))
		}
		if addGzipHeader {
			f("accept-encoding", "gzip")
		}
		if !didUA {
			f("user-agent", defaultUserAgent)
		}
	}

	// Do a first pass over the headers counting bytes to ensure
	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
	// separate pass before encoding the headers to prevent
	// modifying the hpack state.
	hlSize := uint64(0)
	enumerateHeaders(func(name, value string) {
		hf := hpack.HeaderField{Name: name, Value: value}
		hlSize += uint64(hf.Size())
	})

	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	trace := httptrace.ContextClientTrace(req.Context())
	traceHeaders := traceHasWroteHeaderField(trace)

	// Header list size is ok. Write the headers.
	enumerateHeaders(func(name, value string) {
		name = strings.ToLower(name)
		cc.writeHeader(name, value)
		if traceHeaders {
			traceWroteHeaderField(trace, name, value)
		}
	})

	return cc.hbuf.Bytes(), nil
}
1602
// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
	if contentLength > 0 {
		return true
	}
	if contentLength < 0 {
		return false
	}
	// For zero bodies, whether we send a content-length depends on the method.
	// It also kinda doesn't matter for http2 either way, with END_STREAM.
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	}
	return false
}
1624
// encodeTrailers HPACK-encodes req.Trailer into cc.hbuf and returns
// the encoded block, enforcing cc.peerMaxHeaderListSize in a sizing
// pass before touching the shared HPACK encoder.
// requires cc.mu be held.
func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {
	cc.hbuf.Reset()

	// First pass: compute the header list size without mutating
	// encoder state, so an oversized trailer set can be rejected.
	hlSize := uint64(0)
	for k, vv := range req.Trailer {
		for _, v := range vv {
			hf := hpack.HeaderField{Name: k, Value: v}
			hlSize += uint64(hf.Size())
		}
	}
	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	for k, vv := range req.Trailer {
		// Transfer-Encoding, etc.. have already been filtered at the
		// start of RoundTrip
		lowKey := strings.ToLower(k)
		for _, v := range vv {
			cc.writeHeader(lowKey, v)
		}
	}
	return cc.hbuf.Bytes(), nil
}
1650
1651func (cc *ClientConn) writeHeader(name, value string) {
1652 if VerboseLogs {
1653 log.Printf("http2: Transport encoding header %q = %q", name, value)
1654 }
1655 cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
1656}
1657
// resAndError is what the read loop delivers on clientStream.resc:
// either a response or the error that ended the stream.
type resAndError struct {
	_   incomparable
	res *http.Response
	err error
}
1663
// newStream allocates the next odd-numbered client stream ID, sets up
// the stream's send/receive flow-control windows (linked to the
// connection-level windows), and registers it in cc.streams.
// requires cc.mu be held.
func (cc *ClientConn) newStream() *clientStream {
	cs := &clientStream{
		cc:        cc,
		ID:        cc.nextStreamID,
		resc:      make(chan resAndError, 1),
		peerReset: make(chan struct{}),
		done:      make(chan struct{}),
	}
	cs.flow.add(int32(cc.initialWindowSize))
	cs.flow.setConnFlow(&cc.flow)
	cs.inflow.add(transportDefaultStreamFlow)
	cs.inflow.setConnFlow(&cc.inflow)
	cc.nextStreamID += 2 // client-initiated stream IDs are odd
	cc.streams[cs.ID] = cs
	return cs
}
1681
// forgetStreamID removes the stream with the given id from the
// connection's stream map, if present; see streamByID for the details.
func (cc *ClientConn) forgetStreamID(id uint32) {
	cc.streamByID(id, true)
}
1685
// streamByID returns the stream with the given id, or nil. If
// andRemove is true and the stream exists on a still-open connection,
// it is removed from the map, its done channel is closed, the idle
// timer is (re)armed when this was the last stream, and waiters on
// cc.cond are woken.
func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cs := cc.streams[id]
	if andRemove && cs != nil && !cc.closed {
		cc.lastActive = time.Now()
		delete(cc.streams, id)
		if len(cc.streams) == 0 && cc.idleTimer != nil {
			cc.idleTimer.Reset(cc.idleTimeout)
			cc.lastIdle = time.Now()
		}
		close(cs.done)
		// Wake up checkResetOrDone via clientStream.awaitFlowControl and
		// wake up RoundTrip if there is a pending request.
		cc.cond.Broadcast()
	}
	return cs
}
1704
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
	_             incomparable
	cc            *ClientConn
	closeWhenIdle bool // close the conn once no streams remain (see run)
}
1711
// readLoop runs in its own goroutine and reads and dispatches frames.
// When run returns a ConnectionError, a GOAWAY with the matching error
// code is written back to the peer before cleanup tears the conn down.
func (cc *ClientConn) readLoop() {
	rl := &clientConnReadLoop{cc: cc}
	defer rl.cleanup()
	cc.readerErr = rl.run()
	if ce, ok := cc.readerErr.(ConnectionError); ok {
		cc.wmu.Lock()
		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
		cc.wmu.Unlock()
	}
}
1723
// GoAwayError is returned by the Transport when the server closes the
// TCP connection after sending a GOAWAY frame.
type GoAwayError struct {
	LastStreamID uint32  // highest stream ID the server says it processed
	ErrCode      ErrCode // error code carried by the GOAWAY frame
	DebugData    string  // opaque debug data from the GOAWAY frame, if any
}
1731
// Error implements the error interface, summarizing the GOAWAY details.
func (e GoAwayError) Error() string {
	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
		e.LastStreamID, e.ErrCode, e.DebugData)
}
1736
1737func isEOFOrNetReadError(err error) bool {
1738 if err == io.EOF {
1739 return true
1740 }
1741 ne, ok := err.(*net.OpError)
1742 return ok && ne.Op == "read"
1743}
1744
// cleanup tears down the connection after the read loop exits: it
// marks the conn dead in the pool, closes the TCP conn, translates the
// reader error (GOAWAY details or unexpected EOF), and fails every
// remaining stream with that error.
func (rl *clientConnReadLoop) cleanup() {
	cc := rl.cc
	defer cc.tconn.Close()
	defer cc.t.connPool().MarkDead(cc)
	defer close(cc.readerDone)

	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	// Close any response bodies if the server closes prematurely.
	// TODO: also do this if we've written the headers but not
	// gotten a response yet.
	err := cc.readerErr
	cc.mu.Lock()
	if cc.goAway != nil && isEOFOrNetReadError(err) {
		// The peer sent GOAWAY before closing; surface its details.
		err = GoAwayError{
			LastStreamID: cc.goAway.LastStreamID,
			ErrCode:      cc.goAway.ErrCode,
			DebugData:    cc.goAwayDebug,
		}
	} else if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	for _, cs := range cc.streams {
		cs.bufPipe.CloseWithError(err) // no-op if already closed
		select {
		case cs.resc <- resAndError{err: err}:
		default:
		}
		close(cs.done)
	}
	cc.closed = true
	cc.cond.Broadcast()
	cc.mu.Unlock()
}
1781
// run is the read loop's main body: it reads frames until a fatal
// error, dispatching each frame type to its process* handler. Stream
// errors reset just that stream and continue; other errors end the
// loop (and thus the connection). An optional read-idle timer triggers
// health-check pings when no frames arrive.
func (rl *clientConnReadLoop) run() error {
	cc := rl.cc
	rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
	gotReply := false // ever saw a HEADERS reply
	gotSettings := false
	readIdleTimeout := cc.t.ReadIdleTimeout
	var t *time.Timer
	if readIdleTimeout != 0 {
		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
		defer t.Stop()
	}
	for {
		f, err := cc.fr.ReadFrame()
		if t != nil {
			// Any received frame counts as read activity.
			t.Reset(readIdleTimeout)
		}
		if err != nil {
			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
		}
		if se, ok := err.(StreamError); ok {
			// Stream-level error: reset that one stream and keep the
			// connection going.
			if cs := cc.streamByID(se.StreamID, false); cs != nil {
				cs.cc.writeStreamReset(cs.ID, se.Code, err)
				cs.cc.forgetStreamID(cs.ID)
				if se.Cause == nil {
					se.Cause = cc.fr.errDetail
				}
				rl.endStreamError(cs, se)
			}
			continue
		} else if err != nil {
			return err
		}
		if VerboseLogs {
			cc.vlogf("http2: Transport received %s", summarizeFrame(f))
		}
		if !gotSettings {
			// The server must open with a SETTINGS frame.
			if _, ok := f.(*SettingsFrame); !ok {
				cc.logf("protocol error: received %T before a SETTINGS frame", f)
				return ConnectionError(ErrCodeProtocol)
			}
			gotSettings = true
		}
		maybeIdle := false // whether frame might transition us to idle

		switch f := f.(type) {
		case *MetaHeadersFrame:
			err = rl.processHeaders(f)
			maybeIdle = true
			gotReply = true
		case *DataFrame:
			err = rl.processData(f)
			maybeIdle = true
		case *GoAwayFrame:
			err = rl.processGoAway(f)
			maybeIdle = true
		case *RSTStreamFrame:
			err = rl.processResetStream(f)
			maybeIdle = true
		case *SettingsFrame:
			err = rl.processSettings(f)
		case *PushPromiseFrame:
			err = rl.processPushPromise(f)
		case *WindowUpdateFrame:
			err = rl.processWindowUpdate(f)
		case *PingFrame:
			err = rl.processPing(f)
		default:
			cc.logf("Transport: unhandled response frame type %T", f)
		}
		if err != nil {
			if VerboseLogs {
				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
			}
			return err
		}
		if rl.closeWhenIdle && gotReply && maybeIdle {
			cc.closeIfIdle()
		}
	}
}
1862
// processHeaders handles a decoded HEADERS(+CONTINUATION) frame for a
// stream: first response headers, 1xx informational responses, or
// trailers. The resulting response or error is delivered to the
// waiting roundTrip via cs.resc.
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, false)
	if cs == nil {
		// We'd get here if we canceled a request while the
		// server had its response still in flight. So if this
		// was just something we canceled, ignore it.
		return nil
	}
	if f.StreamEnded() {
		// Issue 20521: If the stream has ended, streamByID() causes
		// clientStream.done to be closed, which causes the request's bodyWriter
		// to be closed with an errStreamClosed, which may be received by
		// clientConn.RoundTrip before the result of processing these headers.
		// Deferring stream closure allows the header processing to occur first.
		// clientConn.RoundTrip may still receive the bodyWriter error first, but
		// the fix for issue 16102 prioritises any response.
		//
		// Issue 22413: If there is no request body, we should close the
		// stream before writing to cs.resc so that the stream is closed
		// immediately once RoundTrip returns.
		if cs.req.Body != nil {
			defer cc.forgetStreamID(f.StreamID)
		} else {
			cc.forgetStreamID(f.StreamID)
		}
	}
	if !cs.firstByte {
		if cs.trace != nil {
			// TODO(bradfitz): move first response byte earlier,
			// when we first read the 9 byte header, not waiting
			// until all the HEADERS+CONTINUATION frames have been
			// merged. This works for now.
			traceFirstResponseByte(cs.trace)
		}
		cs.firstByte = true
	}
	if !cs.pastHeaders {
		cs.pastHeaders = true
	} else {
		// A second HEADERS block on this stream is trailers.
		return rl.processTrailers(cs, f)
	}

	res, err := rl.handleResponse(cs, f)
	if err != nil {
		if _, ok := err.(ConnectionError); ok {
			return err
		}
		// Any other error type is a stream error.
		cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
		cc.forgetStreamID(cs.ID)
		cs.resc <- resAndError{err: err}
		return nil // return nil from process* funcs to keep conn alive
	}
	if res == nil {
		// (nil, nil) special case. See handleResponse docs.
		return nil
	}
	cs.resTrailer = &res.Trailer
	cs.resc <- resAndError{res: res}
	return nil
}
1925
// handleResponse builds an *http.Response from a decoded HEADERS frame:
// status pseudo-header, regular headers, declared trailers, content
// length, body pipe, and transparent gzip decoding.
//
// may return error types nil, or ConnectionError. Any other error value
// is a StreamError of type ErrCodeProtocol. The returned error in that case
// is the detail.
//
// As a special case, handleResponse may return (nil, nil) to skip the
// frame (currently only used for 1xx responses).
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
	if f.Truncated {
		return nil, errResponseHeaderListSize
	}

	status := f.PseudoValue("status")
	if status == "" {
		return nil, errors.New("malformed response from server: missing status pseudo header")
	}
	statusCode, err := strconv.Atoi(status)
	if err != nil {
		return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
	}

	regularFields := f.RegularFields()
	// strs is one shared backing array for the common case of
	// single-valued headers, to avoid one allocation per header.
	strs := make([]string, len(regularFields))
	header := make(http.Header, len(regularFields))
	res := &http.Response{
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		Header:     header,
		StatusCode: statusCode,
		Status:     status + " " + http.StatusText(statusCode),
	}
	for _, hf := range regularFields {
		key := http.CanonicalHeaderKey(hf.Name)
		if key == "Trailer" {
			t := res.Trailer
			if t == nil {
				t = make(http.Header)
				res.Trailer = t
			}
			foreachHeaderElement(hf.Value, func(v string) {
				t[http.CanonicalHeaderKey(v)] = nil
			})
		} else {
			vv := header[key]
			if vv == nil && len(strs) > 0 {
				// More than likely this will be a single-element key.
				// Most headers aren't multi-valued.
				// Set the capacity on strs[0] to 1, so any future append
				// won't extend the slice into the other strings.
				vv, strs = strs[:1:1], strs[1:]
				vv[0] = hf.Value
				header[key] = vv
			} else {
				header[key] = append(vv, hf.Value)
			}
		}
	}

	if statusCode >= 100 && statusCode <= 199 {
		cs.num1xx++
		const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
		if cs.num1xx > max1xxResponses {
			return nil, errors.New("http2: too many 1xx informational responses")
		}
		if fn := cs.get1xxTraceFunc(); fn != nil {
			if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
				return nil, err
			}
		}
		if statusCode == 100 {
			traceGot100Continue(cs.trace)
			if cs.on100 != nil {
				cs.on100() // forces any write delay timer to fire
			}
		}
		cs.pastHeaders = false // do it all again
		return nil, nil
	}

	streamEnded := f.StreamEnded()
	isHead := cs.req.Method == "HEAD"
	if !streamEnded || isHead {
		res.ContentLength = -1
		if clens := res.Header["Content-Length"]; len(clens) == 1 {
			if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
				res.ContentLength = clen64
			} else {
				// TODO: care? unlike http/1, it won't mess up our framing, so it's
				// more safe smuggling-wise to ignore.
			}
		} else if len(clens) > 1 {
			// TODO: care? unlike http/1, it won't mess up our framing, so it's
			// more safe smuggling-wise to ignore.
		}
	}

	if streamEnded || isHead {
		res.Body = noBody
		return res, nil
	}

	cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
	cs.bytesRemain = res.ContentLength
	res.Body = transportResponseBody{cs}
	go cs.awaitRequestCancel(cs.req)

	if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
		res.Header.Del("Content-Encoding")
		res.Header.Del("Content-Length")
		res.ContentLength = -1
		res.Body = &gzipReader{body: res.Body}
		res.Uncompressed = true
	}
	return res, nil
}
2040
2041func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
2042 if cs.pastTrailers {
2043 // Too many HEADERS frames for this stream.
2044 return ConnectionError(ErrCodeProtocol)
2045 }
2046 cs.pastTrailers = true
2047 if !f.StreamEnded() {
2048 // We expect that any headers for trailers also
2049 // has END_STREAM.
2050 return ConnectionError(ErrCodeProtocol)
2051 }
2052 if len(f.PseudoFields()) > 0 {
2053 // No pseudo header fields are defined for trailers.
2054 // TODO: ConnectionError might be overly harsh? Check.
2055 return ConnectionError(ErrCodeProtocol)
2056 }
2057
2058 trailer := make(http.Header)
2059 for _, hf := range f.RegularFields() {
2060 key := http.CanonicalHeaderKey(hf.Name)
2061 trailer[key] = append(trailer[key], hf.Value)
2062 }
2063 cs.trailer = trailer
2064
2065 rl.endStream(cs)
2066 return nil
2067}
2068
// transportResponseBody is the concrete type of Transport.RoundTrip's
// Response.Body. It is an io.ReadCloser. On Read, it reads from
// cs.bufPipe. On Close it sends RST_STREAM if EOF wasn't already seen.
type transportResponseBody struct {
	cs *clientStream
}
2075
// Read implements io.Reader for the response body. It enforces any
// declared Content-Length, makes read errors sticky via cs.readErr, and
// returns consumed flow-control tokens to the server with WINDOW_UPDATE
// frames when the windows run low.
func (b transportResponseBody) Read(p []byte) (n int, err error) {
	cs := b.cs
	cc := cs.cc

	if cs.readErr != nil {
		// A previous Read already failed; keep failing the same way.
		return 0, cs.readErr
	}
	n, err = b.cs.bufPipe.Read(p)
	if cs.bytesRemain != -1 { // -1 means unknown Content-Length
		if int64(n) > cs.bytesRemain {
			// Server sent more than its declared Content-Length:
			// truncate the read and reset the stream.
			n = int(cs.bytesRemain)
			if err == nil {
				err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
				cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
			}
			cs.readErr = err
			return int(cs.bytesRemain), err
		}
		cs.bytesRemain -= int64(n)
		if err == io.EOF && cs.bytesRemain > 0 {
			// Body ended before the declared length arrived.
			err = io.ErrUnexpectedEOF
			cs.readErr = err
			return n, err
		}
	}
	if n == 0 {
		// No flow control tokens to send back.
		return
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()

	var connAdd, streamAdd int32
	// Check the conn-level first, before the stream-level.
	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
		connAdd = transportDefaultConnFlow - v
		cc.inflow.add(connAdd)
	}
	if err == nil { // No need to refresh if the stream is over or failed.
		// Consider any buffered body data (read from the conn but not
		// consumed by the client) when computing flow control for this
		// stream.
		v := int(cs.inflow.available()) + cs.bufPipe.Len()
		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
			streamAdd = int32(transportDefaultStreamFlow - v)
			cs.inflow.add(streamAdd)
		}
	}
	if connAdd != 0 || streamAdd != 0 {
		// Write both updates under wmu so they go out together.
		cc.wmu.Lock()
		defer cc.wmu.Unlock()
		if connAdd != 0 {
			cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
		}
		if streamAdd != 0 {
			cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
		}
		cc.bw.Flush()
	}
	return
}
2138
// errClosedResponseBody is the sticky error reads see after Close has
// been called on a transportResponseBody.
var errClosedResponseBody = errors.New("http2: response body closed")
2140
// Close implements io.Closer. If the server hasn't already ended the
// stream it sends RST_STREAM(CANCEL), returns any unread bytes' worth of
// connection-level flow control, then breaks the body pipe and forgets
// the stream.
func (b transportResponseBody) Close() error {
	cs := b.cs
	cc := cs.cc

	serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
	unread := cs.bufPipe.Len()

	if unread > 0 || !serverSentStreamEnd {
		// Lock order: mu before wmu, matching the rest of the file.
		cc.mu.Lock()
		cc.wmu.Lock()
		if !serverSentStreamEnd {
			cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
			cs.didReset = true
		}
		// Return connection-level flow control.
		if unread > 0 {
			cc.inflow.add(int32(unread))
			cc.fr.WriteWindowUpdate(0, uint32(unread))
		}
		cc.bw.Flush()
		cc.wmu.Unlock()
		cc.mu.Unlock()
	}

	cs.bufPipe.BreakWithError(errClosedResponseBody)
	cc.forgetStreamID(cs.ID)
	return nil
}
2169
// processData handles a DATA frame: it validates the stream state,
// accounts for stream-level flow control, refunds window for padding and
// for data on dead streams, and delivers the payload to the stream's
// body pipe.
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, f.StreamEnded())
	data := f.Data()
	if cs == nil {
		cc.mu.Lock()
		neverSent := cc.nextStreamID
		cc.mu.Unlock()
		if f.StreamID >= neverSent {
			// We never asked for this.
			cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
			return ConnectionError(ErrCodeProtocol)
		}
		// We probably did ask for this, but canceled. Just ignore it.
		// TODO: be stricter here? only silently ignore things which
		// we canceled, but not things which were closed normally
		// by the peer? Tough without accumulating too much state.

		// But at least return their flow control:
		if f.Length > 0 {
			cc.mu.Lock()
			cc.inflow.add(int32(f.Length))
			cc.mu.Unlock()

			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
		return nil
	}
	if !cs.firstByte {
		cc.logf("protocol error: received DATA before a HEADERS frame")
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
			Code:     ErrCodeProtocol,
		})
		return nil
	}
	if f.Length > 0 {
		if cs.req.Method == "HEAD" && len(data) > 0 {
			cc.logf("protocol error: received DATA on a HEAD request")
			rl.endStreamError(cs, StreamError{
				StreamID: f.StreamID,
				Code:     ErrCodeProtocol,
			})
			return nil
		}
		// Check the stream-level flow-control window. Note f.Length (the
		// full frame, including padding) is what consumes window, not
		// just len(data).
		cc.mu.Lock()
		if cs.inflow.available() >= int32(f.Length) {
			cs.inflow.take(int32(f.Length))
		} else {
			cc.mu.Unlock()
			return ConnectionError(ErrCodeFlowControl)
		}
		// Return any padded flow control now, since we won't
		// refund it later on body reads.
		var refund int
		if pad := int(f.Length) - len(data); pad > 0 {
			refund += pad
		}
		// Return len(data) now if the stream is already closed,
		// since data will never be read.
		didReset := cs.didReset
		if didReset {
			refund += len(data)
		}
		if refund > 0 {
			cc.inflow.add(int32(refund))
			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(refund))
			if !didReset {
				cs.inflow.add(int32(refund))
				cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
			}
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
		cc.mu.Unlock()

		if len(data) > 0 && !didReset {
			if _, err := cs.bufPipe.Write(data); err != nil {
				rl.endStreamError(cs, err)
				return err
			}
		}
	}

	if f.StreamEnded() {
		rl.endStream(cs)
	}
	return nil
}
2264
// endStream marks cs as cleanly ended by the peer (END_STREAM observed);
// the nil error means EOF plus trailer copying in endStreamError.
func (rl *clientConnReadLoop) endStream(cs *clientStream) {
	// TODO: check that any declared content-length matches, like
	// server.go's (*stream).endStream method.
	rl.endStreamError(cs, nil)
}
2270
// endStreamError terminates cs with err. A nil err means a clean end of
// stream: the body pipe is closed with io.EOF and the received trailers
// are copied into the response (via copyTrailers) when the pipe drains.
func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
	var code func()
	if err == nil {
		err = io.EOF
		code = cs.copyTrailers
	}
	if isConnectionCloseRequest(cs.req) {
		// The request asked for Connection: close; retire this
		// connection once it goes idle.
		rl.closeWhenIdle = true
	}
	cs.bufPipe.closeWithErrorAndCode(err, code)

	// Non-blocking send: RoundTrip may have already returned, in which
	// case nobody is receiving on resc.
	select {
	case cs.resc <- resAndError{err: err}:
	default:
	}
}
2287
2288func (cs *clientStream) copyTrailers() {
2289 for k, vv := range cs.trailer {
2290 t := cs.resTrailer
2291 if *t == nil {
2292 *t = make(http.Header)
2293 }
2294 (*t)[k] = vv
2295 }
2296}
2297
2298func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
2299 cc := rl.cc
2300 cc.t.connPool().MarkDead(cc)
2301 if f.ErrCode != 0 {
2302 // TODO: deal with GOAWAY more. particularly the error code
2303 cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
2304 }
2305 cc.setGoAway(f)
2306 return nil
2307}
2308
// processSettings applies a SETTINGS frame from the server and, for a
// non-ack frame, writes the required SETTINGS ack back.
func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
	cc := rl.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()

	if f.IsAck() {
		if cc.wantSettingsAck {
			cc.wantSettingsAck = false
			return nil
		}
		// An ack we weren't expecting is a protocol error.
		return ConnectionError(ErrCodeProtocol)
	}

	err := f.ForeachSetting(func(s Setting) error {
		switch s.ID {
		case SettingMaxFrameSize:
			cc.maxFrameSize = s.Val
		case SettingMaxConcurrentStreams:
			cc.maxConcurrentStreams = s.Val
		case SettingMaxHeaderListSize:
			cc.peerMaxHeaderListSize = uint64(s.Val)
		case SettingInitialWindowSize:
			// Values above the maximum flow-control
			// window size of 2^31-1 MUST be treated as a
			// connection error (Section 5.4.1) of type
			// FLOW_CONTROL_ERROR.
			if s.Val > math.MaxInt32 {
				return ConnectionError(ErrCodeFlowControl)
			}

			// Adjust flow control of currently-open
			// frames by the difference of the old initial
			// window size and this one.
			delta := int32(s.Val) - int32(cc.initialWindowSize)
			for _, cs := range cc.streams {
				cs.flow.add(delta)
			}
			// Wake writers that may now have window available.
			cc.cond.Broadcast()

			cc.initialWindowSize = s.Val
		default:
			// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
			cc.vlogf("Unhandled Setting: %v", s)
		}
		return nil
	})
	if err != nil {
		return err
	}

	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	cc.fr.WriteSettingsAck()
	cc.bw.Flush()
	return cc.werr
}
2366
// processWindowUpdate applies a WINDOW_UPDATE frame to the connection's
// (stream 0) or a stream's outbound flow-control window and wakes any
// writers blocked waiting for window.
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, false)
	if f.StreamID != 0 && cs == nil {
		// Update for an unknown or already-forgotten stream; ignore.
		return nil
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()

	fl := &cc.flow
	if cs != nil {
		fl = &cs.flow
	}
	if !fl.add(int32(f.Increment)) {
		// Window would exceed 2^31-1: connection error.
		return ConnectionError(ErrCodeFlowControl)
	}
	cc.cond.Broadcast()
	return nil
}
2387
// processResetStream handles an RST_STREAM frame from the server by
// recording the reset error on the stream, closing its body pipe, and
// waking any goroutines blocked on flow control.
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
	cs := rl.cc.streamByID(f.StreamID, true)
	if cs == nil {
		// TODO: return error if server tries to RST_STEAM an idle stream
		return nil
	}
	select {
	case <-cs.peerReset:
		// Already reset.
		// This is the only goroutine
		// which closes this, so there
		// isn't a race.
	default:
		err := streamError(cs.ID, f.ErrCode)
		cs.resetErr = err
		close(cs.peerReset)
		cs.bufPipe.CloseWithError(err)
		cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
	}
	return nil
}
2409
// Ping sends a PING frame to the server and waits for the ack. It
// returns early with an error if ctx is done or the connection's read
// loop exits first.
func (cc *ClientConn) Ping(ctx context.Context) error {
	c := make(chan struct{})
	// Generate a random payload, retrying until it doesn't collide with
	// another in-flight ping, so the ack can be matched to this call.
	var p [8]byte
	for {
		if _, err := rand.Read(p[:]); err != nil {
			return err
		}
		cc.mu.Lock()
		// check for dup before insert
		if _, found := cc.pings[p]; !found {
			cc.pings[p] = c
			cc.mu.Unlock()
			break
		}
		cc.mu.Unlock()
	}
	cc.wmu.Lock()
	if err := cc.fr.WritePing(false, p); err != nil {
		cc.wmu.Unlock()
		return err
	}
	if err := cc.bw.Flush(); err != nil {
		cc.wmu.Unlock()
		return err
	}
	cc.wmu.Unlock()
	// The read loop (processPing) closes c when the ack arrives.
	select {
	case <-c:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-cc.readerDone:
		// connection closed
		return cc.readerErr
	}
}
2448
2449func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
2450 if f.IsAck() {
2451 cc := rl.cc
2452 cc.mu.Lock()
2453 defer cc.mu.Unlock()
2454 // If ack, notify listener if any
2455 if c, ok := cc.pings[f.Data]; ok {
2456 close(c)
2457 delete(cc.pings, f.Data)
2458 }
2459 return nil
2460 }
2461 cc := rl.cc
2462 cc.wmu.Lock()
2463 defer cc.wmu.Unlock()
2464 if err := cc.fr.WritePing(true, f.Data); err != nil {
2465 return err
2466 }
2467 return cc.bw.Flush()
2468}
2469
2470func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
2471 // We told the peer we don't want them.
2472 // Spec says:
2473 // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
2474 // setting of the peer endpoint is set to 0. An endpoint that
2475 // has set this setting and has received acknowledgement MUST
2476 // treat the receipt of a PUSH_PROMISE frame as a connection
2477 // error (Section 5.4.1) of type PROTOCOL_ERROR."
2478 return ConnectionError(ErrCodeProtocol)
2479}
2480
2481func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
2482 // TODO: map err to more interesting error codes, once the
2483 // HTTP community comes up with some. But currently for
2484 // RST_STREAM there's no equivalent to GOAWAY frame's debug
2485 // data, and the error codes are all pretty vague ("cancel").
2486 cc.wmu.Lock()
2487 cc.fr.WriteRSTStream(streamID, code)
2488 cc.bw.Flush()
2489 cc.wmu.Unlock()
2490}
2491
var (
	// errResponseHeaderListSize: the server's response header block
	// exceeded the limit we advertised.
	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
	// errRequestHeaderListSize: our request headers exceed the limit the
	// peer advertised via SETTINGS_MAX_HEADER_LIST_SIZE.
	errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
)
2496
// logf logs unconditionally, via the connection's Transport.
func (cc *ClientConn) logf(format string, args ...interface{}) {
	cc.t.logf(format, args...)
}

// vlogf logs only when verbose logging is enabled, via the Transport.
func (cc *ClientConn) vlogf(format string, args ...interface{}) {
	cc.t.vlogf(format, args...)
}

// vlogf logs only when the package-level VerboseLogs flag is set.
func (t *Transport) vlogf(format string, args ...interface{}) {
	if VerboseLogs {
		t.logf(format, args...)
	}
}

// logf logs unconditionally via the standard library logger.
func (t *Transport) logf(format string, args ...interface{}) {
	log.Printf(format, args...)
}
2514
// noBody is a reusable, already-exhausted response body for responses
// that cannot carry one (HEAD requests, or END_STREAM on the headers).
var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
2516
// strSliceContains reports whether s is an element of ss.
func strSliceContains(ss []string, s string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}
2525
// erringRoundTripper is an http.RoundTripper that always fails with a
// fixed error.
type erringRoundTripper struct{ err error }

// RoundTrip implements http.RoundTripper by returning the stored error.
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
2529
// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read.
type gzipReader struct {
	_    incomparable   // prevents == comparison of gzipReader values
	body io.ReadCloser  // underlying Response.Body
	zr   *gzip.Reader   // lazily-initialized gzip reader
	zerr error          // sticky error
}
2538
2539func (gz *gzipReader) Read(p []byte) (n int, err error) {
2540 if gz.zerr != nil {
2541 return 0, gz.zerr
2542 }
2543 if gz.zr == nil {
2544 gz.zr, err = gzip.NewReader(gz.body)
2545 if err != nil {
2546 gz.zerr = err
2547 return 0, err
2548 }
2549 }
2550 return gz.zr.Read(p)
2551}
2552
// Close closes the underlying response body; the gzip reader itself
// holds no resources that require closing.
func (gz *gzipReader) Close() error {
	return gz.body.Close()
}
2556
// errorReader is an io.Reader that always fails with a fixed error.
type errorReader struct{ err error }

// Read implements io.Reader by returning the stored error.
func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
2560
// bodyWriterState encapsulates various state around the Transport's writing
// of the request body, particularly regarding doing delayed writes of the body
// when the request contains "Expect: 100-continue".
type bodyWriterState struct {
	cs     *clientStream
	timer  *time.Timer   // if non-nil, we're doing a delayed write
	fnonce *sync.Once    // to call fn with; ensures fn runs at most once
	fn     func()        // the code to run in the goroutine, writing the body
	resc   chan error    // result of fn's execution
	delay  time.Duration // how long we should delay a delayed write for
}
2572
// getBodyWriterState prepares the state for writing cs's request body.
// For "Expect: 100-continue" requests with a nonzero configured timeout
// the write is deferred behind a timer; otherwise scheduleBodyWrite will
// start it immediately.
func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
	s.cs = cs
	if body == nil {
		// Nothing to write; scheduleBodyWrite won't be used.
		return
	}
	resc := make(chan error, 1)
	s.resc = resc
	s.fn = func() {
		// Record that a write began before it can observe any error, so
		// retry logic knows the request may have partially gone out.
		cs.cc.mu.Lock()
		cs.startedWrite = true
		cs.cc.mu.Unlock()
		resc <- cs.writeRequestBody(body, cs.req.Body)
	}
	s.delay = t.expectContinueTimeout()
	if s.delay == 0 ||
		!httpguts.HeaderValuesContainsToken(
			cs.req.Header["Expect"],
			"100-continue") {
		return
	}
	s.fnonce = new(sync.Once)

	// Arm the timer with a very large duration, which we'll
	// intentionally lower later. It has to be large now because
	// we need a handle to it before writing the headers, but the
	// s.delay value is defined to not start until after the
	// request headers were written.
	const hugeDuration = 365 * 24 * time.Hour
	s.timer = time.AfterFunc(hugeDuration, func() {
		s.fnonce.Do(s.fn)
	})
	return
}
2606
// cancel stops the delayed-write timer, if one was armed. It does not
// stop a body write that has already started.
func (s bodyWriterState) cancel() {
	if s.timer != nil {
		s.timer.Stop()
	}
}
2612
// on100 is invoked when the server sends 100 Continue: it cancels the
// delay timer and starts the body write immediately (at most once, via
// fnonce).
func (s bodyWriterState) on100() {
	if s.timer == nil {
		// If we didn't do a delayed write, ignore the server's
		// bogus 100 continue response.
		return
	}
	s.timer.Stop()
	go func() { s.fnonce.Do(s.fn) }()
}
2622
// scheduleBodyWrite starts writing the body, either immediately (in
// the common case) or after the delay timeout. It should not be
// called until after the headers have been written.
func (s bodyWriterState) scheduleBodyWrite() {
	if s.timer == nil {
		// We're not doing a delayed write (see
		// getBodyWriterState), so just start the writing
		// goroutine immediately.
		go s.fn()
		return
	}
	traceWait100Continue(s.cs.trace)
	if s.timer.Stop() {
		// The huge placeholder duration hadn't fired; now that headers
		// are out, re-arm with the real expect-continue delay.
		s.timer.Reset(s.delay)
	}
}
2639
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection,
// i.e. req.Close is set or a "Connection: close" header token is present.
func isConnectionCloseRequest(req *http.Request) bool {
	return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
}
2645
// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
	defer func() {
		// RegisterProtocol panics on invalid registration (e.g. a
		// duplicate); surface that as an error instead.
		if e := recover(); e != nil {
			err = fmt.Errorf("%v", e)
		}
	}()
	t.RegisterProtocol("https", rt)
	return nil
}
2657
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already has a cached connection to the host.
// (The field is exported so it can be accessed via reflect from net/http; tested
// by TestNoDialH2RoundTripperType)
type noDialH2RoundTripper struct{ *Transport }
2663
// RoundTrip performs the request only if a cached HTTP/2 connection to
// the host already exists; a no-cached-connection error is translated to
// http.ErrSkipAltProtocol so net/http falls back to its HTTP/1 path.
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	res, err := rt.Transport.RoundTrip(req)
	if isNoCachedConnError(err) {
		return nil, http.ErrSkipAltProtocol
	}
	return res, err
}
2671
// idleConnTimeout returns the idle timeout configured on the wrapped
// net/http Transport, or 0 (no timeout) if there is none.
func (t *Transport) idleConnTimeout() time.Duration {
	if t.t1 != nil {
		return t.t1.IdleConnTimeout
	}
	return 0
}
2678
2679func traceGetConn(req *http.Request, hostPort string) {
2680 trace := httptrace.ContextClientTrace(req.Context())
2681 if trace == nil || trace.GetConn == nil {
2682 return
2683 }
2684 trace.GetConn(hostPort)
2685}
2686
2687func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
2688 trace := httptrace.ContextClientTrace(req.Context())
2689 if trace == nil || trace.GotConn == nil {
2690 return
2691 }
2692 ci := httptrace.GotConnInfo{Conn: cc.tconn}
2693 ci.Reused = reused
2694 cc.mu.Lock()
2695 ci.WasIdle = len(cc.streams) == 0 && reused
2696 if ci.WasIdle && !cc.lastActive.IsZero() {
2697 ci.IdleTime = time.Now().Sub(cc.lastActive)
2698 }
2699 cc.mu.Unlock()
2700
2701 trace.GotConn(ci)
2702}
2703
2704func traceWroteHeaders(trace *httptrace.ClientTrace) {
2705 if trace != nil && trace.WroteHeaders != nil {
2706 trace.WroteHeaders()
2707 }
2708}
2709
2710func traceGot100Continue(trace *httptrace.ClientTrace) {
2711 if trace != nil && trace.Got100Continue != nil {
2712 trace.Got100Continue()
2713 }
2714}
2715
2716func traceWait100Continue(trace *httptrace.ClientTrace) {
2717 if trace != nil && trace.Wait100Continue != nil {
2718 trace.Wait100Continue()
2719 }
2720}
2721
2722func traceWroteRequest(trace *httptrace.ClientTrace, err error) {
2723 if trace != nil && trace.WroteRequest != nil {
2724 trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
2725 }
2726}
2727
2728func traceFirstResponseByte(trace *httptrace.ClientTrace) {
2729 if trace != nil && trace.GotFirstResponseByte != nil {
2730 trace.GotFirstResponseByte()
2731 }
2732}