Mirror of https://github.com/openfaas/faasd.git, synced 2025-06-24 07:43:24 +00:00
Bump golang.org/x/net from 0.0.0-20220722155237-a158d28d115b to 0.7.0
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.0.0-20220722155237-a158d28d115b to 0.7.0.
- [Release notes](https://github.com/golang/net/releases)
- [Commits](https://github.com/golang/net/commits/v0.7.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Committed by: Alex Ellis
Parent: aaf1811052
Commit: c5c0fa05a2

vendor/golang.org/x/net/http2/server.go (generated, vendored): 331 lines changed
@@ -98,6 +98,19 @@ type Server struct {
 	// the HTTP/2 spec's recommendations.
 	MaxConcurrentStreams uint32
 
+	// MaxDecoderHeaderTableSize optionally specifies the http2
+	// SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+	// informs the remote endpoint of the maximum size of the header compression
+	// table used to decode header blocks, in octets. If zero, the default value
+	// of 4096 is used.
+	MaxDecoderHeaderTableSize uint32
+
+	// MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+	// header compression table used for encoding request headers. Received
+	// SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+	// the default value of 4096 is used.
+	MaxEncoderHeaderTableSize uint32
+
 	// MaxReadFrameSize optionally specifies the largest frame
 	// this server is willing to read. A valid value is between
 	// 16k and 16M, inclusive. If zero or otherwise invalid, a
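Not part of the commit, but for context: once this version of x/net is vendored, an embedding server can set the two new knobs through the public API. A minimal sketch; the values and TLS file names are illustrative placeholders:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	h2 := &http2.Server{
		// Advertise a 16 KiB HPACK table for decoding peer headers
		// (SETTINGS_HEADER_TABLE_SIZE); zero would mean the 4096 default.
		MaxDecoderHeaderTableSize: 16 << 10,
		// Never let our encoder's table grow past 4 KiB, whatever
		// SETTINGS_HEADER_TABLE_SIZE the peer offers.
		MaxEncoderHeaderTableSize: 4 << 10,
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```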
@@ -143,7 +156,7 @@ type Server struct {
 }
 
 func (s *Server) initialConnRecvWindowSize() int32 {
-	if s.MaxUploadBufferPerConnection > initialWindowSize {
+	if s.MaxUploadBufferPerConnection >= initialWindowSize {
 		return s.MaxUploadBufferPerConnection
 	}
 	return 1 << 20
@@ -170,6 +183,20 @@ func (s *Server) maxConcurrentStreams() uint32 {
 	return defaultMaxStreams
 }
 
+func (s *Server) maxDecoderHeaderTableSize() uint32 {
+	if v := s.MaxDecoderHeaderTableSize; v > 0 {
+		return v
+	}
+	return initialHeaderTableSize
+}
+
+func (s *Server) maxEncoderHeaderTableSize() uint32 {
+	if v := s.MaxEncoderHeaderTableSize; v > 0 {
+		return v
+	}
+	return initialHeaderTableSize
+}
+
 // maxQueuedControlFrames is the maximum number of control frames like
 // SETTINGS, PING and RST_STREAM that will be queued for writing before
 // the connection is closed to prevent memory exhaustion attacks.
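Both accessors follow the package's usual zero-means-default convention. A standalone illustration of the pattern; `tableSizeOrDefault` is a hypothetical stand-in, not a function from the patch:

```go
package main

import "fmt"

// initialHeaderTableSize mirrors the HTTP/2 default of 4096 octets
// (RFC 9113) that the accessors above fall back to.
const initialHeaderTableSize = 4096

// tableSizeOrDefault models the accessor pattern: a zero config value
// selects the protocol default.
func tableSizeOrDefault(configured uint32) uint32 {
	if configured > 0 {
		return configured
	}
	return initialHeaderTableSize
}

func main() {
	fmt.Println(tableSizeOrDefault(0))        // 4096
	fmt.Println(tableSizeOrDefault(16 << 10)) // 16384
}
```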
@@ -394,7 +421,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 		advMaxStreams:               s.maxConcurrentStreams(),
 		initialStreamSendWindowSize: initialWindowSize,
 		maxFrameSize:                initialMaxFrameSize,
-		headerTableSize:             initialHeaderTableSize,
 		serveG:                      newGoroutineLock(),
 		pushEnabled:                 true,
 		sawClientPreface:            opts.SawClientPreface,
@@ -422,14 +448,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 	// configured value for inflow, that will be updated when we send a
 	// WINDOW_UPDATE shortly after sending SETTINGS.
 	sc.flow.add(initialWindowSize)
-	sc.inflow.add(initialWindowSize)
+	sc.inflow.init(initialWindowSize)
 	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
 
 	fr := NewFramer(sc.bw, c)
 	if s.CountError != nil {
 		fr.countError = s.CountError
 	}
-	fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+	fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
 	fr.MaxHeaderListSize = sc.maxHeaderListSize()
 	fr.SetMaxReadFrameSize(s.maxReadFrameSize())
 	sc.framer = fr
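The wiring above can be reproduced with the public hpack package: the encoder gets an upper bound it will respect even if the peer offers a larger table, and the decoder is constructed with the table size the server advertises. A self-contained sketch with illustrative values:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer

	enc := hpack.NewEncoder(&buf)
	// Cap the encoder's dynamic table, as ServeConn now does with
	// s.maxEncoderHeaderTableSize().
	enc.SetMaxDynamicTableSizeLimit(4096)
	_ = enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})

	// Create the decoder with the advertised table size, mirroring
	// hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) above.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		fmt.Println("decode error:", err)
	}
}
```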
@@ -536,8 +563,8 @@ type serverConn struct {
 	wroteFrameCh     chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
 	bodyReadCh       chan bodyReadMsg      // from handlers -> serve
 	serveMsgCh       chan interface{}      // misc messages & code to send to / run on the serve loop
-	flow             flow                  // conn-wide (not stream-specific) outbound flow control
-	inflow           flow                  // conn-wide inbound flow control
+	flow             outflow               // conn-wide (not stream-specific) outbound flow control
+	inflow           inflow                // conn-wide inbound flow control
 	tlsState         *tls.ConnectionState // shared by all handlers, like net/http
 	remoteAddrStr    string
 	writeSched       WriteScheduler
@@ -559,9 +586,9 @@ type serverConn struct {
 	streams                     map[uint32]*stream
 	initialStreamSendWindowSize int32
 	maxFrameSize                int32
-	headerTableSize             uint32
 	peerMaxHeaderListSize       uint32            // zero means unknown (default)
 	canonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case
+	canonHeaderKeysSize         int               // canonHeader keys size in bytes
 	writingFrame                bool              // started writing a frame (on serve goroutine or separate)
 	writingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
 	needsFrameFlush             bool              // last frame write wasn't a flush
@@ -614,15 +641,17 @@ type stream struct {
 	cancelCtx func()
 
 	// owned by serverConn's serve loop:
-	bodyBytes        int64   // body bytes seen so far
-	declBodyBytes    int64   // or -1 if undeclared
-	flow             flow    // limits writing from Handler to client
-	inflow           flow    // what the client is allowed to POST/etc to us
+	bodyBytes        int64     // body bytes seen so far
+	declBodyBytes    int64     // or -1 if undeclared
+	flow             outflow   // limits writing from Handler to client
+	inflow           inflow    // what the client is allowed to POST/etc to us
 	state            streamState
 	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
 	gotTrailerHeader bool        // HEADER frame for trailers was seen
 	wroteHeaders     bool        // whether we wrote headers (not status 100)
+	readDeadline     *time.Timer // nil if unused
 	writeDeadline    *time.Timer // nil if unused
+	closeErr         error       // set before cw is closed
 
 	trailer    http.Header // accumulated trailers
 	reqTrailer http.Header // handler's Request.Trailer
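The `inflow` and `outflow` types referenced here live in flow.go, which is outside this diff. A simplified model of the inbound side, under the assumption that `take` consumes window as DATA arrives and `add` batches refunds until a WINDOW_UPDATE is worth sending; the threshold constant is an assumption, not a value taken from this patch:

```go
package main

import "fmt"

// inflowModel is a toy stand-in for the real inflow type in flow.go.
type inflowModel struct {
	avail  int32 // window the peer may still use
	unsent int32 // credit returned but not yet announced to the peer
}

func (f *inflowModel) init(n int32) { f.avail = n }

// take consumes window when a DATA frame arrives; false means the peer
// overran its window (a flow-control error in the caller).
func (f *inflowModel) take(n uint32) bool {
	if int32(n) > f.avail {
		return false
	}
	f.avail -= int32(n)
	return true
}

// add returns the WINDOW_UPDATE increment to send now, batching small
// refunds so each body read does not emit its own frame.
func (f *inflowModel) add(n int) int32 {
	const minRefresh = 4 << 10 // assumed batching threshold
	f.unsent += int32(n)
	if f.unsent < minRefresh {
		return 0
	}
	f.avail += f.unsent
	send := f.unsent
	f.unsent = 0
	return send
}

func main() {
	var in inflowModel
	in.init(65535)
	fmt.Println(in.take(1024)) // true
	fmt.Println(in.add(1024))  // 0 until the refund batch threshold is hit
}
```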
@@ -738,6 +767,13 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
 	}
 }
 
+// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
+// of the entries in the canonHeader cache.
+// This should be larger than the size of unique, uncommon header keys likely to
+// be sent by the peer, while not so high as to permit unreasonable memory usage
+// if the peer sends an unbounded number of unique header keys.
+const maxCachedCanonicalHeadersKeysSize = 2048
+
 func (sc *serverConn) canonicalHeader(v string) string {
 	sc.serveG.check()
 	buildCommonHeaderMapsOnce()
@@ -753,14 +789,10 @@ func (sc *serverConn) canonicalHeader(v string) string {
 		sc.canonHeader = make(map[string]string)
 	}
 	cv = http.CanonicalHeaderKey(v)
-	// maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
-	// entries in the canonHeader cache. This should be larger than the number
-	// of unique, uncommon header keys likely to be sent by the peer, while not
-	// so high as to permit unreasonable memory usage if the peer sends an unbounded
-	// number of unique header keys.
-	const maxCachedCanonicalHeaders = 32
-	if len(sc.canonHeader) < maxCachedCanonicalHeaders {
+	size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
+	if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
 		sc.canonHeader[v] = cv
+		sc.canonHeaderKeysSize += size
 	}
 	return cv
 }
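The replacement bounds the cache by estimated bytes rather than entry count, so a peer sending many long, unique header names cannot pin unbounded memory. The same admission logic, lifted into a standalone sketch:

```go
package main

import (
	"fmt"
	"net/http"
)

const maxCacheBytes = 2048 // same budget as the patch

var (
	cache      = make(map[string]string)
	cacheBytes int
)

// canonical memoizes http.CanonicalHeaderKey, admitting new entries only
// while the estimated byte cost of the cache stays under budget.
func canonical(v string) string {
	if cv, ok := cache[v]; ok {
		return cv
	}
	cv := http.CanonicalHeaderKey(v)
	size := 100 + len(v)*2 // same estimate as the patch: map overhead + key + value
	if cacheBytes+size <= maxCacheBytes {
		cache[v] = cv
		cacheBytes += size
	}
	return cv
}

func main() {
	fmt.Println(canonical("x-request-id")) // X-Request-Id
}
```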
@@ -811,8 +843,13 @@ type frameWriteResult struct {
 // and then reports when it's done.
 // At most one goroutine can be running writeFrameAsync at a time per
 // serverConn.
-func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
-	err := wr.write.writeFrame(sc)
+func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
+	var err error
+	if wd == nil {
+		err = wr.write.writeFrame(sc)
+	} else {
+		err = sc.framer.endWrite()
+	}
 	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
 }
 
@@ -862,6 +899,7 @@ func (sc *serverConn) serve() {
 			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
 			{SettingMaxConcurrentStreams, sc.advMaxStreams},
 			{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+			{SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
 			{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
 		},
 	})
@@ -948,6 +986,8 @@ func (sc *serverConn) serve() {
 			}
 		case *startPushRequest:
 			sc.startPush(v)
+		case func(*serverConn):
+			v(sc)
 		default:
 			panic(fmt.Sprintf("unexpected type %T", v))
 		}
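The new `case func(*serverConn)` arm lets other goroutines hand the serve loop arbitrary code to run; the deadline setters later in this diff rely on it via sendServeMsg. A generic sketch of that message-passing pattern, using toy types rather than anything from x/net:

```go
package main

import "fmt"

// loop owns state that must only be touched from its own goroutine.
type loop struct{ msgs chan interface{} }

func (l *loop) run() {
	for m := range l.msgs {
		switch v := m.(type) {
		case func(*loop):
			v(l) // run caller-supplied code on the loop goroutine
		default:
			fmt.Printf("unexpected type %T\n", v)
		}
	}
}

func main() {
	l := &loop{msgs: make(chan interface{})}
	done := make(chan struct{})
	go l.run()
	// Any goroutine can schedule work onto the loop without locking.
	l.msgs <- func(*loop) {
		fmt.Println("ran on the serve loop")
		close(done)
	}
	<-done
}
```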
@@ -1216,9 +1256,16 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
 		sc.writingFrameAsync = false
 		err := wr.write.writeFrame(sc)
 		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
+	} else if wd, ok := wr.write.(*writeData); ok {
+		// Encode the frame in the serve goroutine, to ensure we don't have
+		// any lingering asynchronous references to data passed to Write.
+		// See https://go.dev/issue/58446.
+		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
+		sc.writingFrameAsync = true
+		go sc.writeFrameAsync(wr, wd)
 	} else {
 		sc.writingFrameAsync = true
-		go sc.writeFrameAsync(wr)
+		go sc.writeFrameAsync(wr, nil)
 	}
 }
 
@@ -1371,6 +1418,9 @@ func (sc *serverConn) startGracefulShutdownInternal() {
 func (sc *serverConn) goAway(code ErrCode) {
 	sc.serveG.check()
 	if sc.inGoAway {
+		if sc.goAwayCode == ErrCodeNo {
+			sc.goAwayCode = code
+		}
 		return
 	}
 	sc.inGoAway = true
@@ -1458,6 +1508,21 @@ func (sc *serverConn) processFrame(f Frame) error {
 		sc.sawFirstSettings = true
 	}
 
+	// Discard frames for streams initiated after the identified last
+	// stream sent in a GOAWAY, or all frames after sending an error.
+	// We still need to return connection-level flow control for DATA frames.
+	// RFC 9113 Section 6.8.
+	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
+
+		if f, ok := f.(*DataFrame); ok {
+			if !sc.inflow.take(f.Length) {
+				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
+			}
+			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+		}
+		return nil
+	}
+
 	switch f := f.(type) {
 	case *SettingsFrame:
 		return sc.processSettings(f)
@@ -1500,9 +1565,6 @@ func (sc *serverConn) processPing(f *PingFrame) error {
 		// PROTOCOL_ERROR."
 		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
 	}
-	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
-		return nil
-	}
 	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
 	return nil
 }
@@ -1564,6 +1626,9 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
 	}
 	st.state = stateClosed
+	if st.readDeadline != nil {
+		st.readDeadline.Stop()
+	}
 	if st.writeDeadline != nil {
 		st.writeDeadline.Stop()
 	}
@@ -1589,6 +1654,14 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 
 		p.CloseWithError(err)
 	}
+	if e, ok := err.(StreamError); ok {
+		if e.Cause != nil {
+			err = e.Cause
+		} else {
+			err = errStreamClosed
+		}
+	}
+	st.closeErr = err
 	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
 	sc.writeSched.CloseStream(st.id)
 }
@@ -1631,7 +1704,6 @@ func (sc *serverConn) processSetting(s Setting) error {
 	}
 	switch s.ID {
 	case SettingHeaderTableSize:
-		sc.headerTableSize = s.Val
 		sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
 	case SettingEnablePush:
 		sc.pushEnabled = s.Val != 0
@@ -1685,16 +1757,6 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
 func (sc *serverConn) processData(f *DataFrame) error {
 	sc.serveG.check()
 	id := f.Header().StreamID
-	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || id > sc.maxClientStreamID) {
-		// Discard all DATA frames if the GOAWAY is due to an
-		// error, or:
-		//
-		// Section 6.8: After sending a GOAWAY frame, the sender
-		// can discard frames for streams initiated by the
-		// receiver with identifiers higher than the identified
-		// last stream.
-		return nil
-	}
 
 	data := f.Data()
 	state, st := sc.state(id)
@@ -1725,14 +1787,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// But still enforce their connection-level flow control,
 		// and return any flow control bytes since we're not going
 		// to consume them.
-		if sc.inflow.available() < int32(f.Length) {
+		if !sc.inflow.take(f.Length) {
 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
 		}
-		// Deduct the flow control from inflow, since we're
-		// going to immediately add it back in
-		// sendWindowUpdate, which also schedules sending the
-		// frames.
-		sc.inflow.take(int32(f.Length))
 		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 
 		if st != nil && st.resetQueued {
@@ -1747,6 +1804,11 @@ func (sc *serverConn) processData(f *DataFrame) error {
 
 	// Sender sending more than they'd declared?
 	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+		if !sc.inflow.take(f.Length) {
+			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
+		}
+		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
 		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
 		// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
 		// value of a content-length header field does not equal the sum of the
@@ -1755,10 +1817,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
 	}
 	if f.Length > 0 {
 		// Check whether the client has flow control quota.
-		if st.inflow.available() < int32(f.Length) {
+		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
 			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
 		}
-		st.inflow.take(int32(f.Length))
 
 		if len(data) > 0 {
 			wrote, err := st.body.Write(data)
@@ -1774,10 +1835,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
 
 		// Return any padded flow control now, since we won't
 		// refund it later on body reads.
-		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
-			sc.sendWindowUpdate32(nil, pad)
-			sc.sendWindowUpdate32(st, pad)
-		}
+		// Call sendWindowUpdate even if there is no padding,
+		// to return buffered flow control credit if the sent
+		// window has shrunk.
+		pad := int32(f.Length) - int32(len(data))
+		sc.sendWindowUpdate32(nil, pad)
+		sc.sendWindowUpdate32(st, pad)
 	}
 	if f.StreamEnded() {
 		st.endStream()
@@ -1831,19 +1894,27 @@ func (st *stream) copyTrailersToHandlerRequest() {
 	}
 }
 
+// onReadTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's ReadTimeout has fired.
+func (st *stream) onReadTimeout() {
+	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
+	// returning the bare error.
+	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+}
+
 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's WriteTimeout has fired.
 func (st *stream) onWriteTimeout() {
-	st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
+	st.sc.writeFrameFromHandler(FrameWriteRequest{write: StreamError{
+		StreamID: st.id,
+		Code:     ErrCodeInternal,
+		Cause:    os.ErrDeadlineExceeded,
+	}})
 }
 
 func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	sc.serveG.check()
 	id := f.StreamID
-	if sc.inGoAway {
-		// Ignore.
-		return nil
-	}
 	// http://tools.ietf.org/html/rfc7540#section-5.1.1
 	// Streams initiated by a client MUST use odd-numbered stream
 	// identifiers. [...] An endpoint that receives an unexpected
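A sketch of what the wrapped error enables on the handler side: reads that fail because the stream's read deadline fired can now be detected with errors.Is, since the patch wraps os.ErrDeadlineExceeded. The handler below is illustrative, not part of the commit:

```go
package main

import (
	"errors"
	"io"
	"net/http"
	"os"
)

func handler(w http.ResponseWriter, r *http.Request) {
	if _, err := io.ReadAll(r.Body); err != nil {
		// On an HTTP/2 stream whose read deadline fired, the body read
		// error now unwraps to os.ErrDeadlineExceeded.
		if errors.Is(err, os.ErrDeadlineExceeded) {
			http.Error(w, "request body timed out", http.StatusRequestTimeout)
			return
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```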
@@ -1946,6 +2017,9 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout != 0 {
 		sc.conn.SetReadDeadline(time.Time{})
+		if st.body != nil {
+			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+		}
 	}
 
 	go sc.runHandler(rw, req, handler)
@@ -2014,9 +2088,6 @@ func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
 }
 
 func (sc *serverConn) processPriority(f *PriorityFrame) error {
-	if sc.inGoAway {
-		return nil
-	}
 	if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
 		return err
 	}
@@ -2041,8 +2112,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
 	st.cw.Init()
 	st.flow.conn = &sc.flow // link to conn-level counter
 	st.flow.add(sc.initialStreamSendWindowSize)
-	st.inflow.conn = &sc.inflow // link to conn-level counter
-	st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+	st.inflow.init(sc.srv.initialStreamRecvWindowSize())
 	if sc.hs.WriteTimeout != 0 {
 		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
 	}
@@ -2090,12 +2160,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
 	}
 
-	bodyOpen := !f.StreamEnded()
-	if rp.method == "HEAD" && bodyOpen {
-		// HEAD requests can't have bodies
-		return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol))
-	}
-
 	rp.header = make(http.Header)
 	for _, hf := range f.RegularFields() {
 		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
@@ -2108,6 +2172,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 	if err != nil {
 		return nil, nil, err
 	}
+	bodyOpen := !f.StreamEnded()
 	if bodyOpen {
 		if vv, ok := rp.header["Content-Length"]; ok {
 			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
@@ -2139,7 +2204,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
 		tlsState = sc.tlsState
 	}
 
-	needsContinue := rp.header.Get("Expect") == "100-continue"
+	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
 	if needsContinue {
 		rp.header.Del("Expect")
 	}
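The switch to httpguts.HeaderValuesContainsToken makes the Expect check case-insensitive and token-aware, which the naive string comparison was not. A quick standalone comparison (httpguts is the real golang.org/x/net/http/httpguts package):

```go
package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	vals := []string{"100-Continue"}
	// Naive comparison is case-sensitive and misses this value.
	fmt.Println(vals[0] == "100-continue") // false
	// Token matching handles case (and value lists) correctly.
	fmt.Println(httpguts.HeaderValuesContainsToken(vals, "100-continue")) // true
}
```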
@@ -2223,6 +2288,9 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler
 	didPanic := true
 	defer func() {
 		rw.rws.stream.cancelCtx()
+		if req.MultipartForm != nil {
+			req.MultipartForm.RemoveAll()
+		}
 		if didPanic {
 			e := recover()
 			sc.writeFrameFromHandler(FrameWriteRequest{
@@ -2326,47 +2394,28 @@ func (sc *serverConn) noteBodyRead(st *stream, n int) {
 }
 
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
-	sc.serveG.check()
-	// "The legal range for the increment to the flow control
-	// window is 1 to 2^31-1 (2,147,483,647) octets."
-	// A Go Read call on 64-bit machines could in theory read
-	// a larger Read than this. Very unlikely, but we handle it here
-	// rather than elsewhere for now.
-	const maxUint31 = 1<<31 - 1
-	for n >= maxUint31 {
-		sc.sendWindowUpdate32(st, maxUint31)
-		n -= maxUint31
-	}
-	sc.sendWindowUpdate32(st, int32(n))
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+	sc.sendWindowUpdate(st, int(n))
 }
 
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
 	sc.serveG.check()
-	if n == 0 {
+	var streamID uint32
+	var send int32
+	if st == nil {
+		send = sc.inflow.add(n)
+	} else {
+		streamID = st.id
+		send = st.inflow.add(n)
+	}
+	if send == 0 {
 		return
 	}
-	if n < 0 {
-		panic("negative update")
-	}
-	var streamID uint32
-	if st != nil {
-		streamID = st.id
-	}
 	sc.writeFrame(FrameWriteRequest{
-		write:  writeWindowUpdate{streamID: streamID, n: uint32(n)},
+		write:  writeWindowUpdate{streamID: streamID, n: uint32(send)},
 		stream: st,
 	})
-	var ok bool
-	if st == nil {
-		ok = sc.inflow.add(n)
-	} else {
-		ok = st.inflow.add(n)
-	}
-	if !ok {
-		panic("internal error; sent too many window updates without decrements?")
-	}
 }
 
 // requestBody is the Handler's Request.Body type.
@@ -2454,7 +2503,15 @@ type responseWriterState struct {
 
 type chunkWriter struct{ rws *responseWriterState }
 
-func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+func (cw chunkWriter) Write(p []byte) (n int, err error) {
+	n, err = cw.rws.writeChunk(p)
+	if err == errStreamClosed {
+		// If writing failed because the stream has been closed,
+		// return the reason it was closed.
+		err = cw.rws.stream.closeErr
+	}
+	return n, err
+}
 
 func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
 
@@ -2493,6 +2550,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 		rws.writeHeader(200)
 	}
 
+	if rws.handlerDone {
+		rws.promoteUndeclaredTrailers()
+	}
+
 	isHeadResp := rws.req.Method == "HEAD"
 	if !rws.sentHeader {
 		rws.sentHeader = true
@@ -2564,10 +2625,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 		return 0, nil
 	}
 
-	if rws.handlerDone {
-		rws.promoteUndeclaredTrailers()
-	}
-
 	// only send trailers if they have actually been defined by the
 	// server handler.
 	hasNonemptyTrailers := rws.hasNonemptyTrailers()
@@ -2648,23 +2705,85 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
 	}
 }
 
+func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
+	st := w.rws.stream
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
+		// If we're setting a deadline in the past, reset the stream immediately
+		// so writes after SetWriteDeadline returns will fail.
+		st.onReadTimeout()
+		return nil
+	}
+	w.rws.conn.sendServeMsg(func(sc *serverConn) {
+		if st.readDeadline != nil {
+			if !st.readDeadline.Stop() {
+				// Deadline already exceeded, or stream has been closed.
+				return
+			}
+		}
+		if deadline.IsZero() {
+			st.readDeadline = nil
+		} else if st.readDeadline == nil {
+			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
+		} else {
+			st.readDeadline.Reset(deadline.Sub(time.Now()))
+		}
+	})
+	return nil
+}
+
+func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
+	st := w.rws.stream
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
+		// If we're setting a deadline in the past, reset the stream immediately
+		// so writes after SetWriteDeadline returns will fail.
+		st.onWriteTimeout()
+		return nil
+	}
+	w.rws.conn.sendServeMsg(func(sc *serverConn) {
+		if st.writeDeadline != nil {
+			if !st.writeDeadline.Stop() {
+				// Deadline already exceeded, or stream has been closed.
+				return
+			}
+		}
+		if deadline.IsZero() {
+			st.writeDeadline = nil
+		} else if st.writeDeadline == nil {
+			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
+		} else {
+			st.writeDeadline.Reset(deadline.Sub(time.Now()))
+		}
+	})
+	return nil
+}
+
 func (w *responseWriter) Flush() {
+	w.FlushError()
+}
+
+func (w *responseWriter) FlushError() error {
 	rws := w.rws
 	if rws == nil {
 		panic("Header called after Handler finished")
 	}
+	var err error
 	if rws.bw.Buffered() > 0 {
-		if err := rws.bw.Flush(); err != nil {
-			// Ignore the error. The frame writer already knows.
-			return
-		}
+		err = rws.bw.Flush()
 	} else {
 		// The bufio.Writer won't call chunkWriter.Write
 		// (writeChunk with zero bytes, so we have to do it
 		// ourselves to force the HTTP response header and/or
 		// final DATA frame (with END_STREAM) to be sent.
-		rws.writeChunk(nil)
+		_, err = chunkWriter{rws}.Write(nil)
+		if err == nil {
+			select {
+			case <-rws.stream.cw:
+				err = rws.stream.closeErr
+			default:
+			}
+		}
 	}
+	return err
 }
 
 func (w *responseWriter) CloseNotify() <-chan bool {
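Beyond the diff itself: callers discover the new SetReadDeadline and SetWriteDeadline methods through interface assertion (net/http's ResponseController in Go 1.20+ finds them the same way). A hedged sketch of direct use from a handler; whether the assertion succeeds depends on how the server stack wraps the ResponseWriter:

```go
package main

import (
	"net/http"
	"time"
)

// deadliner matches the per-stream deadline methods added above.
type deadliner interface {
	SetReadDeadline(time.Time) error
	SetWriteDeadline(time.Time) error
}

func handler(w http.ResponseWriter, r *http.Request) {
	if d, ok := w.(deadliner); ok {
		// Give this stream, not the whole connection, 5s to be read and written.
		_ = d.SetReadDeadline(time.Now().Add(5 * time.Second))
		_ = d.SetWriteDeadline(time.Now().Add(5 * time.Second))
	}
	_, _ = w.Write([]byte("ok"))
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```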