mirror of https://github.com/openfaas/faasd.git, synced 2025-06-24 15:53:24 +00:00
Bump golang.org/x/net from 0.10.0 to 0.17.0

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.10.0 to 0.17.0.
- [Commits](https://github.com/golang/net/compare/v0.10.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
committed by Alex Ellis
parent 99ccd75b62
commit 5356fca4c5
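
A bump like this one can typically be reproduced with the standard Go toolchain; because faasd vendors its dependencies, the sources under vendor/ change as well. A sketch of the usual commands, run from the repository root:

	go get golang.org/x/net@v0.17.0
	go mod tidy
	go mod vendor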
vendor/golang.org/x/net/http2/server.go (generated, vendored): 95 changed lines
@@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 	if s.NewWriteScheduler != nil {
 		sc.writeSched = s.NewWriteScheduler()
 	} else {
-		sc.writeSched = NewPriorityWriteScheduler(nil)
+		sc.writeSched = newRoundRobinWriteScheduler()
 	}
 
 	// These start at the RFC-specified defaults. If there is a higher
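
(Aside: as the hunk above shows, x/net v0.17.0 switches the default HTTP/2 write scheduler from the RFC 7540 priority tree to round-robin. Server.NewWriteScheduler is public API, so a server that wants the previous behaviour can opt back in. A minimal sketch; the address and certificate paths are placeholders:)

	package main

	import (
		"log"
		"net/http"

		"golang.org/x/net/http2"
	)

	func main() {
		srv := &http.Server{Addr: ":8443"}
		// With NewWriteScheduler left nil, v0.17.0 defaults to round-robin;
		// supplying the hook restores the priority scheduler.
		h2 := &http2.Server{
			NewWriteScheduler: func() http2.WriteScheduler {
				return http2.NewPriorityWriteScheduler(nil)
			},
		}
		if err := http2.ConfigureServer(srv, h2); err != nil {
			log.Fatal(err)
		}
		log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
	}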
@@ -581,9 +581,11 @@ type serverConn struct {
 	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
 	curClientStreams            uint32 // number of open streams initiated by the client
 	curPushedStreams            uint32 // number of open streams initiated by server push
+	curHandlers                 uint32 // number of running handler goroutines
 	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
 	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
 	streams                     map[uint32]*stream
+	unstartedHandlers           []unstartedHandler
 	initialStreamSendWindowSize int32
 	maxFrameSize                int32
 	peerMaxHeaderListSize       uint32 // zero means unknown (default)
@@ -981,6 +983,8 @@ func (sc *serverConn) serve() {
 					return
 				case gracefulShutdownMsg:
 					sc.startGracefulShutdownInternal()
+				case handlerDoneMsg:
+					sc.handlerDone()
 				default:
 					panic("unknown timer")
 				}
@@ -1012,14 +1016,6 @@ func (sc *serverConn) serve() {
 	}
 }
 
-func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
-	select {
-	case <-sc.doneServing:
-	case <-sharedCh:
-		close(privateCh)
-	}
-}
-
 type serverMessage int
 
 // Message values sent to serveMsgCh.
@@ -1028,6 +1024,7 @@ var (
 	idleTimerMsg        = new(serverMessage)
 	shutdownTimerMsg    = new(serverMessage)
 	gracefulShutdownMsg = new(serverMessage)
+	handlerDoneMsg      = new(serverMessage)
 )
 
 func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
@@ -1900,9 +1897,11 @@ func (st *stream) copyTrailersToHandlerRequest() {
 // onReadTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's ReadTimeout has fired.
 func (st *stream) onReadTimeout() {
-	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
-	// returning the bare error.
-	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	if st.body != nil {
+		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
+		// returning the bare error.
+		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	}
 }
 
 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
@@ -2020,13 +2019,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout != 0 {
 		sc.conn.SetReadDeadline(time.Time{})
-		if st.body != nil {
-			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
-		}
+		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
 	}
 
-	go sc.runHandler(rw, req, handler)
-	return nil
+	return sc.scheduleHandler(id, rw, req, handler)
 }
 
 func (sc *serverConn) upgradeRequest(req *http.Request) {
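
(Aside: sc.hs in the hunk above is the wrapping *http.Server, so the per-stream read deadline is driven by the standard ReadTimeout field. A minimal sketch of a server configuration that exercises this path; the address, duration, and certificate paths are illustrative:)

	package main

	import (
		"log"
		"net/http"
		"time"
	)

	func main() {
		srv := &http.Server{
			Addr:        ":8443",
			ReadTimeout: 10 * time.Second, // surfaces as sc.hs.ReadTimeout per HTTP/2 stream
		}
		// HTTP/2 is negotiated automatically over TLS by net/http.
		log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
	}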
@@ -2046,6 +2042,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
 		sc.conn.SetReadDeadline(time.Time{})
 	}
 
+	// This is the first request on the connection,
+	// so start the handler directly rather than going
+	// through scheduleHandler.
+	sc.curHandlers++
 	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
 }
 
@@ -2286,8 +2286,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
 	return &responseWriter{rws: rws}
 }
 
+type unstartedHandler struct {
+	streamID uint32
+	rw       *responseWriter
+	req      *http.Request
+	handler  func(http.ResponseWriter, *http.Request)
+}
+
+// scheduleHandler starts a handler goroutine,
+// or schedules one to start as soon as an existing handler finishes.
+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
+	sc.serveG.check()
+	maxHandlers := sc.advMaxStreams
+	if sc.curHandlers < maxHandlers {
+		sc.curHandlers++
+		go sc.runHandler(rw, req, handler)
+		return nil
+	}
+	if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
+		return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
+	}
+	sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
+		streamID: streamID,
+		rw:       rw,
+		req:      req,
+		handler:  handler,
+	})
+	return nil
+}
+
+func (sc *serverConn) handlerDone() {
+	sc.serveG.check()
+	sc.curHandlers--
+	i := 0
+	maxHandlers := sc.advMaxStreams
+	for ; i < len(sc.unstartedHandlers); i++ {
+		u := sc.unstartedHandlers[i]
+		if sc.streams[u.streamID] == nil {
+			// This stream was reset before its goroutine had a chance to start.
+			continue
+		}
+		if sc.curHandlers >= maxHandlers {
+			break
+		}
+		sc.curHandlers++
+		go sc.runHandler(u.rw, u.req, u.handler)
+		sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
+	}
+	sc.unstartedHandlers = sc.unstartedHandlers[i:]
+	if len(sc.unstartedHandlers) == 0 {
+		sc.unstartedHandlers = nil
+	}
+}
+
 // Run on its own goroutine.
 func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+	defer sc.sendServeMsg(handlerDoneMsg)
 	didPanic := true
 	defer func() {
 		rw.rws.stream.cancelCtx()
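
(Aside: the scheduleHandler/handlerDone pair added above is the core of the mitigation that x/net v0.17.0 shipped for the HTTP/2 "Rapid Reset" attack (CVE-2023-39325, related to CVE-2023-44487): at most advMaxStreams handler goroutines run at once, and a backlog of more than four times that limit of unstarted handlers closes the connection with ENHANCE_YOUR_CALM. A standalone sketch of the same bounded-start pattern, using invented names (task, scheduler) rather than the library's internal types; like the http2 serve loop, it assumes a single owning goroutine, so no locking is shown:)

	package handlersched

	import "errors"

	// task stands in for an unstarted HTTP/2 handler.
	type task struct {
		id  uint32
		run func()
	}

	// scheduler caps concurrently running tasks and queues the overflow,
	// mirroring scheduleHandler/handlerDone above. All methods must be
	// called from one goroutine, as the http2 serve loop does.
	type scheduler struct {
		max     int
		running int
		queue   []task
	}

	var errTooManyQueued = errors.New("too many queued tasks") // analogue of ENHANCE_YOUR_CALM

	func (s *scheduler) schedule(t task) error {
		if s.running < s.max {
			s.running++
			go t.run()
			return nil
		}
		if len(s.queue) > 4*s.max {
			return errTooManyQueued
		}
		s.queue = append(s.queue, t)
		return nil
	}

	// done is the analogue of handlerDone: it frees a slot and starts
	// queued tasks until the cap is reached again.
	func (s *scheduler) done() {
		s.running--
		i := 0
		for ; i < len(s.queue) && s.running < s.max; i++ {
			s.running++
			go s.queue[i].run()
			s.queue[i] = task{} // don't retain references
		}
		s.queue = s.queue[i:]
		if len(s.queue) == 0 {
			s.queue = nil
		}
	}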
@@ -2429,7 +2483,7 @@ type requestBody struct {
 	conn          *serverConn
 	closeOnce     sync.Once // for use by Close only
 	sawEOF        bool      // for use by Read only
-	pipe          *pipe     // non-nil if we have a HTTP entity message body
+	pipe          *pipe     // non-nil if we have an HTTP entity message body
 	needsContinue bool      // need to send a 100-continue
 }
 
@@ -2569,7 +2623,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 			clen = ""
 		}
 	}
-	if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+	_, hasContentLength := rws.snapHeader["Content-Length"]
+	if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
 		clen = strconv.Itoa(len(p))
 	}
 	_, hasContentType := rws.snapHeader["Content-Type"]
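
(Aside: with the hasContentLength check added above, the server only synthesizes a Content-Length from the final buffered chunk when the handler did not set the header itself. A minimal handler sketch that supplies the header explicitly; the names, route, and port are illustrative:)

	package main

	import (
		"net/http"
		"strconv"
	)

	func hello(w http.ResponseWriter, r *http.Request) {
		body := []byte("hello")
		// Set explicitly; with the change above the HTTP/2 server defers
		// to this header rather than recomputing the value itself.
		w.Header().Set("Content-Length", strconv.Itoa(len(body)))
		w.Write(body)
	}

	func main() {
		http.HandleFunc("/hello", hello)
		http.ListenAndServe(":8080", nil) // plaintext for brevity; HTTP/2 in net/http needs TLS
	}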
@@ -2774,7 +2829,7 @@ func (w *responseWriter) FlushError() error {
 		err = rws.bw.Flush()
 	} else {
 		// The bufio.Writer won't call chunkWriter.Write
-		// (writeChunk with zero bytes, so we have to do it
+		// (writeChunk with zero bytes), so we have to do it
 		// ourselves to force the HTTP response header and/or
 		// final DATA frame (with END_STREAM) to be sent.
 		_, err = chunkWriter{rws}.Write(nil)