Mirror of https://github.com/openfaas/faasd.git (synced 2025-06-19 20:46:40 +00:00)
Remove armhf/armv7 support from faasd
See notes in: #364

Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>
vendor/google.golang.org/grpc/server.go (generated, vendored): 231 changed lines
@@ -86,7 +86,7 @@ func init() {
 var statusOK = status.New(codes.OK, "")
 var logger = grpclog.Component("core")
 
-type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
+type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
 
 // MethodDesc represents an RPC service's method specification.
 type MethodDesc struct {
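Note: most of the churn in this vendored file is this mechanical interface{} -> any rename. Since Go 1.18, any is a predeclared alias for interface{}, so the two spellings name the identical type and the rename changes no behavior. A standalone sketch (not part of the diff) illustrating the equivalence:

package main

import "fmt"

func main() {
	// any is an alias for interface{}, not a distinct type: values of
	// one are assignable to the other without conversion.
	var a interface{} = 42
	var b any = a
	fmt.Printf("%T %T %v\n", a, b, a == b) // int int true
}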
@@ -99,20 +99,20 @@ type ServiceDesc struct {
 	ServiceName string
 	// The pointer to the service interface. Used to check whether the user
 	// provided implementation satisfies the interface requirements.
-	HandlerType interface{}
+	HandlerType any
 	Methods     []MethodDesc
 	Streams     []StreamDesc
-	Metadata    interface{}
+	Metadata    any
 }
 
 // serviceInfo wraps information about a service. It is very similar to
 // ServiceDesc and is constructed from it for internal purposes.
 type serviceInfo struct {
 	// Contains the implementation for the methods in this service.
-	serviceImpl interface{}
+	serviceImpl any
 	methods     map[string]*MethodDesc
 	streams     map[string]*StreamDesc
-	mdata       interface{}
+	mdata       any
 }
 
 // Server is a gRPC server to serve RPC requests.
@@ -164,10 +164,12 @@ type serverOptions struct {
 	initialConnWindowSize int32
 	writeBufferSize       int
 	readBufferSize        int
+	sharedWriteBuffer     bool
 	connectionTimeout     time.Duration
 	maxHeaderListSize     *uint32
 	headerTableSize       *uint32
 	numServerWorkers      uint32
+	recvBufferPool        SharedBufferPool
 }
 
 var defaultServerOptions = serverOptions{
@@ -177,6 +179,7 @@ var defaultServerOptions = serverOptions{
 	connectionTimeout: 120 * time.Second,
 	writeBufferSize:   defaultWriteBufSize,
 	readBufferSize:    defaultReadBufSize,
+	recvBufferPool:    nopBufferPool{},
 }
 var globalServerOptions []ServerOption
 
@@ -228,6 +231,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption {
 	return &joinServerOption{opts: opts}
 }
 
+// SharedWriteBuffer allows reusing per-connection transport write buffer.
+// If this option is set to true every connection will release the buffer after
+// flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SharedWriteBuffer(val bool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.sharedWriteBuffer = val
+	})
+}
+
 // WriteBufferSize determines how much data can be batched before doing a write
 // on the wire. The corresponding memory allocation for this buffer will be
 // twice the size to keep syscalls low. The default value for this buffer is
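SharedWriteBuffer is one of the two new ServerOptions vendored in by this upgrade. A minimal sketch of opting in, assuming grpc-go v1.58 or later; the listen address is arbitrary:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// Experimental: each connection releases its transport write buffer
	// after flushing data to the wire, trading some allocation for memory.
	srv := grpc.NewServer(grpc.SharedWriteBuffer(true))
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}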
@@ -268,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption {
 
 // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
 func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
-	if kp.Time > 0 && kp.Time < time.Second {
+	if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime {
 		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
-		kp.Time = time.Second
+		kp.Time = internal.KeepaliveMinServerPingTime
 	}
 
 	return newFuncServerOption(func(o *serverOptions) {
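The clamp now references the internal constant KeepaliveMinServerPingTime (still one second by default) rather than a literal, chiefly so tests can lower it. A sketch of configuring server keepalive; the durations are illustrative:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// A Time below the server minimum is raised to that minimum and a
	// warning is logged, per the hunk above.
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		Time:    30 * time.Second, // ping an idle client after 30s
		Timeout: 10 * time.Second, // close the connection 10s after an unacked ping
	}))
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}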
@@ -550,6 +567,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
 	})
 }
 
+// RecvBufferPool returns a ServerOption that configures the server
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: StatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.recvBufferPool = bufferPool
+	})
+}
+
 // serverWorkerResetThreshold defines how often the stack must be reset. Every
 // N requests, by spawning a new goroutine in its place, a worker can reset its
 // stack so that large stacks don't live in memory forever. 2^16 should allow
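RecvBufferPool is the other experimental option added by this upgrade; grpc.NewSharedBufferPool() returns a ready-made size-tiered pool. A sketch, again assuming grpc-go v1.58+:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// Received messages are parsed into buffers drawn from the shared
	// pool. Note the caveat above: the pool is ignored when StatsHandler,
	// EnableTracing, or binary logging is in use.
	srv := grpc.NewServer(grpc.RecvBufferPool(grpc.NewSharedBufferPool()))
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}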
@@ -625,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server {
 
 // printf records an event in s's event log, unless s has been stopped.
 // REQUIRES s.mu is held.
-func (s *Server) printf(format string, a ...interface{}) {
+func (s *Server) printf(format string, a ...any) {
 	if s.events != nil {
 		s.events.Printf(format, a...)
 	}
@@ -633,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) {
 
 // errorf records an error in s's event log, unless s has been stopped.
 // REQUIRES s.mu is held.
-func (s *Server) errorf(format string, a ...interface{}) {
+func (s *Server) errorf(format string, a ...any) {
 	if s.events != nil {
 		s.events.Errorf(format, a...)
 	}
@@ -648,14 +686,14 @@ type ServiceRegistrar interface {
 	// once the server has started serving.
 	// desc describes the service and its methods and handlers. impl is the
 	// service implementation which is passed to the method handlers.
-	RegisterService(desc *ServiceDesc, impl interface{})
+	RegisterService(desc *ServiceDesc, impl any)
 }
 
 // RegisterService registers a service and its implementation to the gRPC
 // server. It is called from the IDL generated code. This must be called before
 // invoking Serve. If ss is non-nil (for legacy code), its type is checked to
 // ensure it implements sd.HandlerType.
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+func (s *Server) RegisterService(sd *ServiceDesc, ss any) {
 	if ss != nil {
 		ht := reflect.TypeOf(sd.HandlerType).Elem()
 		st := reflect.TypeOf(ss)
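RegisterService is normally reached through generated code, but the HandlerType check is easy to see with a hand-written ServiceDesc. Everything in this sketch (service name, interface, implementation) is hypothetical:

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// EchoService is the interface stored in HandlerType below.
type EchoService interface {
	Echo(string) string
}

type echoImpl struct{}

func (echoImpl) Echo(s string) string { return s }

func main() {
	// Generated code builds this value; HandlerType holds a nil pointer
	// to the service interface so RegisterService can reflect on it.
	desc := grpc.ServiceDesc{
		ServiceName: "example.Echo",
		HandlerType: (*EchoService)(nil),
	}
	srv := grpc.NewServer()
	// RegisterService fails fatally if echoImpl{} did not satisfy EchoService.
	srv.RegisterService(&desc, echoImpl{})
	fmt.Println("registered:", desc.ServiceName)
}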
@@ -666,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
 	s.register(sd, ss)
 }
 
-func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+func (s *Server) register(sd *ServiceDesc, ss any) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	s.printf("RegisterService(%q)", sd.ServiceName)
@@ -707,7 +745,7 @@ type MethodInfo struct {
 type ServiceInfo struct {
 	Methods []MethodInfo
 	// Metadata is the metadata specified in ServiceDesc when registering service.
-	Metadata interface{}
+	Metadata any
 }
 
 // GetServiceInfo returns a map from service names to ServiceInfo.
@@ -908,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 		InitialConnWindowSize: s.opts.initialConnWindowSize,
 		WriteBufferSize:       s.opts.writeBufferSize,
 		ReadBufferSize:        s.opts.readBufferSize,
+		SharedWriteBuffer:     s.opts.sharedWriteBuffer,
 		ChannelzParentID:      s.channelzID,
 		MaxHeaderListSize:     s.opts.maxHeaderListSize,
 		HeaderTableSize:       s.opts.headerTableSize,
@@ -944,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 		f := func() {
 			defer streamQuota.release()
 			defer wg.Done()
-			s.handleStream(st, stream, s.traceInfo(st, stream))
+			s.handleStream(st, stream)
 		}
 
 		if s.opts.numServerWorkers > 0 {
@@ -956,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 			}
 		}
 		go f()
-	}, func(ctx context.Context, method string) context.Context {
-		if !EnableTracing {
-			return ctx
-		}
-		tr := trace.New("grpc.Recv."+methodFamily(method), method)
-		return trace.NewContext(ctx, tr)
 	})
 	wg.Wait()
 }
@@ -1010,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	s.serveStreams(st)
 }
 
-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
-// If tracing is not enabled, it returns nil.
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
-	if !EnableTracing {
-		return nil
-	}
-	tr, ok := trace.FromContext(stream.Context())
-	if !ok {
-		return nil
-	}
-
-	trInfo = &traceInfo{
-		tr: tr,
-		firstLine: firstLine{
-			client:     false,
-			remoteAddr: st.RemoteAddr(),
-		},
-	}
-	if dl, ok := stream.Context().Deadline(); ok {
-		trInfo.firstLine.deadline = time.Until(dl)
-	}
-	return trInfo
-}
-
 func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -1094,7 +1103,7 @@ func (s *Server) incrCallsFailed() {
 	atomic.AddInt64(&s.czData.callsFailed, 1)
 }
 
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
 		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
@@ -1113,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	err = t.Write(stream, hdr, payload, opts)
 	if err == nil {
 		for _, sh := range s.opts.statsHandlers {
-			sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+			sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
 		}
 	}
 	return err
@@ -1141,7 +1150,7 @@ func chainUnaryServerInterceptors(s *Server) {
 }
 
 func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
-	return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
+	return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
 		return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
 	}
 }
@@ -1150,12 +1159,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
 	if curr == len(interceptors)-1 {
 		return finalHandler
 	}
-	return func(ctx context.Context, req interface{}) (interface{}, error) {
+	return func(ctx context.Context, req any) (any, error) {
 		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
 	}
 }
 
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 	shs := s.opts.statsHandlers
 	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
 		if channelz.IsOn() {
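From here on the diff threads this single ctx through processUnaryRPC, processStreamingRPC, sendResponse, the stats handlers, and the binary logger, so every callback for one RPC now sees the same context, including the trace and server transport stream attached in handleStream. A sketch of a stats.Handler observing these callbacks; the logging handler itself is hypothetical:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// logHandler is a minimal stats.Handler. After this refactor, each
// HandleRPC invocation for a given RPC receives the same ctx that the
// method handler sees.
type logHandler struct{}

func (logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}

func (logHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	log.Printf("rpc stat: %T", s)
}

func (logHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (logHandler) HandleConn(context.Context, stats.ConnStats) {}

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer(grpc.StatsHandler(logHandler{}))
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}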
@@ -1169,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 				IsClientStream: false,
 				IsServerStream: false,
 			}
-			sh.HandleRPC(stream.Context(), statsBegin)
+			sh.HandleRPC(ctx, statsBegin)
 		}
 		if trInfo != nil {
 			trInfo.tr.LazyLog(&trInfo.firstLine, false)
@@ -1187,7 +1196,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 		defer func() {
 			if trInfo != nil {
 				if err != nil && err != io.EOF {
-					trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 					trInfo.tr.SetError()
 				}
 				trInfo.tr.Finish()
@@ -1201,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 				if err != nil && err != io.EOF {
 					end.Error = toRPCErr(err)
 				}
-				sh.HandleRPC(stream.Context(), end)
+				sh.HandleRPC(ctx, end)
 			}
 
 			if channelz.IsOn() {
@@ -1223,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 		}
 	}
 	if len(binlogs) != 0 {
-		ctx := stream.Context()
 		md, _ := metadata.FromIncomingContext(ctx)
 		logEntry := &binarylog.ClientHeader{
 			Header: md,
@@ -1294,7 +1302,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 	if len(shs) != 0 || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
 	}
-	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+	d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
 	if err != nil {
 		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
 			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
@@ -1304,12 +1312,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 	if channelz.IsOn() {
 		t.IncrMsgRecv()
 	}
-	df := func(v interface{}) error {
+	df := func(v any) error {
 		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
 		for _, sh := range shs {
-			sh.HandleRPC(stream.Context(), &stats.InPayload{
+			sh.HandleRPC(ctx, &stats.InPayload{
 				RecvTime: time.Now(),
 				Payload:  v,
 				Length:   len(d),
@@ -1323,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 				Message: d,
 			}
 			for _, binlog := range binlogs {
-				binlog.Log(stream.Context(), cm)
+				binlog.Log(ctx, cm)
 			}
 		}
 		if trInfo != nil {
@@ -1331,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 		}
 		return nil
 	}
-	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+	ctx = NewContextWithServerTransportStream(ctx, stream)
 	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
 	if appErr != nil {
 		appStatus, ok := status.FromError(appErr)
@@ -1356,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 				Header: h,
 			}
 			for _, binlog := range binlogs {
-				binlog.Log(stream.Context(), sh)
+				binlog.Log(ctx, sh)
 			}
 		}
 		st := &binarylog.ServerTrailer{
@@ -1364,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 			Err: appErr,
 		}
 		for _, binlog := range binlogs {
-			binlog.Log(stream.Context(), st)
+			binlog.Log(ctx, st)
 		}
 	}
 	return appErr
@@ -1379,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 	if stream.SendCompress() != sendCompressorName {
 		comp = encoding.GetCompressor(stream.SendCompress())
 	}
-	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+	if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
 		if err == io.EOF {
 			// The entire stream is done (for unary RPC only).
 			return err
@@ -1406,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 				Err: appErr,
 			}
 			for _, binlog := range binlogs {
-				binlog.Log(stream.Context(), sh)
-				binlog.Log(stream.Context(), st)
+				binlog.Log(ctx, sh)
+				binlog.Log(ctx, st)
 			}
 		}
 		return err
@@ -1421,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 			Message: reply,
 		}
 		for _, binlog := range binlogs {
-			binlog.Log(stream.Context(), sh)
-			binlog.Log(stream.Context(), sm)
+			binlog.Log(ctx, sh)
+			binlog.Log(ctx, sm)
 		}
 	}
 	if channelz.IsOn() {
@@ -1440,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 			Err: appErr,
 		}
 		for _, binlog := range binlogs {
-			binlog.Log(stream.Context(), st)
+			binlog.Log(ctx, st)
 		}
 	}
 	return t.WriteStatus(stream, statusOK)
@@ -1468,7 +1476,7 @@ func chainStreamServerInterceptors(s *Server) {
 }
 
 func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
-	return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+	return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
 		return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
 	}
 }
@@ -1477,12 +1485,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
 	if curr == len(interceptors)-1 {
 		return finalHandler
 	}
-	return func(srv interface{}, stream ServerStream) error {
+	return func(srv any, stream ServerStream) error {
 		return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
 	}
 }
 
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 	if channelz.IsOn() {
 		s.incrCallsStarted()
 	}
@@ -1496,15 +1504,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 			IsServerStream: sd.ServerStreams,
 		}
 		for _, sh := range shs {
-			sh.HandleRPC(stream.Context(), statsBegin)
+			sh.HandleRPC(ctx, statsBegin)
 		}
 	}
-	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+	ctx = NewContextWithServerTransportStream(ctx, stream)
 	ss := &serverStream{
 		ctx:                   ctx,
 		t:                     t,
 		s:                     stream,
-		p:                     &parser{r: stream},
+		p:                     &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
 		codec:                 s.getCodec(stream.ContentSubtype()),
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
@@ -1518,7 +1526,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 			if trInfo != nil {
 				ss.mu.Lock()
 				if err != nil && err != io.EOF {
-					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 					ss.trInfo.tr.SetError()
 				}
 				ss.trInfo.tr.Finish()
@@ -1535,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 				end.Error = toRPCErr(err)
 			}
 			for _, sh := range shs {
-				sh.HandleRPC(stream.Context(), end)
+				sh.HandleRPC(ctx, end)
 			}
 		}
 
@@ -1577,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 			logEntry.PeerAddr = peer.Addr
 		}
 		for _, binlog := range ss.binlogs {
-			binlog.Log(stream.Context(), logEntry)
+			binlog.Log(ctx, logEntry)
 		}
 	}
 
@@ -1621,7 +1629,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
 	}
 	var appErr error
-	var server interface{}
+	var server any
 	if info != nil {
 		server = info.serviceImpl
 	}
@@ -1655,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 				Err: appErr,
 			}
 			for _, binlog := range ss.binlogs {
-				binlog.Log(stream.Context(), st)
+				binlog.Log(ctx, st)
 			}
 		}
 		t.WriteStatus(ss.s, appStatus)
@@ -1673,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 			Err: appErr,
 		}
 		for _, binlog := range ss.binlogs {
-			binlog.Log(stream.Context(), st)
+			binlog.Log(ctx, st)
 		}
 	}
 	return t.WriteStatus(ss.s, statusOK)
 }
 
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+	ctx := stream.Context()
+	var ti *traceInfo
+	if EnableTracing {
+		tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+		ctx = trace.NewContext(ctx, tr)
+		ti = &traceInfo{
+			tr: tr,
+			firstLine: firstLine{
+				client:     false,
+				remoteAddr: t.RemoteAddr(),
+			},
+		}
+		if dl, ok := ctx.Deadline(); ok {
+			ti.firstLine.deadline = time.Until(dl)
+		}
+	}
+
 	sm := stream.Method()
 	if sm != "" && sm[0] == '/' {
 		sm = sm[1:]
 	}
 	pos := strings.LastIndex(sm, "/")
 	if pos == -1 {
-		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
-			trInfo.tr.SetError()
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
+			ti.tr.SetError()
 		}
 		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
 		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
-			if trInfo != nil {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				trInfo.tr.SetError()
+			if ti != nil {
+				ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
				ti.tr.SetError()
 			}
 			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
 		}
-		if trInfo != nil {
-			trInfo.tr.Finish()
+		if ti != nil {
+			ti.tr.Finish()
 		}
 		return
 	}
@@ -1709,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
 	srv, knownService := s.services[service]
 	if knownService {
 		if md, ok := srv.methods[method]; ok {
-			s.processUnaryRPC(t, stream, srv, md, trInfo)
+			s.processUnaryRPC(ctx, t, stream, srv, md, ti)
 			return
 		}
 		if sd, ok := srv.streams[method]; ok {
-			s.processStreamingRPC(t, stream, srv, sd, trInfo)
+			s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
 			return
 		}
 	}
 	// Unknown service, or known server unknown method.
 	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
-		s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+		s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
 		return
 	}
 	var errDesc string
@@ -1728,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
 	} else {
 		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
 	}
-	if trInfo != nil {
-		trInfo.tr.LazyPrintf("%s", errDesc)
-		trInfo.tr.SetError()
+	if ti != nil {
+		ti.tr.LazyPrintf("%s", errDesc)
+		ti.tr.SetError()
 	}
 	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
-		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-			trInfo.tr.SetError()
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+			ti.tr.SetError()
 		}
 		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
 	}
-	if trInfo != nil {
-		trInfo.tr.Finish()
+	if ti != nil {
+		ti.tr.Finish()
 	}
 }
 
@@ -2054,12 +2079,12 @@ func validateSendCompressor(name, clientCompressors string) error {
 // atomicSemaphore implements a blocking, counting semaphore. acquire should be
 // called synchronously; release may be called asynchronously.
 type atomicSemaphore struct {
-	n    int64
+	n    atomic.Int64
 	wait chan struct{}
 }
 
 func (q *atomicSemaphore) acquire() {
-	if atomic.AddInt64(&q.n, -1) < 0 {
+	if q.n.Add(-1) < 0 {
 		// We ran out of quota. Block until a release happens.
 		<-q.wait
 	}
@@ -2070,12 +2095,14 @@ func (q *atomicSemaphore) release() {
 	// concurrent calls to acquire, but also note that with synchronous calls to
 	// acquire, as our system does, n will never be less than -1. There are
 	// fairness issues (queuing) to consider if this was to be generalized.
-	if atomic.AddInt64(&q.n, 1) <= 0 {
+	if q.n.Add(1) <= 0 {
 		// An acquire was waiting on us. Unblock it.
 		q.wait <- struct{}{}
 	}
 }
 
 func newHandlerQuota(n uint32) *atomicSemaphore {
-	return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
+	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+	a.n.Store(int64(n))
+	return a
 }
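The semaphore counter moves from a bare int64 plus atomic.AddInt64 to the typed atomic.Int64 wrapper introduced in Go 1.19, which guarantees 64-bit alignment on 32-bit platforms and makes non-atomic access impossible to write by accident. A standalone comparison sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Old style: a plain int64 and package-level helpers. Alignment on
	// 32-bit platforms and atomic-only access are the caller's problem.
	var n int64
	atomic.AddInt64(&n, 1)

	// New style (Go 1.19+): the typed wrapper is always aligned and only
	// exposes atomic operations, which is why the semaphore adopted it.
	var m atomic.Int64
	m.Store(1)
	fmt.Println(atomic.LoadInt64(&n), m.Add(-1)) // 1 0
}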