package grpc
import (
- "bytes"
+ "context"
"errors"
"io"
+ "math"
+ "strconv"
"sync"
"time"
- "golang.org/x/net/context"
"golang.org/x/net/trace"
+ "google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/binarylog"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
- "google.golang.org/grpc/transport"
)
// StreamHandler defines the handler called by the gRPC server to complete the
-// execution of a streaming RPC.
+// execution of a streaming RPC. If a StreamHandler returns an error, it
+// should be produced by the status package, or else gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message
+// of the RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error
// StreamDesc represents a streaming RPC service's method specification.
}
// Stream defines the common interface a client or server stream has to satisfy.
+//
+// Deprecated: See ClientStream and ServerStream documentation instead.
type Stream interface {
- // Context returns the context for this stream.
+ // Deprecated: See ClientStream and ServerStream documentation instead.
Context() context.Context
- // SendMsg blocks until it sends m, the stream is done or the stream
- // breaks.
- // On error, it aborts the stream and returns an RPC status on client
- // side. On server side, it simply returns the error to the caller.
- // SendMsg is called by generated code. Also Users can call SendMsg
- // directly when it is really needed in their use cases.
- // It's safe to have a goroutine calling SendMsg and another goroutine calling
- // recvMsg on the same stream at the same time.
- // But it is not safe to call SendMsg on the same stream in different goroutines.
+ // Deprecated: See ClientStream and ServerStream documentation instead.
SendMsg(m interface{}) error
- // RecvMsg blocks until it receives a message or the stream is
- // done. On client side, it returns io.EOF when the stream is done. On
- // any other error, it aborts the stream and returns an RPC status. On
- // server side, it simply returns the error to the caller.
- // It's safe to have a goroutine calling SendMsg and another goroutine calling
- // recvMsg on the same stream at the same time.
- // But it is not safe to call RecvMsg on the same stream in different goroutines.
+ // Deprecated: See ClientStream and ServerStream documentation instead.
RecvMsg(m interface{}) error
}
-// ClientStream defines the interface a client stream has to satisfy.
+// ClientStream defines the client-side behavior of a streaming RPC.
+//
+// All errors returned from ClientStream methods are compatible with the
+// status package.
type ClientStream interface {
// Header returns the header metadata received from the server if there
// is any. It blocks if the metadata is not ready to read.
// stream.Recv has returned a non-nil error (including io.EOF).
Trailer() metadata.MD
// CloseSend closes the send direction of the stream. It closes the stream
- // when non-nil error is met.
+ // when a non-nil error is encountered. It is also not safe to call CloseSend
+ // concurrently with SendMsg.
CloseSend() error
- // Stream.SendMsg() may return a non-nil error when something wrong happens sending
- // the request. The returned error indicates the status of this sending, not the final
- // status of the RPC.
- // Always call Stream.RecvMsg() to get the final status if you care about the status of
- // the RPC.
- Stream
+ // Context returns the context for this stream.
+ //
+ // It should not be called until after Header or RecvMsg has returned. Once
+ // called, subsequent client-side retries are disabled.
+ Context() context.Context
+ // SendMsg is generally called by generated code. On error, SendMsg aborts
+ // the stream. If the error was generated by the client, the status is
+ // returned directly; otherwise, io.EOF is returned and the status of
+ // the stream may be discovered using RecvMsg.
+ //
+ // SendMsg blocks until:
+ // - There is sufficient flow control to schedule m with the transport, or
+ // - The stream is done, or
+ // - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the server. An
+ // untimely stream closure may result in lost messages. To ensure delivery,
+ // users should ensure the RPC completed successfully using RecvMsg.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines. It is also
+ // not safe to call CloseSend concurrently with SendMsg.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the stream completes successfully. On
+ // any other error, the stream is aborted and the error contains the RPC
+ // status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m interface{}) error
}
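+
+ // A minimal sketch of the concurrency rules above (illustrative only; the
+ // stream, reqs, and reply values are hypothetical and error handling is
+ // application-specific):
+ //
+ //   go func() {
+ //       for _, req := range reqs {
+ //           if stream.SendMsg(req) != nil {
+ //               return // the real status is surfaced by RecvMsg
+ //           }
+ //       }
+ //       stream.CloseSend() // same goroutine as SendMsg, never concurrently
+ //   }()
+ //   for {
+ //       if err := stream.RecvMsg(reply); err != nil {
+ //           break // io.EOF means the stream completed successfully
+ //       }
+ //   }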
-// NewClientStream creates a new Stream for the client side. This is called
-// by generated code.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code. ctx is used for the lifetime of the stream.
+//
+// To ensure resources are not leaked due to the stream returned, one of the following
+// actions must be performed:
+//
+// 1. Call Close on the ClientConn.
+// 2. Cancel the context provided.
+// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+// client-streaming RPC, for instance, might use the helper function
+// CloseAndRecv (note that CloseSend does not Recv, therefore is not
+// guaranteed to release all resources).
+// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+//
+// If none of the above happen, a goroutine and a context will be leaked, and grpc
+// will not call the optionally-configured stats handler with a stats.End message.
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+ // allow interceptor to see all applicable call options, which means those
+ // configured as defaults from dial option as well as per-call options
+ opts = combine(cc.dopts.callOptions, opts)
+
if cc.dopts.streamInt != nil {
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
}
return newClientStream(ctx, desc, cc, method, opts...)
}
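+
+ // A minimal sketch of rules 2 and 3 above (illustrative only; desc, method,
+ // and reply are hypothetical): canceling the provided context guarantees
+ // cleanup even if the caller stops receiving early.
+ //
+ //   ctx, cancel := context.WithCancel(ctx)
+ //   defer cancel()
+ //   stream, err := cc.NewStream(ctx, desc, method)
+ //   if err != nil {
+ //       return err
+ //   }
+ //   for {
+ //       if err := stream.RecvMsg(reply); err != nil {
+ //           break // io.EOF on success; any other error carries the RPC status
+ //       }
+ //   }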
+// NewClientStream is a wrapper for ClientConn.NewStream.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+ return cc.NewStream(ctx, desc, method, opts...)
+}
+
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
- var (
- t transport.ClientTransport
- s *transport.Stream
- put func()
- cancel context.CancelFunc
- )
- c := defaultCallInfo
+ if channelz.IsOn() {
+ cc.incrCallsStarted()
+ defer func() {
+ if err != nil {
+ cc.incrCallsFailed()
+ }
+ }()
+ }
+ c := defaultCallInfo()
+ // Provide an opportunity for the first RPC to see the first service config
+ // provided by the resolver.
+ if err := cc.waitForResolvedAddrs(ctx); err != nil {
+ return nil, err
+ }
mc := cc.GetMethodConfig(method)
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
- if mc.Timeout != nil {
+ // Possible context leak:
+ // The cancel function for the child context we create will only be called
+ // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+ // an error is generated by SendMsg.
+ // https://github.com/grpc/grpc-go/issues/1818.
+ var cancel context.CancelFunc
+ if mc.Timeout != nil && *mc.Timeout >= 0 {
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
}
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
- opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
- if err := o.before(&c); err != nil {
+ if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ if err := setCallInfoCodec(c); err != nil {
+ return nil, err
+ }
callHdr := &transport.CallHdr{
- Host: cc.authority,
- Method: method,
- // If it's not client streaming, we should already have the request to be sent,
- // so we don't flush the header.
- // If it's client streaming, the user may never send a request or send it any
- // time soon, so we ask the transport to flush the header.
- Flush: desc.ClientStreams,
- }
- if cc.dopts.cp != nil {
+ Host: cc.authority,
+ Method: method,
+ ContentSubtype: c.contentSubtype,
+ }
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
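+ //
+ // For reference (not part of this change): callers typically opt in per
+ // call with grpc.UseCompressor(gzip.Name), where importing the
+ // encoding/gzip package registers the codec; WithCompressor instead sets a
+ // connection-wide default Compressor.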
+ var cp Compressor
+ var comp encoding.Compressor
+ if ct := c.compressorType; ct != "" {
+ callHdr.SendCompress = ct
+ if ct != encoding.Identity {
+ comp = encoding.GetCompressor(ct)
+ if comp == nil {
+ return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+ }
+ }
+ } else if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
+ cp = cc.dopts.cp
}
if c.creds != nil {
callHdr.Creds = c.creds
}
trInfo.tr.LazyLog(&trInfo.firstLine, false)
ctx = trace.NewContext(ctx, trInfo.tr)
- defer func() {
- if err != nil {
- // Need to call tr.finish() if error is returned.
- // Because tr will not be returned to caller.
- trInfo.tr.LazyPrintf("RPC: [%v]", err)
- trInfo.tr.SetError()
- trInfo.tr.Finish()
- }
- }()
}
- ctx = newContextWithRPCInfo(ctx)
+ ctx = newContextWithRPCInfo(ctx, c.failFast)
sh := cc.dopts.copts.StatsHandler
+ var beginTime time.Time
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
+ beginTime = time.Now()
begin := &stats.Begin{
Client: true,
- BeginTime: time.Now(),
+ BeginTime: beginTime,
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
- defer func() {
- if err != nil {
- // Only handle end stats if err != nil.
- end := &stats.End{
- Client: true,
- Error: err,
- }
- sh.HandleRPC(ctx, end)
- }
- }()
}
- gopts := BalancerGetOptions{
- BlockingWait: !c.failFast,
+
+ cs := &clientStream{
+ callHdr: callHdr,
+ ctx: ctx,
+ methodConfig: &mc,
+ opts: opts,
+ callInfo: c,
+ cc: cc,
+ desc: desc,
+ codec: c.codec,
+ cp: cp,
+ comp: comp,
+ cancel: cancel,
+ beginTime: beginTime,
+ firstAttempt: true,
+ }
+ if !cc.dopts.disableRetry {
+ cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
}
- for {
- t, put, err = cc.getTransport(ctx, gopts)
- if err != nil {
- // TODO(zhaoq): Probably revisit the error handling.
- if _, ok := status.FromError(err); ok {
- return nil, err
- }
- if err == errConnClosing || err == errConnUnavailable {
- if c.failFast {
- return nil, Errorf(codes.Unavailable, "%v", err)
- }
- continue
- }
- // All the other errors are treated as Internal errors.
- return nil, Errorf(codes.Internal, "%v", err)
- }
+ cs.binlog = binarylog.GetMethodLogger(method)
- s, err = t.NewStream(ctx, callHdr)
- if err != nil {
- if _, ok := err.(transport.ConnectionError); ok && put != nil {
- // If error is connection error, transport was sending data on wire,
- // and we are not sure if anything has been sent on wire.
- // If error is not connection error, we are sure nothing has been sent.
- updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
- }
- if put != nil {
- put()
- put = nil
- }
- if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
- continue
+ cs.callInfo.stream = cs
+ // Only this initial attempt has stats/tracing.
+ // TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
+ if err := cs.newAttemptLocked(sh, trInfo); err != nil {
+ cs.finish(err)
+ return nil, err
+ }
+
+ op := func(a *csAttempt) error { return a.newStream() }
+ if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+ cs.finish(err)
+ return nil, err
+ }
+
+ if cs.binlog != nil {
+ md, _ := metadata.FromOutgoingContext(ctx)
+ logEntry := &binarylog.ClientHeader{
+ OnClientSide: true,
+ Header: md,
+ MethodName: method,
+ Authority: cs.cc.authority,
+ }
+ if deadline, ok := ctx.Deadline(); ok {
+ logEntry.Timeout = deadline.Sub(time.Now())
+ if logEntry.Timeout < 0 {
+ logEntry.Timeout = 0
}
- return nil, toRPCErr(err)
}
- break
+ cs.binlog.Log(logEntry)
}
- // Set callInfo.peer object from stream's context.
- if peer, ok := peer.FromContext(s.Context()); ok {
- c.peer = peer
+
+ if desc != unaryStreamDesc {
+ // Listen on cc and stream contexts to cleanup when the user closes the
+ // ClientConn or cancels the stream context. In all other cases, an error
+ // should already be injected into the recv buffer by the transport, which
+ // the client will eventually receive, and then we will cancel the stream's
+ // context in clientStream.finish.
+ go func() {
+ select {
+ case <-cc.ctx.Done():
+ cs.finish(ErrClientConnClosing)
+ case <-ctx.Done():
+ cs.finish(toRPCErr(ctx.Err()))
+ }
+ }()
}
- cs := &clientStream{
- opts: opts,
- c: c,
- desc: desc,
- codec: cc.dopts.codec,
- cp: cc.dopts.cp,
- dc: cc.dopts.dc,
- cancel: cancel,
-
- put: put,
- t: t,
- s: s,
- p: &parser{r: s},
-
- tracing: EnableTracing,
- trInfo: trInfo,
-
- statsCtx: ctx,
- statsHandler: cc.dopts.copts.StatsHandler,
- }
- if cc.dopts.cp != nil {
- cs.cbuf = new(bytes.Buffer)
- }
- // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
- // when there is no pending I/O operations on this stream.
- go func() {
- select {
- case <-t.Error():
- // Incur transport error, simply exit.
- case <-cc.ctx.Done():
- cs.finish(ErrClientConnClosing)
- cs.closeTransportStream(ErrClientConnClosing)
- case <-s.Done():
- // TODO: The trace of the RPC is terminated here when there is no pending
- // I/O, which is probably not the optimal solution.
- cs.finish(s.Status().Err())
- cs.closeTransportStream(nil)
- case <-s.GoAway():
- cs.finish(errConnDrain)
- cs.closeTransportStream(errConnDrain)
- case <-s.Context().Done():
- err := s.Context().Err()
- cs.finish(err)
- cs.closeTransportStream(transport.ContextErr(err))
- }
- }()
return cs, nil
}
+func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error {
+ cs.attempt = &csAttempt{
+ cs: cs,
+ dc: cs.cc.dopts.dc,
+ statsHandler: sh,
+ trInfo: trInfo,
+ }
+
+ if err := cs.ctx.Err(); err != nil {
+ return toRPCErr(err)
+ }
+ t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ if err != nil {
+ return err
+ }
+ cs.attempt.t = t
+ cs.attempt.done = done
+ return nil
+}
+
+func (a *csAttempt) newStream() error {
+ cs := a.cs
+ cs.callHdr.PreviousAttempts = cs.numRetries
+ s, err := a.t.NewStream(cs.ctx, cs.callHdr)
+ if err != nil {
+ return toRPCErr(err)
+ }
+ cs.attempt.s = s
+ cs.attempt.p = &parser{r: s}
+ return nil
+}
+
// clientStream implements a client side Stream.
type clientStream struct {
- opts []CallOption
- c callInfo
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- desc *StreamDesc
- codec Codec
- cp Compressor
- cbuf *bytes.Buffer
- dc Decompressor
- cancel context.CancelFunc
+ callHdr *transport.CallHdr
+ opts []CallOption
+ callInfo *callInfo
+ cc *ClientConn
+ desc *StreamDesc
+
+ codec baseCodec
+ cp Compressor
+ comp encoding.Compressor
+
+ cancel context.CancelFunc // cancels all attempts
+
+ sentLast bool // sent an end stream
+ beginTime time.Time
+
+ methodConfig *MethodConfig
+
+ ctx context.Context // the application's context, wrapped by stats/tracing
- tracing bool // set to EnableTracing when the clientStream is created.
+ retryThrottler *retryThrottler // The throttler active when the RPC began.
- mu sync.Mutex
- put func()
- closed bool
- finished bool
- // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
- // and is set to nil when the clientStream's finish method is called.
+ binlog *binarylog.MethodLogger // Binary logger, can be nil.
+ // serverHeaderBinlogged is a boolean for whether server header has been
+ // logged. The server header will be logged the first time one of the
+ // following happens: stream.Header(), stream.Recv().
+ //
+ // It's only read and used by Recv() and Header(), so it doesn't need to be
+ // synchronized.
+ serverHeaderBinlogged bool
+
+ mu sync.Mutex
+ firstAttempt bool // if true, transparent retry is valid
+ numRetries int // exclusive of transparent retry attempt(s)
+ numRetriesSincePushback int // retries since pushback; to reset backoff
+ finished bool // TODO: replace with atomic cmpxchg or sync.Once?
+ attempt *csAttempt // the active client stream attempt
+ // TODO(hedging): hedging will have multiple attempts simultaneously.
+ committed bool // active attempt committed for retry?
+ buffer []func(a *csAttempt) error // operations to replay on retry
+ bufferSize int // current size of buffer
+}
+
+// csAttempt implements a single transport stream attempt within a
+// clientStream.
+type csAttempt struct {
+ cs *clientStream
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ done func(balancer.DoneInfo)
+
+ finished bool
+ dc Decompressor
+ decomp encoding.Compressor
+ decompSet bool
+
+ mu sync.Mutex // guards trInfo.tr
+ // trInfo.tr is set when created (if EnableTracing is true),
+ // and cleared when the finish method is called.
trInfo traceInfo
- // statsCtx keeps the user context for stats handling.
- // All stats collection should use the statsCtx (instead of the stream context)
- // so that all the generated stats for a particular RPC can be associated in the processing phase.
- statsCtx context.Context
statsHandler stats.Handler
}
+func (cs *clientStream) commitAttemptLocked() {
+ cs.committed = true
+ cs.buffer = nil
+}
+
+func (cs *clientStream) commitAttempt() {
+ cs.mu.Lock()
+ cs.commitAttemptLocked()
+ cs.mu.Unlock()
+}
+
+// shouldRetry returns nil if the RPC should be retried; otherwise it returns
+// the error that should be returned by the operation.
+func (cs *clientStream) shouldRetry(err error) error {
+ if cs.attempt.s == nil && !cs.callInfo.failFast {
+ // In the event of any error from NewStream (attempt.s == nil), we
+ // never attempted to write anything to the wire, so we can retry
+ // indefinitely for non-fail-fast RPCs.
+ return nil
+ }
+ if cs.finished || cs.committed {
+ // RPC is finished or committed; cannot retry.
+ return err
+ }
+ // Wait for the trailers.
+ if cs.attempt.s != nil {
+ <-cs.attempt.s.Done()
+ }
+ if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+ // First attempt, wait-for-ready, stream unprocessed: transparently retry.
+ cs.firstAttempt = false
+ return nil
+ }
+ cs.firstAttempt = false
+ if cs.cc.dopts.disableRetry {
+ return err
+ }
+
+ pushback := 0
+ hasPushback := false
+ if cs.attempt.s != nil {
+ if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
+ return err
+ }
+
+ // TODO(retry): Move down if the spec changes to not check server pushback
+ // before considering this a failure for throttling.
+ sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
+ if len(sps) == 1 {
+ var e error
+ if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
+ grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
+ cs.retryThrottler.throttle() // This counts as a failure for throttling.
+ return err
+ }
+ hasPushback = true
+ } else if len(sps) > 1 {
+ grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
+ cs.retryThrottler.throttle() // This counts as a failure for throttling.
+ return err
+ }
+ }
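+
+ // For reference (not part of this change): per the gRPC retry design, the
+ // pushback trailer carries a delay in milliseconds, e.g.
+ // "grpc-retry-pushback-ms: 2000" asks the client to wait 2s before the next
+ // attempt, while a negative or unparseable value tells it not to retry.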
+
+ var code codes.Code
+ if cs.attempt.s != nil {
+ code = cs.attempt.s.Status().Code()
+ } else {
+ code = status.Convert(err).Code()
+ }
+
+ rp := cs.methodConfig.retryPolicy
+ if rp == nil || !rp.retryableStatusCodes[code] {
+ return err
+ }
+
+ // Note: the ordering here is important; we count this as a failure
+ // only if the code matched a retryable code.
+ if cs.retryThrottler.throttle() {
+ return err
+ }
+ if cs.numRetries+1 >= rp.maxAttempts {
+ return err
+ }
+
+ var dur time.Duration
+ if hasPushback {
+ dur = time.Millisecond * time.Duration(pushback)
+ cs.numRetriesSincePushback = 0
+ } else {
+ fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
+ cur := float64(rp.initialBackoff) * fact
+ if max := float64(rp.maxBackoff); cur > max {
+ cur = max
+ }
+ dur = time.Duration(grpcrand.Int63n(int64(cur)))
+ cs.numRetriesSincePushback++
+ }
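+
+ // Worked example of the backoff above (illustrative, assuming
+ // initialBackoff=100ms, backoffMultiplier=2, maxBackoff=1s): successive
+ // retries draw a random delay from [0, 100ms), [0, 200ms), [0, 400ms),
+ // [0, 800ms), and then [0, 1s) once the cap is reached; a server pushback
+ // resets this progression via numRetriesSincePushback.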
+
+ // TODO(dfawley): we could eagerly fail here if dur puts us past the
+ // deadline, but unsure if it is worth doing.
+ t := time.NewTimer(dur)
+ select {
+ case <-t.C:
+ cs.numRetries++
+ return nil
+ case <-cs.ctx.Done():
+ t.Stop()
+ return status.FromContextError(cs.ctx.Err()).Err()
+ }
+}
+
+// Returns nil if a retry was performed and succeeded; error otherwise.
+func (cs *clientStream) retryLocked(lastErr error) error {
+ for {
+ cs.attempt.finish(lastErr)
+ if err := cs.shouldRetry(lastErr); err != nil {
+ cs.commitAttemptLocked()
+ return err
+ }
+ if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil {
+ return err
+ }
+ if lastErr = cs.replayBufferLocked(); lastErr == nil {
+ return nil
+ }
+ }
+}
+
func (cs *clientStream) Context() context.Context {
- return cs.s.Context()
+ cs.commitAttempt()
+ // No need to lock before using attempt, since we know it is committed and
+ // cannot change.
+ return cs.attempt.s.Context()
+}
+
+func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
+ cs.mu.Lock()
+ for {
+ if cs.committed {
+ cs.mu.Unlock()
+ return op(cs.attempt)
+ }
+ a := cs.attempt
+ cs.mu.Unlock()
+ err := op(a)
+ cs.mu.Lock()
+ if a != cs.attempt {
+ // We started another attempt already.
+ continue
+ }
+ if err == io.EOF {
+ <-a.s.Done()
+ }
+ if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+ onSuccess()
+ cs.mu.Unlock()
+ return err
+ }
+ if err := cs.retryLocked(err); err != nil {
+ cs.mu.Unlock()
+ return err
+ }
+ }
}
func (cs *clientStream) Header() (metadata.MD, error) {
- m, err := cs.s.Header()
+ var m metadata.MD
+ err := cs.withRetry(func(a *csAttempt) error {
+ var err error
+ m, err = a.s.Header()
+ return toRPCErr(err)
+ }, cs.commitAttemptLocked)
if err != nil {
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
+ cs.finish(err)
+ return nil, err
+ }
+ if cs.binlog != nil && !cs.serverHeaderBinlogged {
+ // Only log if binary log is on and header has not been logged.
+ logEntry := &binarylog.ServerHeader{
+ OnClientSide: true,
+ Header: m,
+ PeerAddr: nil,
}
+ if peer, ok := peer.FromContext(cs.Context()); ok {
+ logEntry.PeerAddr = peer.Addr
+ }
+ cs.binlog.Log(logEntry)
+ cs.serverHeaderBinlogged = true
}
return m, err
}
func (cs *clientStream) Trailer() metadata.MD {
- return cs.s.Trailer()
+ // On RPC failure, we never need to retry, because usage requires that
+ // RecvMsg() has returned a non-nil error before this function is called.
+ // We would have retried earlier if necessary.
+ //
+ // Commit the attempt anyway, just in case users are not following those
+ // directions -- it will prevent races and should not meaningfully impact
+ // performance.
+ cs.commitAttempt()
+ if cs.attempt.s == nil {
+ return nil
+ }
+ return cs.attempt.s.Trailer()
}
-func (cs *clientStream) SendMsg(m interface{}) (err error) {
- if cs.tracing {
- cs.mu.Lock()
- if cs.trInfo.tr != nil {
- cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+func (cs *clientStream) replayBufferLocked() error {
+ a := cs.attempt
+ for _, f := range cs.buffer {
+ if err := f(a); err != nil {
+ return err
}
- cs.mu.Unlock()
}
- // TODO Investigate how to signal the stats handling party.
- // generate error stats if err != nil && err != io.EOF?
+ return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+ // Note: we still will buffer if retry is disabled (for transparent retries).
+ if cs.committed {
+ return
+ }
+ cs.bufferSize += sz
+ if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.commitAttemptLocked()
+ return
+ }
+ cs.buffer = append(cs.buffer, op)
+}
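+
+ // For reference (not part of this change): the ops buffered above are the
+ // initial newStream, each SendMsg (charged by header+payload size), and
+ // CloseSend; replayBufferLocked re-applies them in order on a fresh attempt.
+ // Once the buffered size would exceed maxRetryRPCBufferSize, the RPC commits
+ // to the current attempt and stops buffering.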
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
defer func() {
- if err != nil {
+ if err != nil && err != io.EOF {
+ // Call finish on the client stream for errors generated by this SendMsg
+ // call, as these indicate problems created by this client. (Transport
+ // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+ // error will be returned from RecvMsg eventually in that case, or be
+ // retried.)
cs.finish(err)
}
- if err == nil {
- return
- }
- if err == io.EOF {
- // Specialize the process for server streaming. SendMesg is only called
- // once when creating the stream object. io.EOF needs to be skipped when
- // the rpc is early finished (before the stream object is created.).
- // TODO: It is probably better to move this into the generated code.
- if !cs.desc.ClientStreams && cs.desc.ServerStreams {
- err = nil
- }
- return
- }
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
- }
- err = toRPCErr(err)
}()
- var outPayload *stats.OutPayload
- if cs.statsHandler != nil {
- outPayload = &stats.OutPayload{
- Client: true,
- }
+ if cs.sentLast {
+ return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
}
- out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
- defer func() {
- if cs.cbuf != nil {
- cs.cbuf.Reset()
- }
- }()
+ if !cs.desc.ClientStreams {
+ cs.sentLast = true
+ }
+ data, err := encode(cs.codec, m)
if err != nil {
return err
}
- if cs.c.maxSendMessageSize == nil {
- return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
+ compData, err := compress(data, cs.cp, cs.comp)
+ if err != nil {
+ return err
}
- if len(out) > *cs.c.maxSendMessageSize {
- return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), *cs.c.maxSendMessageSize)
+ hdr, payload := msgHeader(data, compData)
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payload) > *cs.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
}
- err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
- if err == nil && outPayload != nil {
- outPayload.SentTime = time.Now()
- cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
+ msgBytes := data // Store the pointer before setting to nil. For binary logging.
+ op := func(a *csAttempt) error {
+ err := a.sendMsg(m, hdr, payload, data)
+ // Nil out the message and data when replaying; they are only needed for
+ // stats, which are disabled for subsequent attempts.
+ m, data = nil, nil
+ return err
}
- return err
+ err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
+ if cs.binlog != nil && err == nil {
+ cs.binlog.Log(&binarylog.ClientMessage{
+ OnClientSide: true,
+ Message: msgBytes,
+ })
+ }
+ return
}
-func (cs *clientStream) RecvMsg(m interface{}) (err error) {
- var inPayload *stats.InPayload
- if cs.statsHandler != nil {
- inPayload = &stats.InPayload{
- Client: true,
- }
+func (cs *clientStream) RecvMsg(m interface{}) error {
+ if cs.binlog != nil && !cs.serverHeaderBinlogged {
+ // Call Header() to binary log header if it's not already logged.
+ cs.Header()
}
- if cs.c.maxReceiveMessageSize == nil {
- return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+ var recvInfo *payloadInfo
+ if cs.binlog != nil {
+ recvInfo = &payloadInfo{}
}
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
- defer func() {
- // err != nil indicates the termination of the stream.
- if err != nil {
- cs.finish(err)
+ err := cs.withRetry(func(a *csAttempt) error {
+ return a.recvMsg(m, recvInfo)
+ }, cs.commitAttemptLocked)
+ if cs.binlog != nil && err == nil {
+ cs.binlog.Log(&binarylog.ServerMessage{
+ OnClientSide: true,
+ Message: recvInfo.uncompressedBytes,
+ })
+ }
+ if err != nil || !cs.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ cs.finish(err)
+
+ if cs.binlog != nil {
+ // finish will not log Trailer. Log Trailer here.
+ logEntry := &binarylog.ServerTrailer{
+ OnClientSide: true,
+ Trailer: cs.Trailer(),
+ Err: err,
+ }
+ if logEntry.Err == io.EOF {
+ logEntry.Err = nil
+ }
+ if peer, ok := peer.FromContext(cs.Context()); ok {
+ logEntry.PeerAddr = peer.Addr
+ }
+ cs.binlog.Log(logEntry)
}
- }()
+ }
+ return err
+}
+
+func (cs *clientStream) CloseSend() error {
+ if cs.sentLast {
+ // TODO: return an error and finish the stream instead, due to API misuse?
+ return nil
+ }
+ cs.sentLast = true
+ op := func(a *csAttempt) error {
+ a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+ // Always return nil; io.EOF is the only error that might make sense
+ // instead, but there is no need to signal the client to call RecvMsg
+ // as the only use left for the stream after CloseSend is to call
+ // RecvMsg. This also matches historical behavior.
+ return nil
+ }
+ cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+ if cs.binlog != nil {
+ cs.binlog.Log(&binarylog.ClientHalfClose{
+ OnClientSide: true,
+ })
+ }
+ // We never return an error here; see the rationale in op above.
+ return nil
+}
+
+func (cs *clientStream) finish(err error) {
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ cs.mu.Lock()
+ if cs.finished {
+ cs.mu.Unlock()
+ return
+ }
+ cs.finished = true
+ cs.commitAttemptLocked()
+ cs.mu.Unlock()
+ // For binary logging: only log cancel in finish (it could be caused by the
+ // RPC ctx being canceled or the ClientConn being closed). Trailer will be
+ // logged in RecvMsg.
+ //
+ // Only one of cancel or trailer needs to be logged. In the cases where
+ // users don't call RecvMsg, users must have already canceled the RPC.
+ if cs.binlog != nil && status.Code(err) == codes.Canceled {
+ cs.binlog.Log(&binarylog.Cancel{
+ OnClientSide: true,
+ })
+ }
if err == nil {
- if cs.tracing {
- cs.mu.Lock()
- if cs.trInfo.tr != nil {
- cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
- }
- cs.mu.Unlock()
+ cs.retryThrottler.successfulRPC()
+ }
+ if channelz.IsOn() {
+ if err != nil {
+ cs.cc.incrCallsFailed()
+ } else {
+ cs.cc.incrCallsSucceeded()
}
- if inPayload != nil {
- cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
+ }
+ if cs.attempt != nil {
+ cs.attempt.finish(err)
+ }
+ // The after functions all rely upon having a stream.
+ if cs.attempt.s != nil {
+ for _, o := range cs.opts {
+ o.after(cs.callInfo)
}
- if !cs.desc.ClientStreams || cs.desc.ServerStreams {
- return
+ }
+ cs.cancel()
+}
+
+func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+ cs := a.cs
+ if EnableTracing {
+ a.mu.Lock()
+ if a.trInfo.tr != nil {
+ a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
}
- // Special handling for client streaming rpc.
- // This recv expects EOF or errors, so we don't collect inPayload.
- if cs.c.maxReceiveMessageSize == nil {
- return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+ a.mu.Unlock()
+ }
+ if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+ if !cs.desc.ClientStreams {
+ // For non-client-streaming RPCs, we return nil instead of EOF on error
+ // because the generated code requires it. finish is not called; RecvMsg()
+ // will call it with the stream's status independently.
+ return nil
}
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
- cs.closeTransportStream(err)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ return io.EOF
+ }
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
+ }
+ if channelz.IsOn() {
+ a.t.IncrMsgSent()
+ }
+ return nil
+}
+
+func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
+ cs := a.cs
+ if a.statsHandler != nil && payInfo == nil {
+ payInfo = &payloadInfo{}
+ }
+
+ if !a.decompSet {
+ // Block until we receive headers containing received message encoding.
+ if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if a.dc == nil || a.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ a.dc = nil
+ a.decomp = encoding.GetCompressor(ct)
+ }
+ } else {
+ // No compression is used; disable our decompressor.
+ a.dc = nil
}
+ // Only initialize this state once per stream.
+ a.decompSet = true
+ }
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
+ if err != nil {
if err == io.EOF {
- if se := cs.s.Status().Err(); se != nil {
- return se
+ if statusErr := a.s.Status().Err(); statusErr != nil {
+ return statusErr
}
- cs.finish(err)
- return nil
+ return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
+ if EnableTracing {
+ a.mu.Lock()
+ if a.trInfo.tr != nil {
+ a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ }
+ a.mu.Unlock()
+ }
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
+ Client: true,
+ RecvTime: time.Now(),
+ Payload: m,
+ // TODO truncate large payload.
+ Data: payInfo.uncompressedBytes,
+ Length: len(payInfo.uncompressedBytes),
+ })
+ }
+ if channelz.IsOn() {
+ a.t.IncrMsgRecv()
+ }
+ if cs.desc.ServerStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+ // Special handling for non-server-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
if err == io.EOF {
- if statusErr := cs.s.Status().Err(); statusErr != nil {
- return statusErr
- }
- // Returns io.EOF to indicate the end of the stream.
- return
+ return a.s.Status().Err() // non-server streaming Recv returns nil on success
}
return toRPCErr(err)
}
-func (cs *clientStream) CloseSend() (err error) {
- err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
+func (a *csAttempt) finish(err error) {
+ a.mu.Lock()
+ if a.finished {
+ a.mu.Unlock()
+ return
+ }
+ a.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ if a.s != nil {
+ a.t.CloseStream(a.s, err)
+ }
+
+ if a.done != nil {
+ br := false
+ var tr metadata.MD
+ if a.s != nil {
+ br = a.s.BytesReceived()
+ tr = a.s.Trailer()
+ }
+ a.done(balancer.DoneInfo{
+ Err: err,
+ Trailer: tr,
+ BytesSent: a.s != nil,
+ BytesReceived: br,
+ })
+ }
+ if a.statsHandler != nil {
+ end := &stats.End{
+ Client: true,
+ BeginTime: a.cs.beginTime,
+ EndTime: time.Now(),
+ Error: err,
+ }
+ a.statsHandler.HandleRPC(a.cs.ctx, end)
+ }
+ if a.trInfo.tr != nil {
+ if err == nil {
+ a.trInfo.tr.LazyPrintf("RPC: [OK]")
+ } else {
+ a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ a.trInfo.tr.SetError()
+ }
+ a.trInfo.tr.Finish()
+ a.trInfo.tr = nil
+ }
+ a.mu.Unlock()
+}
+
+func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) {
+ ac.mu.Lock()
+ if ac.transport != t {
+ ac.mu.Unlock()
+ return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
+ }
+ // transition to CONNECTING state when an attempt starts
+ if ac.state != connectivity.Connecting {
+ ac.updateConnectivityState(connectivity.Connecting)
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ }
+ ac.mu.Unlock()
+
+ if t == nil {
+ // TODO: return RPC error here?
+ return nil, errors.New("transport provided is nil")
+ }
+ // defaultCallInfo contains unnecessary info (e.g. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+ c := &callInfo{}
+
+ for _, o := range opts {
+ if err := o.before(c); err != nil {
+ return nil, toRPCErr(err)
+ }
+ }
+ c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+
+ // Possible context leak:
+ // The cancel function for the child context we create will only be called
+ // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+ // an error is generated by SendMsg.
+ // https://github.com/grpc/grpc-go/issues/1818.
+ ctx, cancel := context.WithCancel(ctx)
defer func() {
if err != nil {
- cs.finish(err)
+ cancel()
}
}()
- if err == nil || err == io.EOF {
- return nil
+
+ if err := setCallInfoCodec(c); err != nil {
+ return nil, err
}
- if _, ok := err.(transport.ConnectionError); !ok {
- cs.closeTransportStream(err)
+
+ callHdr := &transport.CallHdr{
+ Host: ac.cc.authority,
+ Method: method,
+ ContentSubtype: c.contentSubtype,
}
- err = toRPCErr(err)
- return
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
+ var cp Compressor
+ var comp encoding.Compressor
+ if ct := c.compressorType; ct != "" {
+ callHdr.SendCompress = ct
+ if ct != encoding.Identity {
+ comp = encoding.GetCompressor(ct)
+ if comp == nil {
+ return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+ }
+ }
+ } else if ac.cc.dopts.cp != nil {
+ callHdr.SendCompress = ac.cc.dopts.cp.Type()
+ cp = ac.cc.dopts.cp
+ }
+ if c.creds != nil {
+ callHdr.Creds = c.creds
+ }
+
+ as := &addrConnStream{
+ callHdr: callHdr,
+ ac: ac,
+ ctx: ctx,
+ cancel: cancel,
+ opts: opts,
+ callInfo: c,
+ desc: desc,
+ codec: c.codec,
+ cp: cp,
+ comp: comp,
+ t: t,
+ }
+
+ as.callInfo.stream = as
+ s, err := as.t.NewStream(as.ctx, as.callHdr)
+ if err != nil {
+ err = toRPCErr(err)
+ return nil, err
+ }
+ as.s = s
+ as.p = &parser{r: s}
+ ac.incrCallsStarted()
+ if desc != unaryStreamDesc {
+ // Listen on cc and stream contexts to cleanup when the user closes the
+ // ClientConn or cancels the stream context. In all other cases, an error
+ // should already be injected into the recv buffer by the transport, which
+ // the client will eventually receive, and then we will cancel the stream's
+ // context in clientStream.finish.
+ go func() {
+ select {
+ case <-ac.ctx.Done():
+ as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
+ case <-ctx.Done():
+ as.finish(toRPCErr(ctx.Err()))
+ }
+ }()
+ }
+ return as, nil
}
-func (cs *clientStream) closeTransportStream(err error) {
- cs.mu.Lock()
- if cs.closed {
- cs.mu.Unlock()
- return
+type addrConnStream struct {
+ s *transport.Stream
+ ac *addrConn
+ callHdr *transport.CallHdr
+ cancel context.CancelFunc
+ opts []CallOption
+ callInfo *callInfo
+ t transport.ClientTransport
+ ctx context.Context
+ sentLast bool
+ desc *StreamDesc
+ codec baseCodec
+ cp Compressor
+ comp encoding.Compressor
+ decompSet bool
+ dc Decompressor
+ decomp encoding.Compressor
+ p *parser
+ done func(balancer.DoneInfo)
+ mu sync.Mutex
+ finished bool
+}
+
+func (as *addrConnStream) Header() (metadata.MD, error) {
+ m, err := as.s.Header()
+ if err != nil {
+ as.finish(toRPCErr(err))
}
- cs.closed = true
- cs.mu.Unlock()
- cs.t.CloseStream(cs.s, err)
+ return m, err
}
-func (cs *clientStream) finish(err error) {
- cs.mu.Lock()
- defer cs.mu.Unlock()
- if cs.finished {
- return
+func (as *addrConnStream) Trailer() metadata.MD {
+ return as.s.Trailer()
+}
+
+func (as *addrConnStream) CloseSend() error {
+ if as.sentLast {
+ // TODO: return an error and finish the stream instead, due to API misuse?
+ return nil
}
- cs.finished = true
+ as.sentLast = true
+
+ as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
+ // Always return nil; io.EOF is the only error that might make sense
+ // instead, but there is no need to signal the client to call RecvMsg
+ // as the only use left for the stream after CloseSend is to call
+ // RecvMsg. This also matches historical behavior.
+ return nil
+}
+
+func (as *addrConnStream) Context() context.Context {
+ return as.s.Context()
+}
+
+func (as *addrConnStream) SendMsg(m interface{}) (err error) {
defer func() {
- if cs.cancel != nil {
- cs.cancel()
+ if err != nil && err != io.EOF {
+ // Call finish on the client stream for errors generated by this SendMsg
+ // call, as these indicate problems created by this client. (Transport
+ // errors are converted to an io.EOF error below; the real error will be
+ // returned from RecvMsg eventually in that case.)
+ as.finish(err)
}
}()
- for _, o := range cs.opts {
- o.after(&cs.c)
+ if as.sentLast {
+ return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
}
- if cs.put != nil {
- updateRPCInfoInContext(cs.s.Context(), rpcInfo{
- bytesSent: cs.s.BytesSent(),
- bytesReceived: cs.s.BytesReceived(),
- })
- cs.put()
- cs.put = nil
+ if !as.desc.ClientStreams {
+ as.sentLast = true
}
- if cs.statsHandler != nil {
- end := &stats.End{
- Client: true,
- EndTime: time.Now(),
- }
- if err != io.EOF {
- // end.Error is nil if the RPC finished successfully.
- end.Error = toRPCErr(err)
+ data, err := encode(as.codec, m)
+ if err != nil {
+ return err
+ }
+ compData, err := compress(data, as.cp, as.comp)
+ if err != nil {
+ return err
+ }
+ hdr, payld := msgHeader(data, compData)
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payld) > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+ }
+
+ if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if !as.desc.ClientStreams {
+ // For non-client-streaming RPCs, we return nil instead of EOF on error
+ // because the generated code requires it. finish is not called; RecvMsg()
+ // will call it with the stream's status independently.
+ return nil
}
- cs.statsHandler.HandleRPC(cs.statsCtx, end)
+ return io.EOF
}
- if !cs.tracing {
- return
+
+ if channelz.IsOn() {
+ as.t.IncrMsgSent()
}
- if cs.trInfo.tr != nil {
- if err == nil || err == io.EOF {
- cs.trInfo.tr.LazyPrintf("RPC: [OK]")
+ return nil
+}
+
+func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+ defer func() {
+ if err != nil || !as.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ as.finish(err)
+ }
+ }()
+
+ if !as.decompSet {
+ // Block until we receive headers containing received message encoding.
+ if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if as.dc == nil || as.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ as.dc = nil
+ as.decomp = encoding.GetCompressor(ct)
+ }
} else {
- cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
- cs.trInfo.tr.SetError()
+ // No compression is used; disable our decompressor.
+ as.dc = nil
}
- cs.trInfo.tr.Finish()
- cs.trInfo.tr = nil
+ // Only initialize this state once per stream.
+ as.decompSet = true
+ }
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+ if err != nil {
+ if err == io.EOF {
+ if statusErr := as.s.Status().Err(); statusErr != nil {
+ return statusErr
+ }
+ return io.EOF // indicates successful end of stream.
+ }
+ return toRPCErr(err)
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgRecv()
}
+ if as.desc.ServerStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+
+ // Special handling for non-server-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ }
+ return toRPCErr(err)
}
-// ServerStream defines the interface a server stream has to satisfy.
+func (as *addrConnStream) finish(err error) {
+ as.mu.Lock()
+ if as.finished {
+ as.mu.Unlock()
+ return
+ }
+ as.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ if as.s != nil {
+ as.t.CloseStream(as.s, err)
+ }
+
+ if err != nil {
+ as.ac.incrCallsFailed()
+ } else {
+ as.ac.incrCallsSucceeded()
+ }
+ as.cancel()
+ as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// All errors returned from ServerStream methods are compatible with the
+// status package.
type ServerStream interface {
// SetHeader sets the header metadata. It may be called multiple times.
// When called multiple times, all the provided metadata will be merged.
// SetTrailer sets the trailer metadata which will be sent with the RPC status.
// When called more than once, all the provided metadata will be merged.
SetTrailer(metadata.MD)
- Stream
+ // Context returns the context for this stream.
+ Context() context.Context
+ // SendMsg sends a message. On error, SendMsg aborts the stream and the
+ // error is returned directly.
+ //
+ // SendMsg blocks until:
+ // - There is sufficient flow control to schedule m with the transport, or
+ // - The stream is done, or
+ // - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the client. An
+ // untimely stream closure may result in lost messages.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the client has performed a CloseSend. On
+ // any non-EOF error, the stream is aborted and the error contains the
+ // RPC status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m interface{}) error
}
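+
+ // A minimal handler sketch of the rules above (illustrative only; the
+ // generated stream type pb.Echo_ChatServer and message type pb.Msg are
+ // hypothetical):
+ //
+ //   func (s *server) Chat(stream pb.Echo_ChatServer) error {
+ //       for {
+ //           req := new(pb.Msg)
+ //           if err := stream.RecvMsg(req); err != nil {
+ //               if err == io.EOF {
+ //                   return nil // client finished sending (CloseSend)
+ //               }
+ //               return err // aborts the stream with the contained status
+ //           }
+ //           if err := stream.SendMsg(req); err != nil {
+ //               return err
+ //           }
+ //       }
+ //   }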
// serverStream implements a server side Stream.
type serverStream struct {
- t transport.ServerTransport
- s *transport.Stream
- p *parser
- codec Codec
- cp Compressor
- dc Decompressor
- cbuf *bytes.Buffer
+ ctx context.Context
+ t transport.ServerTransport
+ s *transport.Stream
+ p *parser
+ codec baseCodec
+
+ cp Compressor
+ dc Decompressor
+ comp encoding.Compressor
+ decomp encoding.Compressor
+
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
statsHandler stats.Handler
+ binlog *binarylog.MethodLogger
+ // serverHeaderBinlogged indicates whether the server header has been logged.
+ // It is set the first time one of the following happens: stream.SendHeader()
+ // or stream.Send().
+ //
+ // It's only checked in send and sendHeader, doesn't need to be
+ // synchronized.
+ serverHeaderBinlogged bool
+
mu sync.Mutex // protects trInfo.tr after the service handler runs.
}
func (ss *serverStream) Context() context.Context {
- return ss.s.Context()
+ return ss.ctx
}
func (ss *serverStream) SetHeader(md metadata.MD) error {
}
func (ss *serverStream) SendHeader(md metadata.MD) error {
- return ss.t.WriteHeader(ss.s, md)
+ err := ss.t.WriteHeader(ss.s, md)
+ if ss.binlog != nil && !ss.serverHeaderBinlogged {
+ h, _ := ss.s.Header()
+ ss.binlog.Log(&binarylog.ServerHeader{
+ Header: h,
+ })
+ ss.serverHeaderBinlogged = true
+ }
+ return err
}
func (ss *serverStream) SetTrailer(md metadata.MD) {
return
}
ss.s.SetTrailer(md)
- return
}
func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
ss.mu.Unlock()
}
- }()
- var outPayload *stats.OutPayload
- if ss.statsHandler != nil {
- outPayload = &stats.OutPayload{}
- }
- out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload)
- defer func() {
- if ss.cbuf != nil {
- ss.cbuf.Reset()
+ if err != nil && err != io.EOF {
+ st, _ := status.FromError(toRPCErr(err))
+ ss.t.WriteStatus(ss.s, st)
+ // A non-user-specified status was sent out; this should only happen in an
+ // error case (such as a server-side cancellation).
+ //
+ // It is not handled specifically for now: the user will return a final
+ // status from the service handler, and we will log that error instead.
+ // This behavior is similar to an interceptor.
+ }
+ if channelz.IsOn() && err == nil {
+ ss.t.IncrMsgSent()
}
}()
+ data, err := encode(ss.codec, m)
+ if err != nil {
+ return err
+ }
+ compData, err := compress(data, ss.cp, ss.comp)
if err != nil {
return err
}
- if len(out) > ss.maxSendMessageSize {
- return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(out), ss.maxSendMessageSize)
+ hdr, payload := msgHeader(data, compData)
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payload) > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
}
- if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
+ if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
- if outPayload != nil {
- outPayload.SentTime = time.Now()
- ss.statsHandler.HandleRPC(ss.s.Context(), outPayload)
+ if ss.binlog != nil {
+ if !ss.serverHeaderBinlogged {
+ h, _ := ss.s.Header()
+ ss.binlog.Log(&binarylog.ServerHeader{
+ Header: h,
+ })
+ ss.serverHeaderBinlogged = true
+ }
+ ss.binlog.Log(&binarylog.ServerMessage{
+ Message: data,
+ })
+ }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
}
return nil
}
}
ss.mu.Unlock()
}
+ if err != nil && err != io.EOF {
+ st, _ := status.FromError(toRPCErr(err))
+ ss.t.WriteStatus(ss.s, st)
+ // A non-user-specified status was sent out; this should only happen in an
+ // error case (such as a server-side cancellation).
+ //
+ // It is not handled specifically for now: the user will return a final
+ // status from the service handler, and we will log that error instead.
+ // This behavior is similar to an interceptor.
+ }
+ if channelz.IsOn() && err == nil {
+ ss.t.IncrMsgRecv()
+ }
}()
- var inPayload *stats.InPayload
- if ss.statsHandler != nil {
- inPayload = &stats.InPayload{}
+ var payInfo *payloadInfo
+ if ss.statsHandler != nil || ss.binlog != nil {
+ payInfo = &payloadInfo{}
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
if err == io.EOF {
+ if ss.binlog != nil {
+ ss.binlog.Log(&binarylog.ClientHalfClose{})
+ }
return err
}
if err == io.ErrUnexpectedEOF {
- err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+ err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
return toRPCErr(err)
}
- if inPayload != nil {
- ss.statsHandler.HandleRPC(ss.s.Context(), inPayload)
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: m,
+ // TODO truncate large payload.
+ Data: payInfo.uncompressedBytes,
+ Length: len(payInfo.uncompressedBytes),
+ })
+ }
+ if ss.binlog != nil {
+ ss.binlog.Log(&binarylog.ClientMessage{
+ Message: payInfo.uncompressedBytes,
+ })
}
return nil
}
+
+// MethodFromServerStream returns the method string for the input stream.
+// The returned string is in the format of "/service/method".
+func MethodFromServerStream(stream ServerStream) (string, bool) {
+ return Method(stream.Context())
+}
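+
+ // For example (illustrative, assuming a routeguide.RouteGuide service with a
+ // RouteChat method): a stream interceptor calling MethodFromServerStream
+ // would see "/routeguide.RouteGuide/RouteChat", true; the bool is false only
+ // when no method is recorded in the stream's context.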