[chore] Update a bunch of database dependencies (#1772)

* [chore] Update a bunch of database dependencies

* fix lil thing
Author: tobi
Date: 2023-05-12 14:33:40 +02:00
Committed by: GitHub
Parent: 66df974143
Commit: ec325fee14
402 changed files with 35068 additions and 35401 deletions

View File

@@ -20,6 +20,19 @@ How to get your contributions merged smoothly and quickly.
both the author's and reviewer's time is wasted. Create more PRs to address different
concerns and everyone will be happy.
- For speculative changes, consider opening an issue and discussing it first. If
you are suggesting a behavioral or API change, consider starting with a [gRFC
proposal](https://github.com/grpc/proposal).
- If you are searching for features to work on, issues labeled [Status: Help
Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
are a great place to start. These issues are well-documented and usually can be
resolved with a single pull request.
- If you are adding a new file, make sure it has the copyright message template
at the top as a comment. You can copy over the message from an existing file
and update the year.
- The grpc package should only depend on standard Go packages and a small number
of exceptions. If your contribution introduces new dependencies which are NOT
in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
@@ -32,14 +45,18 @@ How to get your contributions merged smoothly and quickly.
- Provide a good **PR description** as a record of **what** change is being made
and **why** it was made. Link to a GitHub issue if it exists.
- Don't fix code style and formatting unless you are already changing that line
to address an issue. PRs with irrelevant changes won't be merged. If you do
want to fix formatting or style, do that in a separate PR.
- If you want to fix formatting or style, consider whether your changes are an
obvious improvement or might be considered a personal preference. If a style
change is based on preference, it likely will not be accepted. If it corrects
widely agreed-upon anti-patterns, then please do create a PR and explain the
benefits of the change.
- Unless your PR is trivial, you should expect there will be reviewer comments
that you'll need to address before merging. We expect you to be reasonably
responsive to those comments, otherwise the PR will be closed after 2-3 weeks
of inactivity.
that you'll need to address before merging. We'll mark it as `Status: Requires
Reporter Clarification` if we expect you to respond to these comments in a
timely manner. If the PR remains inactive for 6 days, it will be marked as
`stale` and automatically close 7 days after that if we don't hear back from
you.
- Maintain **clean commit history** and use **meaningful commit messages**. PRs
with messy commit history are difficult to review and won't be merged. Use

View File

@@ -19,7 +19,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.14.0
// protoc v4.22.0
// source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1

View File

@@ -146,8 +146,18 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())
for _, opt := range extraDialOptions {
opt.apply(&cc.dopts)
disableGlobalOpts := false
for _, opt := range opts {
if _, ok := opt.(*disableGlobalDialOptions); ok {
disableGlobalOpts = true
break
}
}
if !disableGlobalOpts {
for _, opt := range globalDialOptions {
opt.apply(&cc.dopts)
}
}
for _, opt := range opts {
@@ -1103,7 +1113,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
return
}
ac.state = s
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
if lastErr == nil {
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
} else {
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
}
@@ -1527,6 +1541,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
// referenced by users.
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// getResolver finds the scheme in the cc's resolvers or the global registry.
// scheme should always be lowercase (typically by virtue of url.Parse()
// performing proper RFC3986 behavior).
func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
for _, rb := range cc.dopts.resolvers {
if scheme == rb.Scheme() {

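As the new comment on getResolver notes, lookup relies on schemes already being lowercase. That holds because Go's url.Parse canonicalizes the scheme; a standalone check (the target string below is just an example) confirms it:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.Parse lowercases the scheme per RFC 3986, so a target written as
	// "DNS:///..." still matches a resolver registered under "dns".
	u, err := url.Parse("DNS:///foo.example.com:443")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme) // dns
}
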
View File

@@ -18,7 +18,15 @@
package codes
import "strconv"
import (
"strconv"
"google.golang.org/grpc/internal"
)
func init() {
internal.CanonicalString = canonicalString
}
func (c Code) String() string {
switch c {
@@ -60,3 +68,44 @@ func (c Code) String() string {
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
}
}
func canonicalString(c Code) string {
switch c {
case OK:
return "OK"
case Canceled:
return "CANCELLED"
case Unknown:
return "UNKNOWN"
case InvalidArgument:
return "INVALID_ARGUMENT"
case DeadlineExceeded:
return "DEADLINE_EXCEEDED"
case NotFound:
return "NOT_FOUND"
case AlreadyExists:
return "ALREADY_EXISTS"
case PermissionDenied:
return "PERMISSION_DENIED"
case ResourceExhausted:
return "RESOURCE_EXHAUSTED"
case FailedPrecondition:
return "FAILED_PRECONDITION"
case Aborted:
return "ABORTED"
case OutOfRange:
return "OUT_OF_RANGE"
case Unimplemented:
return "UNIMPLEMENTED"
case Internal:
return "INTERNAL"
case Unavailable:
return "UNAVAILABLE"
case DataLoss:
return "DATA_LOSS"
case Unauthenticated:
return "UNAUTHENTICATED"
default:
return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
}
}

View File

@@ -38,13 +38,14 @@ import (
func init() {
internal.AddGlobalDialOptions = func(opt ...DialOption) {
extraDialOptions = append(extraDialOptions, opt...)
globalDialOptions = append(globalDialOptions, opt...)
}
internal.ClearGlobalDialOptions = func() {
extraDialOptions = nil
globalDialOptions = nil
}
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -83,7 +84,7 @@ type DialOption interface {
apply(*dialOptions)
}
var extraDialOptions []DialOption
var globalDialOptions []DialOption
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
@@ -96,6 +97,16 @@ type EmptyDialOption struct{}
func (EmptyDialOption) apply(*dialOptions) {}
type disableGlobalDialOptions struct{}
func (disableGlobalDialOptions) apply(*dialOptions) {}
// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
// from applying the global DialOptions (set via AddGlobalDialOptions).
func newDisableGlobalDialOptions() DialOption {
return &disableGlobalDialOptions{}
}
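
The new option is a pure marker: it carries no configuration, and DialContext detects its presence by type assertion before applying the globals. A hedged standalone sketch of the pattern (all names below are illustrative, not grpc-go API):

package main

import "fmt"

type options struct{ verbose bool }

// Option mirrors the shape of grpc.DialOption for this sketch.
type Option interface{ apply(*options) }

// skipDefaultsOption is a pure marker, like disableGlobalDialOptions:
// its apply is a no-op; only its presence matters.
type skipDefaultsOption struct{}

func (skipDefaultsOption) apply(*options) {}

type verboseOption struct{}

func (verboseOption) apply(o *options) { o.verbose = true }

var defaultOptions = []Option{verboseOption{}}

func build(opts ...Option) options {
	skip := false
	for _, o := range opts { // scan for the marker first, as DialContext now does
		if _, ok := o.(skipDefaultsOption); ok {
			skip = true
			break
		}
	}
	var cfg options
	if !skip {
		for _, o := range defaultOptions {
			o.apply(&cfg)
		}
	}
	for _, o := range opts {
		o.apply(&cfg)
	}
	return cfg
}

func main() {
	fmt.Printf("%+v\n", build())                     // {verbose:true}: defaults applied
	fmt.Printf("%+v\n", build(skipDefaultsOption{})) // {verbose:false}: defaults skipped
}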
// funcDialOption wraps a function that modifies dialOptions into an
// implementation of the DialOption interface.
type funcDialOption struct {

View File

@@ -28,8 +28,10 @@ import (
"google.golang.org/grpc/internal/grpcutil"
)
// Logger is the global binary logger. It can be used to get binary logger for
// each method.
var grpclogLogger = grpclog.Component("binarylog")
// Logger specifies MethodLoggers for method names with a Log call that
// takes a context.
type Logger interface {
GetMethodLogger(methodName string) MethodLogger
}
@@ -40,8 +42,6 @@ type Logger interface {
// It is used to get a MethodLogger for each individual method.
var binLogger Logger
var grpclogLogger = grpclog.Component("binarylog")
// SetLogger sets the binary logger.
//
// Only call this at init time.

View File

@@ -19,6 +19,7 @@
package binarylog
import (
"context"
"net"
"strings"
"sync/atomic"
@@ -49,7 +50,7 @@ var idGen callIDGenerator
// MethodLogger is the sub-logger for each method.
type MethodLogger interface {
Log(LogEntryConfig)
Log(context.Context, LogEntryConfig)
}
// TruncatingMethodLogger is a method logger that truncates headers and messages
@@ -98,7 +99,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
}
// Log creates a proto binary log entry, and logs it to the sink.
func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}

View File

@@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
if !Logger.V(2) {
return
}
@@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
return Logger.V(l)
}
// NewPrefixLogger creates a prefix logger with the given prefix.

View File

@@ -58,6 +58,9 @@ var (
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
// CanonicalString returns the canonical string of the code defined here:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
CanonicalString interface{} // func (codes.Code) string
// DrainServerTransports initiates a graceful close of existing connections
// on a gRPC server accepted on the provided listener address. An
// xDS-enabled server invokes this method on a grpc.Server when a particular
@@ -74,6 +77,10 @@ var (
// globally for newly created client channels. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
AddGlobalDialOptions interface{} // func(opt ...DialOption)
// DisableGlobalDialOptions returns a DialOption that prevents the
// ClientConn from applying the global DialOptions (set via
// AddGlobalDialOptions).
DisableGlobalDialOptions interface{} // func() grpc.DialOption
// ClearGlobalDialOptions clears the array of extra DialOption. This
// method is useful in testing and benchmarking.
ClearGlobalDialOptions func()
@@ -130,6 +137,9 @@ var (
//
// TODO: Remove this function once the RBAC env var is removed.
UnregisterRBACHTTPFilterForTesting func()
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions)
)
// HealthChecker defines the signature of the client-side LB channel health checking function.

View File

@@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
return addr
}
// Validate returns an error if the input md contains invalid keys or values.
//
// If the header is not a pseudo-header, the following items are checked:
// - header names must contain one or more characters from this set [0-9 a-z _ - .].
// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
// Validate validates every pair in md with ValidatePair.
func Validate(md metadata.MD) error {
for k, vals := range md {
// pseudo-header will be ignored
if k[0] == ':' {
continue
}
// check key; indexing by byte avoids the rune conversion a range loop performs
for i := 0; i < len(k); i++ {
r := k[i]
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
}
}
if strings.HasSuffix(k, "-bin") {
continue
}
// check value
for _, val := range vals {
if hasNotPrintable(val) {
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
}
if err := ValidatePair(k, vals...); err != nil {
return err
}
}
return nil
@@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool {
}
return false
}
// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped):
//
// - the key must contain one or more characters.
// - the characters in the key must be contained in [0-9 a-z _ - .].
// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
// - the characters in every value must be printable (in [%x20-%x7E]).
func ValidatePair(key string, vals ...string) error {
// key should not be empty
if key == "" {
return fmt.Errorf("there is an empty key in the header")
}
// pseudo-header will be ignored
if key[0] == ':' {
return nil
}
// check key; indexing by byte avoids the rune conversion a range loop performs
for i := 0; i < len(key); i++ {
r := key[i]
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
}
}
if strings.HasSuffix(key, "-bin") {
return nil
}
// check value
for _, val := range vals {
if hasNotPrintable(val) {
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
}
}
return nil
}
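
ValidatePair lives in grpc's internal/metadata package, so it cannot be imported from outside the module; the following self-contained sketch mirrors its rules, with a few illustrative inputs:

package main

import (
	"fmt"
	"strings"
)

// validatePair mirrors the ValidatePair logic from this diff as a standalone
// sketch (the real function lives in grpc's internal/metadata package).
func validatePair(key string, vals ...string) error {
	if key == "" {
		return fmt.Errorf("there is an empty key in the header")
	}
	if key[0] == ':' { // pseudo-headers are skipped
		return nil
	}
	for i := 0; i < len(key); i++ {
		r := key[i]
		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
		}
	}
	if strings.HasSuffix(key, "-bin") { // binary values are not validated
		return nil
	}
	for _, val := range vals {
		for _, r := range val {
			if r < 0x20 || r > 0x7e {
				return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
			}
		}
	}
	return nil
}

func main() {
	fmt.Println(validatePair("trace-id", "abc123"))   // <nil>
	fmt.Println(validatePair("Trace-ID", "abc123"))   // error: uppercase in key
	fmt.Println(validatePair("data-bin", "\x00\x01")) // <nil>: "-bin" values are skipped
}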

View File

@@ -22,6 +22,7 @@ import (
"bytes"
"errors"
"fmt"
"net"
"runtime"
"strconv"
"sync"
@@ -486,12 +487,13 @@ type loopyWriter struct {
hEnc *hpack.Encoder // HPACK encoder.
bdpEst *bdpEstimator
draining bool
conn net.Conn
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
@@ -504,6 +506,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
bdpEst: bdpEst,
conn: conn,
}
return l
}
@@ -521,15 +524,27 @@ const minBatchSize = 1000
// 2. Stream level flow control quota available.
//
// In each iteration of run loop, other than processing the incoming control
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
// This results in writing of HTTP2 frames into an underlying write buffer.
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
// if the batch size is too low to give stream goroutines a chance to fill it up.
// frame, loopy calls processData, which processes one node from the
// activeStreams linked-list. This results in writing of HTTP2 frames into an
// underlying write buffer. When there are no more control frames to read from
// controlBuf, loopy flushes the write buffer. As an optimization, to increase
// the batch size for each flush, loopy yields the processor, once if the batch
// size is too low to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
// flushes and closes the underlying connection. Otherwise, the connection is
// left open to allow the I/O error to be encountered by the reader instead.
func (l *loopyWriter) run() (err error) {
// Always flush the writer before exiting in case there are pending frames
// to be sent.
defer l.framer.writer.Flush()
defer func() {
if logger.V(logLevel) {
logger.Infof("transport: loopyWriter exiting with error: %v", err)
}
if !isIOError(err) {
l.framer.writer.Flush()
l.conn.Close()
}
l.cbuf.finish()
}()
for {
it, err := l.cbuf.get(true)
if err != nil {
@@ -581,11 +596,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
}
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
// Otherwise update the quota.
if w.streamID == 0 {
l.sendQuota += w.increment
return nil
return
}
// Find the stream and update it.
if str, ok := l.estdStreams[w.streamID]; ok {
@@ -593,10 +608,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
str.state = active
l.activeStreams.enqueue(str)
return nil
return
}
}
return nil
}
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
@@ -604,13 +618,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
}
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
if err := l.applySettings(s.ss); err != nil {
return err
}
l.applySettings(s.ss)
return l.framer.fr.WriteSettingsAck()
}
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
str := &outStream{
id: h.streamID,
state: empty,
@@ -618,7 +630,6 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
wq: h.wq,
}
l.estdStreams[h.streamID] = str
return nil
}
func (l *loopyWriter) headerHandler(h *headerFrame) error {
@@ -720,10 +731,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
return nil
}
func (l *loopyWriter) preprocessData(df *dataFrame) error {
func (l *loopyWriter) preprocessData(df *dataFrame) {
str, ok := l.estdStreams[df.streamID]
if !ok {
return nil
return
}
// If we got data for a stream it means that
// stream was originated and the headers were sent out.
@@ -732,7 +743,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error {
str.state = active
l.activeStreams.enqueue(str)
}
return nil
}
func (l *loopyWriter) pingHandler(p *ping) error {
@@ -743,9 +753,8 @@ func (l *loopyWriter) pingHandler(p *ping) error {
}
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
o.resp <- l.sendQuota
return nil
}
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
@@ -763,6 +772,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
}
}
if l.draining && len(l.estdStreams) == 0 {
// Flush and close the connection; we are done with it.
return errors.New("finished processing active streams while in draining mode")
}
return nil
@@ -798,6 +808,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
if l.side == clientSide {
l.draining = true
if len(l.estdStreams) == 0 {
// Flush and close the connection; we are done with it.
return errors.New("received GOAWAY with no active streams")
}
}
@@ -816,17 +827,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
return nil
}
func (l *loopyWriter) closeConnectionHandler() error {
// Exit loopyWriter entirely by returning an error here. This will lead to
// the transport closing the connection, and, ultimately, transport
// closure.
return ErrConnClosing
}
func (l *loopyWriter) handle(i interface{}) error {
switch i := i.(type) {
case *incomingWindowUpdate:
return l.incomingWindowUpdateHandler(i)
l.incomingWindowUpdateHandler(i)
case *outgoingWindowUpdate:
return l.outgoingWindowUpdateHandler(i)
case *incomingSettings:
@@ -836,7 +840,7 @@ func (l *loopyWriter) handle(i interface{}) error {
case *headerFrame:
return l.headerHandler(i)
case *registerStream:
return l.registerStreamHandler(i)
l.registerStreamHandler(i)
case *cleanupStream:
return l.cleanupStreamHandler(i)
case *earlyAbortStream:
@@ -844,21 +848,24 @@ func (l *loopyWriter) handle(i interface{}) error {
case *incomingGoAway:
return l.incomingGoAwayHandler(i)
case *dataFrame:
return l.preprocessData(i)
l.preprocessData(i)
case *ping:
return l.pingHandler(i)
case *goAway:
return l.goAwayHandler(i)
case *outFlowControlSizeRequest:
return l.outFlowControlSizeRequestHandler(i)
l.outFlowControlSizeRequestHandler(i)
case closeConnection:
return l.closeConnectionHandler()
// Just return a non-I/O error and run() will flush and close the
// connection.
return ErrConnClosing
default:
return fmt.Errorf("transport: unknown control message type %T", i)
}
return nil
}
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
func (l *loopyWriter) applySettings(ss []http2.Setting) {
for _, s := range ss {
switch s.ID {
case http2.SettingInitialWindowSize:
@@ -877,7 +884,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error {
updateHeaderTblSize(l.hEnc, s.Val)
}
}
return nil
}
// processData removes the first stream from active streams, writes out at most 16KB
@@ -911,7 +917,7 @@ func (l *loopyWriter) processData() (bool, error) {
return false, err
}
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
return false, nil
return false, err
}
} else {
l.activeStreams.enqueue(str)

View File

@@ -444,15 +444,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return nil, err
}
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
err := t.loopy.run()
if logger.V(logLevel) {
logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
}
// Do not close the transport. Let reader goroutine handle it since
// there might be data in the buffers.
t.conn.Close()
t.controlBuf.finish()
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn)
t.loopy.run()
close(t.writerDone)
}()
return t, nil
@@ -1264,10 +1257,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Unlock()
return
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
if logger.V(logLevel) {
logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
// data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
// enabled by default and double the configured KEEPALIVE_TIME used for new connections
// on that channel.
logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
}
id := f.LastStreamID
if id > 0 && id%2 == 0 {

View File

@@ -331,14 +331,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
err := t.loopy.run()
if logger.V(logLevel) {
logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
}
t.conn.Close()
t.controlBuf.finish()
t.loopy.run()
close(t.writerDone)
}()
go t.keepalive()
@@ -383,7 +378,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// if false, content-type was missing or invalid
isGRPC = false
contentType = ""
mdata = make(map[string][]string)
mdata = make(metadata.MD, len(frame.Fields))
httpMethod string
// these are set if an error is encountered while parsing the headers
protocolError bool
@@ -404,6 +399,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
s.contentSubtype = contentSubtype
isGRPC = true
case "grpc-accept-encoding":
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
if hf.Value == "" {
continue
}
compressors := hf.Value
if s.clientAdvertisedCompressors != "" {
compressors = s.clientAdvertisedCompressors + "," + compressors
}
s.clientAdvertisedCompressors = compressors
case "grpc-encoding":
s.recvCompress = hf.Value
case ":method":
@@ -595,7 +601,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
Header: metadata.MD(mdata).Copy(),
Header: mdata.Copy(),
}
sh.HandleRPC(s.ctx, inHeader)
}
@@ -1344,9 +1350,6 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
return false, err
}
if retErr != nil {
// Abruptly close the connection following the GoAway (via
// loopywriter). But flush out what's inside the buffer first.
t.framer.writer.Flush()
return false, retErr
}
return true, nil

View File

@@ -21,6 +21,7 @@ package transport
import (
"bufio"
"encoding/base64"
"errors"
"fmt"
"io"
"math"
@@ -330,7 +331,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
return w.conn.Write(b)
n, err = w.conn.Write(b)
return n, toIOError(err)
}
for len(b) > 0 {
nn := copy(w.buf[w.offset:], b)
@@ -352,10 +354,30 @@ func (w *bufWriter) Flush() error {
return nil
}
_, w.err = w.conn.Write(w.buf[:w.offset])
w.err = toIOError(w.err)
w.offset = 0
return w.err
}
type ioError struct {
error
}
func (i ioError) Unwrap() error {
return i.error
}
func isIOError(err error) bool {
return errors.As(err, &ioError{})
}
func toIOError(err error) error {
if err == nil {
return nil
}
return ioError{error: err}
}
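
The ioError wrapper is what lets loopyWriter.run distinguish connection write failures from protocol errors; errors.As finds the tag anywhere in a wrapped chain. A small demonstration using the same shape:

package main

import (
	"errors"
	"fmt"
	"io"
)

// Same shape as the wrapper added above.
type ioError struct{ error }

func (i ioError) Unwrap() error { return i.error }

func isIOError(err error) bool { return errors.As(err, &ioError{}) }

func main() {
	tagged := ioError{error: io.ErrUnexpectedEOF}
	fmt.Println(isIOError(tagged))                          // true
	fmt.Println(isIOError(fmt.Errorf("write: %w", tagged))) // true: errors.As unwraps
	fmt.Println(isIOError(io.EOF))                          // false: never tagged
}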
type framer struct {
writer *bufWriter
fr *http2.Framer

View File

@@ -257,6 +257,9 @@ type Stream struct {
fc *inFlow
wq *writeQuota
// Holds compressor names passed in grpc-accept-encoding metadata from the
// client. This is empty for the client side stream.
clientAdvertisedCompressors string
// Callback to state application's intentions to read data. This
// is used to adjust flow control, if needed.
requestRead func(int)
@@ -345,8 +348,24 @@ func (s *Stream) RecvCompress() string {
}
// SetSendCompress sets the compression algorithm to the stream.
func (s *Stream) SetSendCompress(str string) {
s.sendCompress = str
func (s *Stream) SetSendCompress(name string) error {
if s.isHeaderSent() || s.getState() == streamDone {
return errors.New("transport: set send compressor called after headers sent or stream done")
}
s.sendCompress = name
return nil
}
// SendCompress returns the send compressor name.
func (s *Stream) SendCompress() string {
return s.sendCompress
}
// ClientAdvertisedCompressors returns the compressor names advertised by the
// client via grpc-accept-encoding header.
func (s *Stream) ClientAdvertisedCompressors() string {
return s.clientAdvertisedCompressors
}
// Done returns a channel which is closed when it receives the final status

View File

@@ -91,7 +91,11 @@ func (md MD) Len() int {
// Copy returns a copy of md.
func (md MD) Copy() MD {
return Join(md)
out := make(MD, len(md))
for k, v := range md {
out[k] = copyOf(v)
}
return out
}
// Get obtains the values for a given key.
@@ -171,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context
md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
added[len(added)-1] = make([]string, len(kv))
copy(added[len(added)-1], kv)
kvCopy := make([]string, 0, len(kv))
for i := 0; i < len(kv); i += 2 {
kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
}
added[len(added)-1] = kvCopy
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
}
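
The practical effect of the change above: keys are now lowercased when appended rather than at send time, so reading the context back already shows canonical keys. A quick check:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "X-Trace-ID", "abc123")
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md) // map[x-trace-id:[abc123]]: the key was lowercased on append
}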

View File

@@ -41,8 +41,9 @@ var (
// TODO(bar) install dns resolver in init(){}.
// Register registers the resolver builder to the resolver map. b.Scheme will be
// used as the scheme registered with this builder.
// Register registers the resolver builder to the resolver map. b.Scheme will
// be used as the scheme registered with this builder. The registry is case
// sensitive, and schemes should not contain any uppercase characters.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Resolvers are
@@ -203,6 +204,15 @@ type State struct {
// gRPC to add new methods to this interface.
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
//
// If an error is returned, the resolver should try to resolve the
// target again. The resolver should use a backoff timer to prevent
// overloading the server with requests. If a resolver is certain that
// reresolving will not change the result, e.g. because it is
// a watch-based resolver, returned errors can be ignored.
//
// If the resolved State is the same as the last reported one, calling
// UpdateState can be omitted.
UpdateState(State) error
// ReportError notifies the ClientConn that the Resolver encountered an
// error. The ClientConn will notify the load balancer and begin calling
@@ -280,8 +290,10 @@ type Builder interface {
// gRPC dial calls Build synchronously, and fails if the returned error is
// not nil.
Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
// Scheme returns the scheme supported by this resolver.
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
// Scheme returns the scheme supported by this resolver. Scheme is defined
// at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
// string should not contain uppercase characters, as they will not match
// the parsed target's scheme as defined in RFC 3986.
Scheme() string
}
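
To make the case-sensitivity note concrete, a hedged sketch of a custom builder; the "static" scheme and the address list are hypothetical:

package main

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder returns a fixed address list. Note the all-lowercase scheme:
// the registry is case sensitive, and parsed targets arrive lowercased per
// RFC 3986, so an uppercase scheme would never match.
type staticBuilder struct {
	addrs []string
}

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	var addrs []resolver.Address
	for _, a := range b.addrs {
		addrs = append(addrs, resolver.Address{Addr: a})
	}
	// Report the fixed list once; per the UpdateState contract, an error here
	// would normally trigger re-resolution with backoff.
	if err := cc.UpdateState(resolver.State{Addresses: addrs}); err != nil {
		return nil, err
	}
	return &staticResolver{}, nil
}

func (b *staticBuilder) Scheme() string { return "static" }

type staticResolver struct{}

func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {} // nothing to re-resolve
func (*staticResolver) Close()                                {}

func init() {
	// Must run at init time: Register is not thread-safe.
	resolver.Register(&staticBuilder{addrs: []string{"10.0.0.1:443", "10.0.0.2:443"}})
}

func main() {}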

View File

@@ -159,6 +159,7 @@ type callInfo struct {
contentSubtype string
codec baseCodec
maxRetryRPCBufferSize int
onFinish []func(err error)
}
func defaultCallInfo() *callInfo {
@@ -295,6 +296,41 @@ func (o FailFastCallOption) before(c *callInfo) error {
}
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
// OnFinish returns a CallOption that configures a callback to be called when
// the call completes. The error passed to the callback is the status of the
// RPC, and may be nil. The onFinish callback provided will only be called once
// by gRPC. It is mainly intended for use by streaming interceptors, to be
// notified when the RPC completes along with information about the status of
// the RPC.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func OnFinish(onFinish func(err error)) CallOption {
return OnFinishCallOption{
OnFinish: onFinish,
}
}
// OnFinishCallOption is a CallOption that indicates a callback to be called when
// the call completes.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type OnFinishCallOption struct {
OnFinish func(error)
}
func (o OnFinishCallOption) before(c *callInfo) error {
c.onFinish = append(c.onFinish, o.OnFinish)
return nil
}
func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
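
A hedged usage sketch for the new OnFinish option; the method path and placeholder messages below are illustrative (a real call would go through a generated client):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// callWithOnFinish shows attaching the callback to a single RPC. The method
// path "/example.Service/Method" is hypothetical.
func callWithOnFinish(ctx context.Context, cc *grpc.ClientConn) error {
	var req, resp struct{} // placeholders; real calls use generated proto messages
	return cc.Invoke(ctx, "/example.Service/Method", &req, &resp,
		grpc.OnFinish(func(err error) {
			// Runs exactly once when the RPC completes; err is the final status.
			log.Printf("rpc finished: %v", err)
		}))
}

func main() {}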
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive. If this is not set, gRPC uses the default
// 4MB.
@@ -658,12 +694,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
Client: client,
Payload: msg,
Data: data,
Length: len(data),
WireLength: len(payload) + headerLen,
SentTime: t,
Client: client,
Payload: msg,
Data: data,
Length: len(data),
WireLength: len(payload) + headerLen,
CompressedLength: len(payload),
SentTime: t,
}
}
@@ -684,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
}
type payloadInfo struct {
wireLength int // The compressed length got from wire.
compressedLength int // The compressed length got from wire.
uncompressedBytes []byte
}
@@ -694,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
return nil, err
}
if payInfo != nil {
payInfo.wireLength = len(d)
payInfo.compressedLength = len(d)
}
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {

View File

@@ -45,6 +45,7 @@ import (
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
@@ -74,10 +75,10 @@ func init() {
srv.drainServerTransports(addr)
}
internal.AddGlobalServerOptions = func(opt ...ServerOption) {
extraServerOptions = append(extraServerOptions, opt...)
globalServerOptions = append(globalServerOptions, opt...)
}
internal.ClearGlobalServerOptions = func() {
extraServerOptions = nil
globalServerOptions = nil
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
@@ -183,7 +184,7 @@ var defaultServerOptions = serverOptions{
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
}
var extraServerOptions []ServerOption
var globalServerOptions []ServerOption
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption interface {
@@ -600,7 +601,7 @@ func (s *Server) stopServerWorkers() {
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
opts := defaultServerOptions
for _, o := range extraServerOptions {
for _, o := range globalServerOptions {
o.apply(&opts)
}
for _, o := range opt {
@@ -1252,7 +1253,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
logEntry.PeerAddr = peer.Addr
}
for _, binlog := range binlogs {
binlog.Log(logEntry)
binlog.Log(ctx, logEntry)
}
}
@@ -1263,6 +1264,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
var comp, decomp encoding.Compressor
var cp Compressor
var dc Decompressor
var sendCompressorName string
// If dc is set and matches the stream's compression, use it. Otherwise, try
// to find a matching registered compressor for decomp.
@@ -1283,12 +1285,18 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
cp = s.opts.cp
stream.SetSendCompress(cp.Type())
sendCompressorName = cp.Type()
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
// Legacy compressor not specified; attempt to respond with same encoding.
comp = encoding.GetCompressor(rc)
if comp != nil {
stream.SetSendCompress(rc)
sendCompressorName = comp.Name()
}
}
if sendCompressorName != "" {
if err := stream.SetSendCompress(sendCompressorName); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
}
}
@@ -1312,11 +1320,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
for _, sh := range shs {
sh.HandleRPC(stream.Context(), &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
WireLength: payInfo.wireLength + headerLen,
Data: d,
Length: len(d),
RecvTime: time.Now(),
Payload: v,
Length: len(d),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
Data: d,
})
}
if len(binlogs) != 0 {
@@ -1324,7 +1333,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Message: d,
}
for _, binlog := range binlogs {
binlog.Log(cm)
binlog.Log(stream.Context(), cm)
}
}
if trInfo != nil {
@@ -1357,7 +1366,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Header: h,
}
for _, binlog := range binlogs {
binlog.Log(sh)
binlog.Log(stream.Context(), sh)
}
}
st := &binarylog.ServerTrailer{
@@ -1365,7 +1374,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Err: appErr,
}
for _, binlog := range binlogs {
binlog.Log(st)
binlog.Log(stream.Context(), st)
}
}
return appErr
@@ -1375,6 +1384,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
opts := &transport.Options{Last: true}
// Server handler could have set new compressor by calling SetSendCompressor.
// In case it is set, we need to use it for compressing outbound message.
if stream.SendCompress() != sendCompressorName {
comp = encoding.GetCompressor(stream.SendCompress())
}
if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
if err == io.EOF {
// The entire stream is done (for unary RPC only).
@@ -1402,8 +1416,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Err: appErr,
}
for _, binlog := range binlogs {
binlog.Log(sh)
binlog.Log(st)
binlog.Log(stream.Context(), sh)
binlog.Log(stream.Context(), st)
}
}
return err
@@ -1417,8 +1431,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
Message: reply,
}
for _, binlog := range binlogs {
binlog.Log(sh)
binlog.Log(sm)
binlog.Log(stream.Context(), sh)
binlog.Log(stream.Context(), sm)
}
}
if channelz.IsOn() {
@@ -1430,17 +1444,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
// TODO: Should we be logging if writing status failed here, like above?
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
// error or allow the stats handler to see it?
err = t.WriteStatus(stream, statusOK)
if len(binlogs) != 0 {
st := &binarylog.ServerTrailer{
Trailer: stream.Trailer(),
Err: appErr,
}
for _, binlog := range binlogs {
binlog.Log(st)
binlog.Log(stream.Context(), st)
}
}
return err
return t.WriteStatus(stream, statusOK)
}
// chainStreamServerInterceptors chains all stream server interceptors into one.
@@ -1574,7 +1587,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
logEntry.PeerAddr = peer.Addr
}
for _, binlog := range ss.binlogs {
binlog.Log(logEntry)
binlog.Log(stream.Context(), logEntry)
}
}
@@ -1597,12 +1610,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
ss.cp = s.opts.cp
stream.SetSendCompress(s.opts.cp.Type())
ss.sendCompressorName = s.opts.cp.Type()
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
// Legacy compressor not specified; attempt to respond with same encoding.
ss.comp = encoding.GetCompressor(rc)
if ss.comp != nil {
stream.SetSendCompress(rc)
ss.sendCompressorName = rc
}
}
if ss.sendCompressorName != "" {
if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
}
}
@@ -1640,16 +1659,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ss.trInfo.tr.SetError()
ss.mu.Unlock()
}
t.WriteStatus(ss.s, appStatus)
if len(ss.binlogs) != 0 {
st := &binarylog.ServerTrailer{
Trailer: ss.s.Trailer(),
Err: appErr,
}
for _, binlog := range ss.binlogs {
binlog.Log(st)
binlog.Log(stream.Context(), st)
}
}
t.WriteStatus(ss.s, appStatus)
// TODO: Should we log an error from WriteStatus here and below?
return appErr
}
@@ -1658,17 +1677,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.mu.Unlock()
}
err = t.WriteStatus(ss.s, statusOK)
if len(ss.binlogs) != 0 {
st := &binarylog.ServerTrailer{
Trailer: ss.s.Trailer(),
Err: appErr,
}
for _, binlog := range ss.binlogs {
binlog.Log(st)
binlog.Log(stream.Context(), st)
}
}
return err
return t.WriteStatus(ss.s, statusOK)
}
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
@@ -1935,6 +1953,60 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
return nil
}
// SetSendCompressor sets a compressor for outbound messages from the server.
// It must not be called after any event that causes headers to be sent
// (see ServerStream.SetHeader for the complete list). The provided compressor
// is used when the following conditions are met:
//
// - compressor is registered via encoding.RegisterCompressor
// - compressor name must exist in the client advertised compressor names
// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to
// get client supported compressor names.
//
// The context provided must be the context passed to the server's handler.
// Note that the compressor name encoding.Identity disables outbound
// compression.
// By default, server messages will be sent using the same compressor with
// which request messages were sent.
//
// It is not safe to call SetSendCompressor concurrently with SendHeader and
// SendMsg.
//
// # Experimental
//
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
// later release.
func SetSendCompressor(ctx context.Context, name string) error {
stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
if !ok || stream == nil {
return fmt.Errorf("failed to fetch the stream from the given context")
}
if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
return fmt.Errorf("unable to set send compressor: %w", err)
}
return stream.SetSendCompress(name)
}
// ClientSupportedCompressors returns compressor names advertised by the client
// via grpc-accept-encoding header.
//
// The context provided must be the context passed to the server's handler.
//
// # Experimental
//
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
// later release.
func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
if !ok || stream == nil {
return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
}
return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil
}
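
Taken together, the two new helpers let a handler pick a response compressor the client actually supports. A hedged sketch of the handler-side logic (the blank gzip import is the standard way to register that compressor; the function name is illustrative):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// pickCompressor is a sketch: ctx must be the context passed to a server
// handler, or both helpers will fail to find the stream.
func pickCompressor(ctx context.Context) error {
	names, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err
	}
	for _, name := range names {
		if name == "gzip" {
			// Must run before headers are sent; afterwards SetSendCompress
			// on the stream returns an error.
			if err := grpc.SetSendCompressor(ctx, "gzip"); err != nil {
				return fmt.Errorf("unable to set compressor: %w", err)
			}
			return nil
		}
	}
	return nil // client did not advertise gzip; keep the default behavior
}

func main() {}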
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
// When called more than once, all the provided metadata will be merged.
//
@@ -1969,3 +2041,22 @@ type channelzServer struct {
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
return c.s.channelzMetric()
}
// validateSendCompressor returns an error when given compressor name cannot be
// handled by the server or the client based on the advertised compressors.
func validateSendCompressor(name, clientCompressors string) error {
if name == encoding.Identity {
return nil
}
if !grpcutil.IsCompressorNameRegistered(name) {
return fmt.Errorf("compressor not registered %q", name)
}
for _, c := range strings.Split(clientCompressors, ",") {
if c == name {
return nil // found match
}
}
return fmt.Errorf("client does not support compressor %q", name)
}

View File

@@ -67,10 +67,18 @@ type InPayload struct {
Payload interface{}
// Data is the serialized message payload.
Data []byte
// Length is the length of uncompressed data.
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
Length int
// WireLength is the length of data on wire (compressed, signed, encrypted).
// CompressedLength is the size of the compressed payload data. Does not
// include any framing (gRPC or HTTP/2). Same as Length if compression not
// enabled.
CompressedLength int
// WireLength is the size of the compressed payload data plus gRPC framing.
// Does not include HTTP/2 framing.
WireLength int
// RecvTime is the time when the payload is received.
RecvTime time.Time
}
@@ -129,9 +137,15 @@ type OutPayload struct {
Payload interface{}
// Data is the serialized message payload.
Data []byte
// Length is the length of uncompressed data.
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
Length int
// WireLength is the length of data on wire (compressed, signed, encrypted).
// CompressedLength is the size of the compressed payload data. Does not
// include any framing (gRPC or HTTP/2). Same as Length if compression not
// enabled.
CompressedLength int
// WireLength is the size of the compressed payload data plus gRPC framing.
// Does not include HTTP/2 framing.
WireLength int
// SentTime is the time when the payload is sent.
SentTime time.Time

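For orientation, a worked example of how the three size fields relate, assuming a 1000-byte message that compresses to 400 bytes (gRPC's message frame header is 5 bytes; HTTP/2 framing is excluded throughout):

	Length           = 1000 // uncompressed payload only
	CompressedLength =  400 // compressed payload only; equals Length when compression is off
	WireLength       =  405 // CompressedLength plus the 5-byte gRPC frame header
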
View File

@@ -168,10 +168,19 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
// validate md
if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
// validate added
for _, kvs := range added {
for i := 0; i < len(kvs); i += 2 {
if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
}
}
if channelz.IsOn() {
cc.incrCallsStarted()
@@ -352,7 +361,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
}
}
for _, binlog := range cs.binlogs {
binlog.Log(logEntry)
binlog.Log(cs.ctx, logEntry)
}
}
@@ -800,7 +809,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
}
cs.serverHeaderBinlogged = true
for _, binlog := range cs.binlogs {
binlog.Log(logEntry)
binlog.Log(cs.ctx, logEntry)
}
}
return m, nil
@@ -881,7 +890,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
Message: data,
}
for _, binlog := range cs.binlogs {
binlog.Log(cm)
binlog.Log(cs.ctx, cm)
}
}
return err
@@ -905,7 +914,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error {
Message: recvInfo.uncompressedBytes,
}
for _, binlog := range cs.binlogs {
binlog.Log(sm)
binlog.Log(cs.ctx, sm)
}
}
if err != nil || !cs.desc.ServerStreams {
@@ -926,7 +935,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error {
logEntry.PeerAddr = peer.Addr
}
for _, binlog := range cs.binlogs {
binlog.Log(logEntry)
binlog.Log(cs.ctx, logEntry)
}
}
}
@@ -953,7 +962,7 @@ func (cs *clientStream) CloseSend() error {
OnClientSide: true,
}
for _, binlog := range cs.binlogs {
binlog.Log(chc)
binlog.Log(cs.ctx, chc)
}
}
// We never returned an error here for reasons.
@@ -971,6 +980,9 @@ func (cs *clientStream) finish(err error) {
return
}
cs.finished = true
for _, onFinish := range cs.callInfo.onFinish {
onFinish(err)
}
cs.commitAttemptLocked()
if cs.attempt != nil {
cs.attempt.finish(err)
@@ -992,7 +1004,7 @@ func (cs *clientStream) finish(err error) {
OnClientSide: true,
}
for _, binlog := range cs.binlogs {
binlog.Log(c)
binlog.Log(cs.ctx, c)
}
}
if err == nil {
@@ -1081,9 +1093,10 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
WireLength: payInfo.wireLength + headerLen,
Length: len(payInfo.uncompressedBytes),
Data: payInfo.uncompressedBytes,
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
Length: len(payInfo.uncompressedBytes),
})
}
if channelz.IsOn() {
@@ -1511,6 +1524,8 @@ type serverStream struct {
comp encoding.Compressor
decomp encoding.Compressor
sendCompressorName string
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
@@ -1558,7 +1573,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
}
ss.serverHeaderBinlogged = true
for _, binlog := range ss.binlogs {
binlog.Log(sh)
binlog.Log(ss.ctx, sh)
}
}
return err
@@ -1603,6 +1618,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
}()
// Server handler could have set new compressor by calling SetSendCompressor.
// In case it is set, we need to use it for compressing outbound message.
if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
ss.comp = encoding.GetCompressor(sendCompressorsName)
ss.sendCompressorName = sendCompressorsName
}
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
if err != nil {
@@ -1624,14 +1646,14 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
ss.serverHeaderBinlogged = true
for _, binlog := range ss.binlogs {
binlog.Log(sh)
binlog.Log(ss.ctx, sh)
}
}
sm := &binarylog.ServerMessage{
Message: data,
}
for _, binlog := range ss.binlogs {
binlog.Log(sm)
binlog.Log(ss.ctx, sm)
}
}
if len(ss.statsHandler) != 0 {
@@ -1679,7 +1701,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
for _, binlog := range ss.binlogs {
binlog.Log(chc)
binlog.Log(ss.ctx, chc)
}
}
return err
@@ -1695,9 +1717,10 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
WireLength: payInfo.wireLength + headerLen,
Length: len(payInfo.uncompressedBytes),
Data: payInfo.uncompressedBytes,
Length: len(payInfo.uncompressedBytes),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
})
}
}
@@ -1706,7 +1729,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
Message: payInfo.uncompressedBytes,
}
for _, binlog := range ss.binlogs {
binlog.Log(cm)
binlog.Log(ss.ctx, cm)
}
}
return nil

View File

@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.53.0"
const Version = "1.54.0"

vendor/google.golang.org/grpc/vet.sh (generated, vendored)
View File

@@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then
github.com/client9/misspell/cmd/misspell
popd
if [[ -z "${VET_SKIP_PROTO}" ]]; then
if [[ "${TRAVIS}" = "true" ]]; then
PROTOBUF_VERSION=3.14.0
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
pushd /home/travis
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
unzip ${PROTOC_FILENAME}
bin/protoc --version
popd
elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
PROTOBUF_VERSION=3.14.0
if [[ "${GITHUB_ACTIONS}" = "true" ]]; then
PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files.
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
pushd /home/runner/go
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
@@ -68,8 +60,7 @@ fi
# - Check that generated proto files are up to date.
if [[ -z "${VET_SKIP_PROTO}" ]]; then
PATH="/home/travis/bin:${PATH}" make proto && \
git status --porcelain 2>&1 | fail_on_output || \
make proto && git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
fi

View File

@@ -4,7 +4,7 @@
// Package protojson marshals and unmarshals protocol buffer messages as JSON
// format. It follows the guide at
// https://developers.google.com/protocol-buffers/docs/proto3#json.
// https://protobuf.dev/programming-guides/proto3#json.
//
// This package produces a different output than the standard "encoding/json"
// package, which does not operate correctly on protocol buffer messages.

View File

@@ -814,16 +814,22 @@ func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
return d.unexpectedTokenError(tok)
}
t, err := time.Parse(time.RFC3339Nano, tok.ParsedString())
s := tok.ParsedString()
t, err := time.Parse(time.RFC3339Nano, s)
if err != nil {
return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
}
// Validate seconds. No need to validate nanos because time.Parse would have
// covered that already.
// Validate seconds.
secs := t.Unix()
if secs < minTimestampSeconds || secs > maxTimestampSeconds {
return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
}
// Validate subseconds.
i := strings.LastIndexByte(s, '.') // start of subsecond field
j := strings.LastIndexAny(s, "Z-+") // start of timezone field
if i >= 0 && j >= i && j-i > len(".999999999") {
return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
}
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)

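The effect of the new subsecond check, sketched against the well-known Timestamp type (the timestamps themselves are arbitrary examples). time.Parse alone tolerates extra fractional digits, which is why the decoder now measures the field width itself:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := &timestamppb.Timestamp{}
	// Nine fractional digits is the maximum Timestamp precision; a tenth
	// digit now fails with an invalid-value error.
	fmt.Println(protojson.Unmarshal([]byte(`"2023-05-12T14:33:40.123456789Z"`), ts))  // <nil>
	fmt.Println(protojson.Unmarshal([]byte(`"2023-05-12T14:33:40.1234567890Z"`), ts)) // error
}
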
View File

@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package protowire parses and formats the raw wire encoding.
// See https://developers.google.com/protocol-buffers/docs/encoding.
// See https://protobuf.dev/programming-guides/encoding.
//
// For marshaling and unmarshaling entire protobuf messages,
// use the "google.golang.org/protobuf/proto" package instead.
@@ -29,12 +29,8 @@ const (
)
// IsValid reports whether the field number is semantically valid.
//
// Note that while numbers within the reserved range are semantically invalid,
// they are syntactically valid in the wire format.
// Implementations may treat records with reserved field numbers as unknown.
func (n Number) IsValid() bool {
return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber
return MinValidNumber <= n && n <= MaxValidNumber
}
// Type represents the wire type.

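The behavioral change, checked directly: field numbers in the reserved 19000-19999 range now pass IsValid (implementations may treat such records as unknown fields), while zero and out-of-range values still fail:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	fmt.Println(protowire.Number(19500).IsValid()) // true: reserved range is now syntactically valid
	fmt.Println(protowire.Number(1).IsValid())     // true
	fmt.Println(protowire.Number(0).IsValid())     // false: below MinValidNumber
}
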
View File

@@ -294,7 +294,7 @@ func (d *Decoder) isValueNext() bool {
}
// consumeToken constructs a Token for given Kind with raw value derived from
// current d.in and given size, and consumes the given size-lenght of it.
// current d.in and given size, and consumes the given size-length of it.
func (d *Decoder) consumeToken(kind Kind, size int) Token {
tok := Token{
kind: kind,

View File

@@ -412,12 +412,13 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
// Field number. Identify if input is a valid number that is not negative
// and is decimal integer within 32-bit range.
if num := parseNumber(d.in); num.size > 0 {
str := num.string(d.in)
if !num.neg && num.kind == numDec {
if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil {
if _, err := strconv.ParseInt(str, 10, 32); err == nil {
return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
}
}
return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
return Token{}, d.newSyntaxError("invalid field number: %s", str)
}
return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))

View File

@@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) {
if num.neg {
numAttrs |= isNegative
}
strSize := num.size
last := num.size - 1
if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') {
strSize = last
}
tok := Token{
kind: Scalar,
attrs: numberValue,
pos: len(d.orig) - len(d.in),
raw: d.in[:num.size],
str: string(d.in[:strSize]),
str: num.string(d.in),
numAttrs: numAttrs,
}
d.consume(num.size)
@@ -46,6 +41,27 @@ type number struct {
kind uint8
neg bool
size int
// if neg, this is the length of whitespace and comments between
// the minus sign and the rest of the number literal
sep int
}
func (num number) string(data []byte) string {
strSize := num.size
last := num.size - 1
if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') {
strSize = last
}
if num.neg && num.sep > 0 {
// strip whitespace/comments between negative sign and the rest
strLen := strSize - num.sep
str := make([]byte, strLen)
str[0] = data[0]
copy(str[1:], data[num.sep+1:strSize])
return string(str)
}
return string(data[:strSize])
}
// parseNumber constructs a number object from given input. It allows for the
@@ -67,19 +83,22 @@ func parseNumber(input []byte) number {
}
// Optional -
var sep int
if s[0] == '-' {
neg = true
s = s[1:]
size++
// Consume any whitespace or comments between the
// negative sign and the rest of the number
lenBefore := len(s)
s = consume(s, 0)
sep = lenBefore - len(s)
size += sep
if len(s) == 0 {
return number{}
}
}
// C++ allows for whitespace and comments in between the negative sign and
// the rest of the number. This logic currently does not but is consistent
// with v1.
switch {
case s[0] == '0':
if len(s) > 1 {
@@ -116,7 +135,7 @@ func parseNumber(input []byte) number {
if len(s) > 0 && !isDelim(s[0]) {
return number{}
}
return number{kind: kind, neg: neg, size: size}
return number{kind: kind, neg: neg, size: size, sep: sep}
}
}
s = s[1:]
@@ -188,5 +207,5 @@ func parseNumber(input []byte) number {
return number{}
}
return number{kind: kind, neg: neg, size: size}
return number{kind: kind, neg: neg, size: size, sep: sep}
}

View File

@@ -50,6 +50,7 @@ const (
FileDescriptorProto_Options_field_name protoreflect.Name = "options"
FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info"
FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax"
FileDescriptorProto_Edition_field_name protoreflect.Name = "edition"
FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name"
FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package"
@@ -63,6 +64,7 @@ const (
FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options"
FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info"
FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax"
FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition"
)
// Field numbers for google.protobuf.FileDescriptorProto.
@@ -79,6 +81,7 @@ const (
FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12
FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13
)
// Names for google.protobuf.DescriptorProto.
@@ -494,26 +497,29 @@ const (
// Field names for google.protobuf.MessageOptions.
const (
MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format"
MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor"
MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format"
MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor"
MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor"
MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor"
MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts"
MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.MessageOptions.
const (
MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1
MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2
MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1
MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2
MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11
MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
// Names for google.protobuf.FieldOptions.
@@ -528,16 +534,24 @@ const (
FieldOptions_Packed_field_name protoreflect.Name = "packed"
FieldOptions_Jstype_field_name protoreflect.Name = "jstype"
FieldOptions_Lazy_field_name protoreflect.Name = "lazy"
FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy"
FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated"
FieldOptions_Weak_field_name protoreflect.Name = "weak"
FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
FieldOptions_Retention_field_name protoreflect.Name = "retention"
FieldOptions_Target_field_name protoreflect.Name = "target"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed"
FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype"
FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy"
FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy"
FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated"
FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak"
FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact"
FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention"
FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@@ -547,8 +561,12 @@ const (
FieldOptions_Packed_field_number protoreflect.FieldNumber = 2
FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6
FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5
FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15
FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3
FieldOptions_Weak_field_number protoreflect.FieldNumber = 10
FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16
FieldOptions_Retention_field_number protoreflect.FieldNumber = 17
FieldOptions_Target_field_number protoreflect.FieldNumber = 18
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -564,6 +582,18 @@ const (
FieldOptions_JSType_enum_name = "JSType"
)
// Full and short names for google.protobuf.FieldOptions.OptionRetention.
const (
FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention"
FieldOptions_OptionRetention_enum_name = "OptionRetention"
)
// Full and short names for google.protobuf.FieldOptions.OptionTargetType.
const (
FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType"
FieldOptions_OptionTargetType_enum_name = "OptionTargetType"
)
// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@@ -590,20 +620,23 @@ const (
// Field names for google.protobuf.EnumOptions.
const (
EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts"
EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.EnumOptions.
const (
EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6
EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
// Names for google.protobuf.EnumValueOptions.
@@ -813,11 +846,13 @@ const (
GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file"
GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin"
GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end"
GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic"
GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path"
GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file"
GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin"
GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end"
GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic"
)
// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation.
@@ -826,4 +861,11 @@ const (
GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2
GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3
GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4
GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5
)
// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic.
const (
GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic"
GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic"
)

View File

@@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter {
default:
return newSingularConverter(t, fd)
}
panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
}
var (

View File

@@ -87,7 +87,7 @@ func (sb *Builder) grow(n int) {
// Unlike strings.Builder, we do not need to copy over the contents
// of the old buffer since our builder provides no API for
// retrieving previously created strings.
sb.buf = make([]byte, 2*(cap(sb.buf)+n))
sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
}
func (sb *Builder) last(n int) string {

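The one-character fix above matters because make's second argument is a length, not a capacity: the old call returned a buffer already filled with zero bytes, so later appends landed after them. A quick illustration:

package main

import "fmt"

func main() {
	// make([]byte, n) yields length n (all zeros); appends go after them.
	buf := make([]byte, 4)
	buf = append(buf, 'a')
	fmt.Println(buf) // [0 0 0 0 97]

	// make([]byte, 0, n) yields an empty slice with capacity n,
	// which is what the fixed grow method intends.
	buf = make([]byte, 0, 4)
	buf = append(buf, 'a')
	fmt.Println(buf) // [97]
}
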
View File

@@ -51,8 +51,8 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
Minor = 28
Patch = 1
Minor = 30
Patch = 0
PreRelease = ""
)

View File

@@ -5,16 +5,13 @@
// Package proto provides functions operating on protocol buffer messages.
//
// For documentation on protocol buffers in general, see:
//
// https://developers.google.com/protocol-buffers
// https://protobuf.dev.
//
// For a tutorial on using protocol buffers with Go, see:
//
// https://developers.google.com/protocol-buffers/docs/gotutorial
// https://protobuf.dev/getting-started/gotutorial.
//
// For a guide to generated Go protocol buffer code, see:
//
// https://developers.google.com/protocol-buffers/docs/reference/go-generated
// https://protobuf.dev/reference/go/go-generated.
//
// # Binary serialization
//

View File

@@ -5,30 +5,39 @@
package proto
import (
"bytes"
"math"
"reflect"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/reflect/protoreflect"
)
// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
// Equal reports whether two messages are equal,
// by recursively comparing the fields of the message.
//
// Two messages are equal if they belong to the same message descriptor,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values. If either of the top-level
// messages are invalid, then Equal reports true only if both are invalid.
// - Bytes fields are equal if they contain identical bytes.
// Empty bytes (regardless of nil-ness) are considered equal.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
// - Floating-point fields are equal if they contain the same value.
// Unlike the == operator, a NaN is equal to another NaN.
//
// - Other scalar fields are equal if they contain the same value.
//
// - Message fields are equal if they have
// the same set of populated known and extension field values, and
// the same set of unknown fields values.
//
// - Lists are equal if they are the same length and
// each corresponding element is equal.
//
// - Maps are equal if they have the same set of keys and
// the corresponding value for each key is equal.
//
// An invalid message is not equal to a valid message.
// An invalid message is only equal to another invalid message of the
// same type. An invalid message often corresponds to a nil pointer
// of the concrete message type. For example, (*pb.M)(nil) is not equal
// to &pb.M{}.
// If two valid messages marshal to the same bytes under deterministic
// serialization, then Equal is guaranteed to report true.
func Equal(x, y Message) bool {
if x == nil || y == nil {
return x == nil && y == nil
@@ -42,130 +51,7 @@ func Equal(x, y Message) bool {
if mx.IsValid() != my.IsValid() {
return false
}
return equalMessage(mx, my)
}
// equalMessage compares two messages.
func equalMessage(mx, my protoreflect.Message) bool {
if mx.Descriptor() != my.Descriptor() {
return false
}
nx := 0
equal := true
mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
nx++
vy := my.Get(fd)
equal = my.Has(fd) && equalField(fd, vx, vy)
return equal
})
if !equal {
return false
}
ny := 0
my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
ny++
return true
})
if nx != ny {
return false
}
return equalUnknown(mx.GetUnknown(), my.GetUnknown())
}
// equalField compares two fields.
func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
switch {
case fd.IsList():
return equalList(fd, x.List(), y.List())
case fd.IsMap():
return equalMap(fd, x.Map(), y.Map())
default:
return equalValue(fd, x, y)
}
}
// equalMap compares two maps.
func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool {
if x.Len() != y.Len() {
return false
}
equal := true
x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
vy := y.Get(k)
equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
return equal
})
return equal
}
// equalList compares two lists.
func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool {
if x.Len() != y.Len() {
return false
}
for i := x.Len() - 1; i >= 0; i-- {
if !equalValue(fd, x.Get(i), y.Get(i)) {
return false
}
}
return true
}
// equalValue compares two singular values.
func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
switch fd.Kind() {
case protoreflect.BoolKind:
return x.Bool() == y.Bool()
case protoreflect.EnumKind:
return x.Enum() == y.Enum()
case protoreflect.Int32Kind, protoreflect.Sint32Kind,
protoreflect.Int64Kind, protoreflect.Sint64Kind,
protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
return x.Int() == y.Int()
case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
return x.Uint() == y.Uint()
case protoreflect.FloatKind, protoreflect.DoubleKind:
fx := x.Float()
fy := y.Float()
if math.IsNaN(fx) || math.IsNaN(fy) {
return math.IsNaN(fx) && math.IsNaN(fy)
}
return fx == fy
case protoreflect.StringKind:
return x.String() == y.String()
case protoreflect.BytesKind:
return bytes.Equal(x.Bytes(), y.Bytes())
case protoreflect.MessageKind, protoreflect.GroupKind:
return equalMessage(x.Message(), y.Message())
default:
return x.Interface() == y.Interface()
}
}
// equalUnknown compares unknown fields by direct comparison on the raw bytes
// of each individual field number.
func equalUnknown(x, y protoreflect.RawFields) bool {
if len(x) != len(y) {
return false
}
if bytes.Equal([]byte(x), []byte(y)) {
return true
}
mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
for len(x) > 0 {
fnum, _, n := protowire.ConsumeField(x)
mx[fnum] = append(mx[fnum], x[:n]...)
x = x[n:]
}
for len(y) > 0 {
fnum, _, n := protowire.ConsumeField(y)
my[fnum] = append(my[fnum], y[:n]...)
y = y[n:]
}
return reflect.DeepEqual(mx, my)
vx := protoreflect.ValueOfMessage(mx)
vy := protoreflect.ValueOfMessage(my)
return vx.Equal(vy)
}

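A small runnable check of the documented semantics, using the well-known Duration type (any generated message would behave the same):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Separately constructed messages with the same field values are equal.
	a := &durationpb.Duration{Seconds: 3, Nanos: 1}
	b := &durationpb.Duration{Seconds: 3, Nanos: 1}
	fmt.Println(proto.Equal(a, b)) // true

	// A typed nil pointer is an invalid message; per the doc comment,
	// it is not equal to a valid (even empty) message.
	var nilMsg *durationpb.Duration
	fmt.Println(proto.Equal(nilMsg, &durationpb.Duration{})) // false
}
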
View File

@@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo)
case 12:
b = p.appendSingularField(b, "syntax", nil)
case 13:
b = p.appendSingularField(b, "edition", nil)
}
return b
}
@@ -236,6 +238,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte {
b = p.appendSingularField(b, "deprecated", nil)
case 7:
b = p.appendSingularField(b, "map_entry", nil)
case 11:
b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -279,6 +283,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte {
b = p.appendSingularField(b, "allow_alias", nil)
case 3:
b = p.appendSingularField(b, "deprecated", nil)
case 6:
b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -345,10 +351,18 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte {
b = p.appendSingularField(b, "jstype", nil)
case 5:
b = p.appendSingularField(b, "lazy", nil)
case 15:
b = p.appendSingularField(b, "unverified_lazy", nil)
case 3:
b = p.appendSingularField(b, "deprecated", nil)
case 10:
b = p.appendSingularField(b, "weak", nil)
case 16:
b = p.appendSingularField(b, "debug_redact", nil)
case 17:
b = p.appendSingularField(b, "retention", nil)
case 18:
b = p.appendSingularField(b, "target", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}

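These appendXxx helpers back protoreflect.SourcePath.String. A quick illustration of the rendering they produce:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// [4, 0, 2, 1] addresses the second field of the first message in
	// a FileDescriptorProto (field 4 is message_type, field 2 is field).
	p := protoreflect.SourcePath{4, 0, 2, 1}
	fmt.Println(p.String()) // .message_type[0].field[1]
}
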
View File

@@ -148,7 +148,7 @@ type Message interface {
// be preserved in marshaling or other operations.
IsValid() bool
// ProtoMethods returns optional fast-path implementions of various operations.
// ProtoMethods returns optional fast-path implementations of various operations.
// This method may return nil.
//
// The returned methods type is identical to

View File

@@ -0,0 +1,168 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package protoreflect
import (
"bytes"
"fmt"
"math"
"reflect"
"google.golang.org/protobuf/encoding/protowire"
)
// Equal reports whether v1 and v2 are recursively equal.
//
// - Values of different types are always unequal.
//
// - Bytes values are equal if they contain identical bytes.
// Empty bytes (regardless of nil-ness) are considered equal.
//
// - Floating point values are equal if they contain the same value.
// Unlike the == operator, a NaN is equal to another NaN.
//
// - Enums are equal if they contain the same number.
// Since Value does not contain an enum descriptor,
// enum values do not consider the type of the enum.
//
// - Other scalar values are equal if they contain the same value.
//
// - Message values are equal if they belong to the same message descriptor,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
// - Lists are equal if they are the same length and
// each corresponding element is equal.
//
// - Maps are equal if they have the same set of keys and
// the corresponding value for each key is equal.
func (v1 Value) Equal(v2 Value) bool {
return equalValue(v1, v2)
}
func equalValue(x, y Value) bool {
eqType := x.typ == y.typ
switch x.typ {
case nilType:
return eqType
case boolType:
return eqType && x.Bool() == y.Bool()
case int32Type, int64Type:
return eqType && x.Int() == y.Int()
case uint32Type, uint64Type:
return eqType && x.Uint() == y.Uint()
case float32Type, float64Type:
return eqType && equalFloat(x.Float(), y.Float())
case stringType:
return eqType && x.String() == y.String()
case bytesType:
return eqType && bytes.Equal(x.Bytes(), y.Bytes())
case enumType:
return eqType && x.Enum() == y.Enum()
default:
switch x := x.Interface().(type) {
case Message:
y, ok := y.Interface().(Message)
return ok && equalMessage(x, y)
case List:
y, ok := y.Interface().(List)
return ok && equalList(x, y)
case Map:
y, ok := y.Interface().(Map)
return ok && equalMap(x, y)
default:
panic(fmt.Sprintf("unknown type: %T", x))
}
}
}
// equalFloat compares two floats, where NaNs are treated as equal.
func equalFloat(x, y float64) bool {
if math.IsNaN(x) || math.IsNaN(y) {
return math.IsNaN(x) && math.IsNaN(y)
}
return x == y
}
// equalMessage compares two messages.
func equalMessage(mx, my Message) bool {
if mx.Descriptor() != my.Descriptor() {
return false
}
nx := 0
equal := true
mx.Range(func(fd FieldDescriptor, vx Value) bool {
nx++
vy := my.Get(fd)
equal = my.Has(fd) && equalValue(vx, vy)
return equal
})
if !equal {
return false
}
ny := 0
my.Range(func(fd FieldDescriptor, vx Value) bool {
ny++
return true
})
if nx != ny {
return false
}
return equalUnknown(mx.GetUnknown(), my.GetUnknown())
}
// equalList compares two lists.
func equalList(x, y List) bool {
if x.Len() != y.Len() {
return false
}
for i := x.Len() - 1; i >= 0; i-- {
if !equalValue(x.Get(i), y.Get(i)) {
return false
}
}
return true
}
// equalMap compares two maps.
func equalMap(x, y Map) bool {
if x.Len() != y.Len() {
return false
}
equal := true
x.Range(func(k MapKey, vx Value) bool {
vy := y.Get(k)
equal = y.Has(k) && equalValue(vx, vy)
return equal
})
return equal
}
// equalUnknown compares unknown fields by direct comparison on the raw bytes
// of each individual field number.
func equalUnknown(x, y RawFields) bool {
if len(x) != len(y) {
return false
}
if bytes.Equal([]byte(x), []byte(y)) {
return true
}
mx := make(map[FieldNumber]RawFields)
my := make(map[FieldNumber]RawFields)
for len(x) > 0 {
fnum, _, n := protowire.ConsumeField(x)
mx[fnum] = append(mx[fnum], x[:n]...)
x = x[n:]
}
for len(y) > 0 {
fnum, _, n := protowire.ConsumeField(y)
my[fnum] = append(my[fnum], y[:n]...)
y = y[n:]
}
return reflect.DeepEqual(mx, my)
}

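Since Value.Equal is exported, the NaN and cross-type rules above can be exercised directly:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Unlike ==, Value.Equal treats NaN as equal to NaN.
	x := protoreflect.ValueOfFloat64(math.NaN())
	y := protoreflect.ValueOfFloat64(math.NaN())
	fmt.Println(x.Equal(y)) // true

	// Values of different types are never equal, even for the same number.
	fmt.Println(protoreflect.ValueOfInt32(1).Equal(protoreflect.ValueOfInt64(1))) // false
}
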
View File

@@ -54,11 +54,11 @@ import (
// // Append a 0 to a "repeated int32" field.
// // Since the Value returned by Mutable is guaranteed to alias
// // the source message, modifying the Value modifies the message.
// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0))
// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0))
//
// // Assign [0] to a "repeated int32" field by creating a new Value,
// // modifying it, and assigning it.
// list := message.NewField(fieldDesc).(List)
// list := message.NewField(fieldDesc).List()
// list.Append(protoreflect.ValueOfInt32(0))
// message.Set(fieldDesc, list)
// // ERROR: Since it is not defined whether Set aliases the source,

View File

@@ -46,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore"
// It is a variable so that the behavior is easily overridden in another file.
var ignoreConflict = func(d protoreflect.Descriptor, err error) bool {
const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT"
const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict"
const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict"
policy := conflictPolicy
if v := os.Getenv(env); v != "" {
policy = v

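A minimal sketch of the override pattern shown above, pulled out of the library:

package main

import (
	"fmt"
	"os"
)

func main() {
	// GOLANG_PROTOBUF_REGISTRATION_CONFLICT replaces the compiled-in
	// policy when set, e.g. to "warn" or "ignore".
	policy := "panic" // "panic" | "warn" | "ignore"
	if v := os.Getenv("GOLANG_PROTOBUF_REGISTRATION_CONFLICT"); v != "" {
		policy = v
	}
	fmt.Println("conflict policy:", policy)
}
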
File diff suppressed because it is too large

View File

@@ -37,8 +37,7 @@
// It is functionally a tuple of the full name of the remote message type and
// the serialized bytes of the remote message value.
//
//
// Constructing an Any
// # Constructing an Any
//
// An Any message containing another message value is constructed using New:
//
@@ -48,8 +47,7 @@
// }
// ... // make use of any
//
//
// Unmarshaling an Any
// # Unmarshaling an Any
//
// With a populated Any message, the underlying message can be serialized into
// a remote concrete message value in a few ways.
@@ -95,8 +93,7 @@
// listed in the case clauses are linked into the Go binary and therefore also
// registered in the global registry.
//
//
// Type checking an Any
// # Type checking an Any
//
// In order to type check whether an Any message represents some other message,
// then use the MessageIs method:
@@ -115,7 +112,6 @@
// }
// ... // make use of m
// }
//
package anypb
import (
@@ -136,45 +132,49 @@ import (
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
// // or ...
// if (any.isSameTypeAs(Foo.getDefaultInstance())) {
// foo = any.unpack(Foo.getDefaultInstance());
// }
//
// Example 3: Pack and unpack a message in Python.
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := anypb.New(foo)
// if err != nil {
// ...
// }
// ...
// foo := &pb.Foo{}
// if err := any.UnmarshalTo(foo); err != nil {
// ...
// }
// foo := &pb.Foo{...}
// any, err := anypb.New(foo)
// if err != nil {
// ...
// }
// ...
// foo := &pb.Foo{}
// if err := any.UnmarshalTo(foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
@@ -182,35 +182,33 @@ import (
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
// # JSON
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
type Any struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -228,14 +226,14 @@ type Any struct {
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
// - If no scheme is provided, `https` is assumed.
// - An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// - Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
@@ -243,7 +241,6 @@ type Any struct {
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`

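A compact, runnable version of the pack/type-check/unpack flow the comment describes, with Duration as the payload:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a, err := anypb.New(durationpb.New(0)) // pack
	if err != nil {
		panic(err)
	}
	fmt.Println(a.MessageIs(&durationpb.Duration{})) // true
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	d := &durationpb.Duration{}
	if err := a.UnmarshalTo(d); err != nil { // unpack
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 0s
}
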
View File

@@ -35,8 +35,7 @@
//
// The Duration message represents a signed span of time.
//
//
// Conversion to a Go Duration
// # Conversion to a Go Duration
//
// The AsDuration method can be used to convert a Duration message to a
// standard Go time.Duration value:
@@ -65,15 +64,13 @@
// the resulting value to the closest representable value (e.g., math.MaxInt64
// for positive overflow and math.MinInt64 for negative overflow).
//
//
// Conversion from a Go Duration
// # Conversion from a Go Duration
//
// The durationpb.New function can be used to construct a Duration message
// from a standard Go time.Duration value:
//
// dur := durationpb.New(d)
// ... // make use of d as a *durationpb.Duration
//
package durationpb
import (
@@ -96,43 +93,43 @@ import (
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
@@ -143,8 +140,6 @@ import (
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache

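A short round trip between time.Duration and the Duration message, matching the JSON-mapping note above (3 seconds and 1 nanosecond renders as "3.000000001s"):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := 3*time.Second + time.Nanosecond
	msg := durationpb.New(d)
	fmt.Println(msg.Seconds, msg.Nanos) // 3 1
	fmt.Println(msg.AsDuration())       // 3.000000001s
}
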
View File

@@ -37,8 +37,7 @@
// The paths are specific to some target message type,
// which is not stored within the FieldMask message itself.
//
//
// Constructing a FieldMask
// # Constructing a FieldMask
//
// The New function is used to construct a FieldMask:
//
@@ -61,8 +60,7 @@
// ... // handle error
// }
//
//
// Type checking a FieldMask
// # Type checking a FieldMask
//
// In order to verify that a FieldMask represents a set of fields that are
// reachable from some target message type, use the IsValid method:
@@ -89,8 +87,8 @@ import (
// `FieldMask` represents a set of symbolic field paths, for example:
//
// paths: "f.a"
// paths: "f.b.d"
// paths: "f.a"
// paths: "f.b.d"
//
// Here `f` represents a field in some root message, `a` and `b`
// fields in the message found in `f`, and `d` a field found in the
@@ -107,27 +105,26 @@ import (
// specified in the mask. For example, if the mask in the previous
// example is applied to a response message as follows:
//
// f {
// a : 22
// b {
// d : 1
// x : 2
// }
// y : 13
// }
// z: 8
// f {
// a : 22
// b {
// d : 1
// x : 2
// }
// y : 13
// }
// z: 8
//
// The result will not contain specific values for fields x,y and z
// (their value will be set to the default, and omitted in proto text
// output):
//
//
// f {
// a : 22
// b {
// d : 1
// }
// }
// f {
// a : 22
// b {
// d : 1
// }
// }
//
// A repeated field is not allowed except at the last position of a
// paths string.
@@ -165,36 +162,36 @@ import (
//
// For example, given the target message:
//
// f {
// b {
// d: 1
// x: 2
// }
// c: [1]
// }
// f {
// b {
// d: 1
// x: 2
// }
// c: [1]
// }
//
// And an update message:
//
// f {
// b {
// d: 10
// }
// c: [2]
// }
// f {
// b {
// d: 10
// }
// c: [2]
// }
//
// then if the field mask is:
//
// paths: ["f.b", "f.c"]
// paths: ["f.b", "f.c"]
//
// then the result will be:
//
// f {
// b {
// d: 10
// x: 2
// }
// c: [1, 2]
// }
// f {
// b {
// d: 10
// x: 2
// }
// c: [1, 2]
// }
//
// An implementation may provide options to override this default behavior for
// repeated and message fields.
@@ -232,51 +229,51 @@ import (
//
// As an example, consider the following message declarations:
//
// message Profile {
// User user = 1;
// Photo photo = 2;
// }
// message User {
// string display_name = 1;
// string address = 2;
// }
// message Profile {
// User user = 1;
// Photo photo = 2;
// }
// message User {
// string display_name = 1;
// string address = 2;
// }
//
// In proto a field mask for `Profile` may look as such:
//
// mask {
// paths: "user.display_name"
// paths: "photo"
// }
// mask {
// paths: "user.display_name"
// paths: "photo"
// }
//
// In JSON, the same mask is represented as below:
//
// {
// mask: "user.displayName,photo"
// }
// {
// mask: "user.displayName,photo"
// }
//
// # Field Masks and Oneof Fields
//
// Field masks treat fields in oneofs just as regular fields. Consider the
// following message:
//
// message SampleMessage {
// oneof test_oneof {
// string name = 4;
// SubMessage sub_message = 9;
// }
// }
// message SampleMessage {
// oneof test_oneof {
// string name = 4;
// SubMessage sub_message = 9;
// }
// }
//
// The field mask can be:
//
// mask {
// paths: "name"
// }
// mask {
// paths: "name"
// }
//
// Or:
//
// mask {
// paths: "sub_message"
// }
// mask {
// paths: "sub_message"
// }
//
// Note that oneof type names ("test_oneof" in this case) cannot be used in
// paths.

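A runnable version of the New and IsValid flow described above, using Timestamp as the target message:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/fieldmaskpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mask, err := fieldmaskpb.New(&timestamppb.Timestamp{}, "seconds", "nanos")
	if err != nil {
		panic(err)
	}
	fmt.Println(mask.Paths)                             // [seconds nanos]
	fmt.Println(mask.IsValid(&timestamppb.Timestamp{})) // true
}
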
View File

@@ -36,8 +36,7 @@
// The Timestamp message represents a timestamp,
// an instant in time since the Unix epoch (January 1st, 1970).
//
//
// Conversion to a Go Time
// # Conversion to a Go Time
//
// The AsTime method can be used to convert a Timestamp message to a
// standard Go time.Time value in UTC:
@@ -59,8 +58,7 @@
// ... // handle error
// }
//
//
// Conversion from a Go Time
// # Conversion from a Go Time
//
// The timestamppb.New function can be used to construct a Timestamp message
// from a standard Go time.Time value:
@@ -72,7 +70,6 @@
//
// ts := timestamppb.Now()
// ... // make use of ts as a *timestamppb.Timestamp
//
package timestamppb
import (
@@ -101,52 +98,50 @@ import (
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
// Example 5: Compute Timestamp from Java `Instant.now()`.
//
// Instant now = Instant.now();
//
// Timestamp timestamp =
// Timestamp.newBuilder().setSeconds(now.getEpochSecond())
// .setNanos(now.getNano()).build();
// Instant now = Instant.now();
//
// Timestamp timestamp =
// Timestamp.newBuilder().setSeconds(now.getEpochSecond())
// .setNanos(now.getNano()).build();
//
// Example 6: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
@@ -174,8 +169,6 @@ import (
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache

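The conversions described above, as a short runnable check:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.New(time.Unix(1, 500))
	fmt.Println(ts.Seconds, ts.Nanos) // 1 500
	fmt.Println(ts.AsTime().UTC())    // 1970-01-01 00:00:01.0000005 +0000 UTC
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
}
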
View File

@@ -27,7 +27,7 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Wrappers for primitive (non-message) types. These types are useful
// for embedding primitives in the `google.protobuf.Any` type and for places
// where we need to distinguish between the absence of a primitive