Mirror of https://github.com/superseriousbusiness/gotosocial, synced 2025-06-05 21:59:39 +02:00
feat: initial tracing support (#1623)
vendor/google.golang.org/grpc/internal/backoff/backoff.go (new file, generated, vendored, +73)
@@ -0,0 +1,73 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package backoff implements the backoff strategy for gRPC.
//
// This is kept in internal until the gRPC project decides whether or not to
// allow alternative backoff strategies.
package backoff

import (
    "time"

    grpcbackoff "google.golang.org/grpc/backoff"
    "google.golang.org/grpc/internal/grpcrand"
)

// Strategy defines the methodology for backing off after a grpc connection
// failure.
type Strategy interface {
    // Backoff returns the amount of time to wait before the next retry given
    // the number of consecutive failures.
    Backoff(retries int) time.Duration
}

// DefaultExponential is an exponential backoff implementation using the
// default values for all the configurable knobs defined in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}

// Exponential implements exponential backoff algorithm as defined in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
type Exponential struct {
    // Config contains all options to configure the backoff algorithm.
    Config grpcbackoff.Config
}

// Backoff returns the amount of time to wait before the next retry given the
// number of retries.
func (bc Exponential) Backoff(retries int) time.Duration {
    if retries == 0 {
        return bc.Config.BaseDelay
    }
    backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
    for backoff < max && retries > 0 {
        backoff *= bc.Config.Multiplier
        retries--
    }
    if backoff > max {
        backoff = max
    }
    // Randomize backoff delays so that if a cluster of requests start at
    // the same time, they won't operate in lockstep.
    backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
    if backoff < 0 {
        return 0
    }
    return time.Duration(backoff)
}
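Usage sketch (not part of this diff): the Exponential strategy above is typically driven by a retry loop like the one below. The tryDial helper and the attempt limit are hypothetical, and the package is internal to the grpc module, so a snippet like this only compiles from inside that module.

package example

import (
    "errors"
    "time"

    "google.golang.org/grpc/internal/backoff"
)

// dialWithBackoff retries a failing operation, sleeping for the duration the
// Exponential strategy recommends after each consecutive failure.
func dialWithBackoff(tryDial func() error) error {
    bo := backoff.DefaultExponential
    for retries := 0; retries < 5; retries++ {
        if err := tryDial(); err == nil {
            return nil
        }
        time.Sleep(bo.Backoff(retries))
    }
    return errors.New("all dial attempts failed")
}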
vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go (new file, generated, vendored, +384)
@@ -0,0 +1,384 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package gracefulswitch implements a graceful switch load balancer.
package gracefulswitch

import (
    "errors"
    "fmt"
    "sync"

    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/balancer/base"
    "google.golang.org/grpc/connectivity"
    "google.golang.org/grpc/resolver"
)

var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
var _ balancer.Balancer = (*Balancer)(nil)

// NewBalancer returns a graceful switch Balancer.
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
    return &Balancer{
        cc:    cc,
        bOpts: opts,
    }
}

// Balancer is a utility to gracefully switch from one balancer to
// a new balancer. It implements the balancer.Balancer interface.
type Balancer struct {
    bOpts balancer.BuildOptions
    cc    balancer.ClientConn

    // mu protects the following fields and all fields within balancerCurrent
    // and balancerPending. mu does not need to be held when calling into the
    // child balancers, as all calls into these children happen only as a direct
    // result of a call into the gracefulSwitchBalancer, which are also
    // guaranteed to be synchronous. There is one exception: an UpdateState call
    // from a child balancer when current and pending are populated can lead to
    // calling Close() on the current. To prevent that racing with an
    // UpdateSubConnState from the channel, we hold currentMu during Close and
    // UpdateSubConnState calls.
    mu              sync.Mutex
    balancerCurrent *balancerWrapper
    balancerPending *balancerWrapper
    closed          bool // set to true when this balancer is closed

    // currentMu must be locked before mu. This mutex guards against this
    // sequence of events: UpdateSubConnState() called, finds the
    // balancerCurrent, gives up lock, updateState comes in, causes Close() on
    // balancerCurrent before the UpdateSubConnState is called on the
    // balancerCurrent.
    currentMu sync.Mutex
}

// swap swaps out the current lb with the pending lb and updates the ClientConn.
// The caller must hold gsb.mu.
func (gsb *Balancer) swap() {
    gsb.cc.UpdateState(gsb.balancerPending.lastState)
    cur := gsb.balancerCurrent
    gsb.balancerCurrent = gsb.balancerPending
    gsb.balancerPending = nil
    go func() {
        gsb.currentMu.Lock()
        defer gsb.currentMu.Unlock()
        cur.Close()
    }()
}

// Helper function that checks if the balancer passed in is current or pending.
// The caller must hold gsb.mu.
func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
    return bw == gsb.balancerCurrent || bw == gsb.balancerPending
}

// SwitchTo initializes the graceful switch process, which completes based on
// connectivity state changes on the current/pending balancer. Thus, the switch
// process is not complete when this method returns. This method must be called
// synchronously alongside the rest of the balancer.Balancer methods this
// Graceful Switch Balancer implements.
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
    gsb.mu.Lock()
    if gsb.closed {
        gsb.mu.Unlock()
        return errBalancerClosed
    }
    bw := &balancerWrapper{
        gsb: gsb,
        lastState: balancer.State{
            ConnectivityState: connectivity.Connecting,
            Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
        },
        subconns: make(map[balancer.SubConn]bool),
    }
    balToClose := gsb.balancerPending // nil if there is no pending balancer
    if gsb.balancerCurrent == nil {
        gsb.balancerCurrent = bw
    } else {
        gsb.balancerPending = bw
    }
    gsb.mu.Unlock()
    balToClose.Close()
    // This function takes a builder instead of a balancer because builder.Build
    // can call back inline, and this utility needs to handle the callbacks.
    newBalancer := builder.Build(bw, gsb.bOpts)
    if newBalancer == nil {
        // This is illegal and should never happen; we clear the balancerWrapper
        // we were constructing if it happens to avoid a potential panic.
        gsb.mu.Lock()
        if gsb.balancerPending != nil {
            gsb.balancerPending = nil
        } else {
            gsb.balancerCurrent = nil
        }
        gsb.mu.Unlock()
        return balancer.ErrBadResolverState
    }

    // This write doesn't need to take gsb.mu because this field never gets read
    // or written to on any calls from the current or pending. Calls from grpc
    // to this balancer are guaranteed to be called synchronously, so this
    // bw.Balancer field will never be forwarded to until this SwitchTo()
    // function returns.
    bw.Balancer = newBalancer
    return nil
}

// Returns nil if the graceful switch balancer is closed.
func (gsb *Balancer) latestBalancer() *balancerWrapper {
    gsb.mu.Lock()
    defer gsb.mu.Unlock()
    if gsb.balancerPending != nil {
        return gsb.balancerPending
    }
    return gsb.balancerCurrent
}

// UpdateClientConnState forwards the update to the latest balancer created.
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
    // The resolver data is only relevant to the most recent LB Policy.
    balToUpdate := gsb.latestBalancer()
    if balToUpdate == nil {
        return errBalancerClosed
    }
    // Perform this call without gsb.mu to prevent deadlocks if the child calls
    // back into the channel. The latest balancer can never be closed during a
    // call from the channel, even without gsb.mu held.
    return balToUpdate.UpdateClientConnState(state)
}

// ResolverError forwards the error to the latest balancer created.
func (gsb *Balancer) ResolverError(err error) {
    // The resolver data is only relevant to the most recent LB Policy.
    balToUpdate := gsb.latestBalancer()
    if balToUpdate == nil {
        return
    }
    // Perform this call without gsb.mu to prevent deadlocks if the child calls
    // back into the channel. The latest balancer can never be closed during a
    // call from the channel, even without gsb.mu held.
    balToUpdate.ResolverError(err)
}

// ExitIdle forwards the call to the latest balancer created.
//
// If the latest balancer does not support ExitIdle, the subConns are
// re-connected to manually.
func (gsb *Balancer) ExitIdle() {
    balToUpdate := gsb.latestBalancer()
    if balToUpdate == nil {
        return
    }
    // There is no need to protect this read with a mutex, as the write to the
    // Balancer field happens in SwitchTo, which completes before this can be
    // called.
    if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
        ei.ExitIdle()
        return
    }
    gsb.mu.Lock()
    defer gsb.mu.Unlock()
    for sc := range balToUpdate.subconns {
        sc.Connect()
    }
}

// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
    gsb.currentMu.Lock()
    defer gsb.currentMu.Unlock()
    gsb.mu.Lock()
    // Forward update to the appropriate child. Even if there is a pending
    // balancer, the current balancer should continue to get SubConn updates to
    // maintain the proper state while the pending is still connecting.
    var balToUpdate *balancerWrapper
    if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
        balToUpdate = gsb.balancerCurrent
    } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
        balToUpdate = gsb.balancerPending
    }
    gsb.mu.Unlock()
    if balToUpdate == nil {
        // SubConn belonged to a stale lb policy that has not yet fully closed,
        // or the balancer was already closed.
        return
    }
    balToUpdate.UpdateSubConnState(sc, state)
}

// Close closes any active child balancers.
func (gsb *Balancer) Close() {
    gsb.mu.Lock()
    gsb.closed = true
    currentBalancerToClose := gsb.balancerCurrent
    gsb.balancerCurrent = nil
    pendingBalancerToClose := gsb.balancerPending
    gsb.balancerPending = nil
    gsb.mu.Unlock()

    currentBalancerToClose.Close()
    pendingBalancerToClose.Close()
}

// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
// methods to help cleanup SubConns created by the wrapped balancer.
//
// It implements the balancer.ClientConn interface and is passed down in that
// capacity to the wrapped balancer. It maintains a set of subConns created by
// the wrapped balancer and calls from the latter to create/update/remove
// SubConns update this set before being forwarded to the parent ClientConn.
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
    balancer.Balancer
    gsb *Balancer

    lastState balancer.State
    subconns  map[balancer.SubConn]bool // subconns created by this balancer
}

func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
    if state.ConnectivityState == connectivity.Shutdown {
        bw.gsb.mu.Lock()
        delete(bw.subconns, sc)
        bw.gsb.mu.Unlock()
    }
    // There is no need to protect this read with a mutex, as the write to the
    // Balancer field happens in SwitchTo, which completes before this can be
    // called.
    bw.Balancer.UpdateSubConnState(sc, state)
}

// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() {
    // before Close is called.
    if bw == nil {
        return
    }
    // There is no need to protect this read with a mutex, as Close() is
    // impossible to be called concurrently with the write in SwitchTo(). The
    // callsites of Close() for this balancer in Graceful Switch Balancer will
    // never be called until SwitchTo() returns.
    bw.Balancer.Close()
    bw.gsb.mu.Lock()
    for sc := range bw.subconns {
        bw.gsb.cc.RemoveSubConn(sc)
    }
    bw.gsb.mu.Unlock()
}

func (bw *balancerWrapper) UpdateState(state balancer.State) {
    // Hold the mutex for this entire call to ensure it cannot occur
    // concurrently with other updateState() calls. This causes updates to
    // lastState and calls to cc.UpdateState to happen atomically.
    bw.gsb.mu.Lock()
    defer bw.gsb.mu.Unlock()
    bw.lastState = state

    if !bw.gsb.balancerCurrentOrPending(bw) {
        return
    }

    if bw == bw.gsb.balancerCurrent {
        // In the case that the current balancer exits READY, and there is a pending
        // balancer, you can forward the pending balancer's cached State up to
        // ClientConn and swap the pending into the current. This is because there
        // is no reason to gracefully switch from and keep using the old policy as
        // the ClientConn is not connected to any backends.
        if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
            bw.gsb.swap()
            return
        }
        // Even if there is a pending balancer waiting to be gracefully switched to,
        // continue to forward current balancer updates to the Client Conn. Ignoring
        // state + picker from the current would cause undefined behavior/cause the
        // system to behave incorrectly from the current LB policies perspective.
        // Also, the current LB is still being used by grpc to choose SubConns per
        // RPC, and thus should use the most updated form of the current balancer.
        bw.gsb.cc.UpdateState(state)
        return
    }
    // This method is now dealing with a state update from the pending balancer.
    // If the current balancer is currently in a state other than READY, the new
    // policy can be swapped into place immediately. This is because there is no
    // reason to gracefully switch from and keep using the old policy as the
    // ClientConn is not connected to any backends.
    if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
        bw.gsb.swap()
    }
}

func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
    bw.gsb.mu.Lock()
    if !bw.gsb.balancerCurrentOrPending(bw) {
        bw.gsb.mu.Unlock()
        return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
    }
    bw.gsb.mu.Unlock()

    sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
    if err != nil {
        return nil, err
    }
    bw.gsb.mu.Lock()
    if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
        bw.gsb.cc.RemoveSubConn(sc)
        bw.gsb.mu.Unlock()
        return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
    }
    bw.subconns[sc] = true
    bw.gsb.mu.Unlock()
    return sc, nil
}

func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
    // Ignore ResolveNow requests from anything other than the most recent
    // balancer, because older balancers were already removed from the config.
    if bw != bw.gsb.latestBalancer() {
        return
    }
    bw.gsb.cc.ResolveNow(opts)
}

func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
    bw.gsb.mu.Lock()
    if !bw.gsb.balancerCurrentOrPending(bw) {
        bw.gsb.mu.Unlock()
        return
    }
    bw.gsb.mu.Unlock()
    bw.gsb.cc.RemoveSubConn(sc)
}

func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
    bw.gsb.mu.Lock()
    if !bw.gsb.balancerCurrentOrPending(bw) {
        bw.gsb.mu.Unlock()
        return
    }
    bw.gsb.mu.Unlock()
    bw.gsb.cc.UpdateAddresses(sc, addrs)
}

func (bw *balancerWrapper) Target() string {
    return bw.gsb.cc.Target()
}
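Usage sketch (not part of this diff): the intended call pattern is one SwitchTo per LB policy change, with the old policy kept in service until the new one is ready. This assumes both policy names are registered (round_robin requires importing google.golang.org/grpc/balancer/roundrobin), and cc/opts come from the surrounding gRPC plumbing.

package example

import (
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/internal/balancer/gracefulswitch"
)

// switchPolicies installs pick_first as the current policy, then asks for a
// graceful switch to round_robin; the swap completes only when the pending
// policy reports READY or the current one leaves READY.
func switchPolicies(cc balancer.ClientConn, opts balancer.BuildOptions) error {
    gsb := gracefulswitch.NewBalancer(cc, opts)
    if err := gsb.SwitchTo(balancer.Get("pick_first")); err != nil {
        return err
    }
    return gsb.SwitchTo(balancer.Get("round_robin"))
}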
vendor/google.golang.org/grpc/internal/balancerload/load.go (new file, generated, vendored, +46)
@@ -0,0 +1,46 @@
/*
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package balancerload defines APIs to parse server loads in trailers. The
// parsed loads are sent to balancers in DoneInfo.
package balancerload

import (
    "google.golang.org/grpc/metadata"
)

// Parser converts loads from metadata into a concrete type.
type Parser interface {
    // Parse parses loads from metadata.
    Parse(md metadata.MD) interface{}
}

var parser Parser

// SetParser sets the load parser.
//
// Not mutex-protected, should be called before any gRPC functions.
func SetParser(lr Parser) {
    parser = lr
}

// Parse calls parser.Parse().
func Parse(md metadata.MD) interface{} {
    if parser == nil {
        return nil
    }
    return parser.Parse(md)
}
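Usage sketch (not part of this diff): a hypothetical Parser that surfaces a "load" trailer value to the balancer; the trailer key and the init-time registration are illustrative assumptions.

package example

import (
    "google.golang.org/grpc/internal/balancerload"
    "google.golang.org/grpc/metadata"
)

// trailerLoadParser returns the first "load" trailer value, which then reaches
// the balancer through DoneInfo.
type trailerLoadParser struct{}

func (trailerLoadParser) Parse(md metadata.MD) interface{} {
    if vals := md.Get("load"); len(vals) > 0 {
        return vals[0]
    }
    return nil
}

func init() {
    // SetParser is not mutex-protected, so register before any RPCs are made.
    balancerload.SetParser(trailerLoadParser{})
}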
vendor/google.golang.org/grpc/internal/binarylog/binarylog.go (new file, generated, vendored, +189)
@@ -0,0 +1,189 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package binarylog implements binary logging as defined in
// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
package binarylog

import (
    "fmt"
    "os"

    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/internal/grpcutil"
)

// Logger is the global binary logger. It can be used to get a binary logger for
// each method.
type Logger interface {
    GetMethodLogger(methodName string) MethodLogger
}

// binLogger is the global binary logger for the binary. One of these should be
// built at init time from the configuration (environment variable or flags).
//
// It is used to get a MethodLogger for each individual method.
var binLogger Logger

var grpclogLogger = grpclog.Component("binarylog")

// SetLogger sets the binary logger.
//
// Only call this at init time.
func SetLogger(l Logger) {
    binLogger = l
}

// GetLogger gets the binary logger.
//
// Only call this at init time.
func GetLogger() Logger {
    return binLogger
}

// GetMethodLogger returns the MethodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each MethodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func GetMethodLogger(methodName string) MethodLogger {
    if binLogger == nil {
        return nil
    }
    return binLogger.GetMethodLogger(methodName)
}

func init() {
    const envStr = "GRPC_BINARY_LOG_FILTER"
    configStr := os.Getenv(envStr)
    binLogger = NewLoggerFromConfigString(configStr)
}

// MethodLoggerConfig contains the setting for logging behavior of a method
// logger. Currently, it contains the max length of header and message.
type MethodLoggerConfig struct {
    // Max length of header and message.
    Header, Message uint64
}

// LoggerConfig contains the config for loggers to create method loggers.
type LoggerConfig struct {
    All      *MethodLoggerConfig
    Services map[string]*MethodLoggerConfig
    Methods  map[string]*MethodLoggerConfig

    Blacklist map[string]struct{}
}

type logger struct {
    config LoggerConfig
}

// NewLoggerFromConfig builds a logger with the given LoggerConfig.
func NewLoggerFromConfig(config LoggerConfig) Logger {
    return &logger{config: config}
}

// newEmptyLogger creates an empty logger. The map fields need to be filled in
// using the set* functions.
func newEmptyLogger() *logger {
    return &logger{}
}

// Set method logger for "*".
func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
    if l.config.All != nil {
        return fmt.Errorf("conflicting global rules found")
    }
    l.config.All = ml
    return nil
}

// Set method logger for "service/*".
//
// New MethodLogger with same service overrides the old one.
func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
    if _, ok := l.config.Services[service]; ok {
        return fmt.Errorf("conflicting service rules for service %v found", service)
    }
    if l.config.Services == nil {
        l.config.Services = make(map[string]*MethodLoggerConfig)
    }
    l.config.Services[service] = ml
    return nil
}

// Set method logger for "service/method".
//
// New MethodLogger with same method overrides the old one.
func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
    if _, ok := l.config.Blacklist[method]; ok {
        return fmt.Errorf("conflicting blacklist rules for method %v found", method)
    }
    if _, ok := l.config.Methods[method]; ok {
        return fmt.Errorf("conflicting method rules for method %v found", method)
    }
    if l.config.Methods == nil {
        l.config.Methods = make(map[string]*MethodLoggerConfig)
    }
    l.config.Methods[method] = ml
    return nil
}

// Set blacklist method for "-service/method".
func (l *logger) setBlacklist(method string) error {
    if _, ok := l.config.Blacklist[method]; ok {
        return fmt.Errorf("conflicting blacklist rules for method %v found", method)
    }
    if _, ok := l.config.Methods[method]; ok {
        return fmt.Errorf("conflicting method rules for method %v found", method)
    }
    if l.config.Blacklist == nil {
        l.config.Blacklist = make(map[string]struct{})
    }
    l.config.Blacklist[method] = struct{}{}
    return nil
}

// getMethodLogger returns the MethodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each MethodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) GetMethodLogger(methodName string) MethodLogger {
    s, m, err := grpcutil.ParseMethod(methodName)
    if err != nil {
        grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
        return nil
    }
    if ml, ok := l.config.Methods[s+"/"+m]; ok {
        return NewTruncatingMethodLogger(ml.Header, ml.Message)
    }
    if _, ok := l.config.Blacklist[s+"/"+m]; ok {
        return nil
    }
    if ml, ok := l.config.Services[s]; ok {
        return NewTruncatingMethodLogger(ml.Header, ml.Message)
    }
    if l.config.All == nil {
        return nil
    }
    return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
}
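Usage sketch (not part of this diff): wiring the global logger programmatically instead of relying on the GRPC_BINARY_LOG_FILTER environment variable read in init above; the method name and filter string are illustrative.

package example

import "google.golang.org/grpc/internal/binarylog"

// enableBinaryLogging installs a global filter, then fetches a per-method
// logger. GetMethodLogger returns nil when the method is filtered out or the
// config string was invalid.
func enableBinaryLogging() binarylog.MethodLogger {
    binarylog.SetLogger(binarylog.NewLoggerFromConfigString("*{h:256;m:256}"))
    return binarylog.GetMethodLogger("/helloworld.Greeter/SayHello")
}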
vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go (new file, generated, vendored, +42)
@@ -0,0 +1,42 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// This file contains exported variables/functions that are exported for testing
// only.
//
// An ideal way for this would be to put those in a *_test.go but in binarylog
// package. But this doesn't work with staticcheck with go module. Error was:
// "MdToMetadataProto not declared by package binarylog". This could be caused
// by the way staticcheck looks for files for a certain package, which doesn't
// support *_test.go files.
//
// Move those to binary_test.go when staticcheck is fixed.

package binarylog

var (
    // AllLogger is a logger that logs all headers/messages for all RPCs. It's
    // for testing only.
    AllLogger = NewLoggerFromConfigString("*")
    // MdToMetadataProto converts metadata to a binary logging proto message.
    // It's for testing only.
    MdToMetadataProto = mdToMetadataProto
    // AddrToProto converts an address to a binary logging proto message. It's
    // for testing only.
    AddrToProto = addrToProto
)
vendor/google.golang.org/grpc/internal/binarylog/env_config.go (new file, generated, vendored, +208)
@@ -0,0 +1,208 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package binarylog

import (
    "errors"
    "fmt"
    "regexp"
    "strconv"
    "strings"
)

// NewLoggerFromConfigString reads the string and builds a logger. It can be used
// to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
//   - "" Nothing will be logged
//   - "*" All headers and messages will be fully logged.
//   - "*{h}" Only headers will be logged.
//   - "*{m:256}" Only the first 256 bytes of each message will be logged.
//   - "Foo/*" Logs every method in service Foo
//   - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
//   - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
//     /Foo/Bar, logs all headers and messages in every other method in service
//     Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.
func NewLoggerFromConfigString(s string) Logger {
    if s == "" {
        return nil
    }
    l := newEmptyLogger()
    methods := strings.Split(s, ",")
    for _, method := range methods {
        if err := l.fillMethodLoggerWithConfigString(method); err != nil {
            grpclogLogger.Warningf("failed to parse binary log config: %v", err)
            return nil
        }
    }
    return l
}

// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds
// it to the right map in the logger.
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
    // "" is invalid.
    if config == "" {
        return errors.New("empty string is not a valid method binary logging config")
    }

    // "-service/method", blacklist, no * or {} allowed.
    if config[0] == '-' {
        s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
        if err != nil {
            return fmt.Errorf("invalid config: %q, %v", config, err)
        }
        if m == "*" {
            return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
        }
        if suffix != "" {
            return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
        }
        if err := l.setBlacklist(s + "/" + m); err != nil {
            return fmt.Errorf("invalid config: %v", err)
        }
        return nil
    }

    // "*{h:256;m:256}"
    if config[0] == '*' {
        hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
        if err != nil {
            return fmt.Errorf("invalid config: %q, %v", config, err)
        }
        if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
            return fmt.Errorf("invalid config: %v", err)
        }
        return nil
    }

    s, m, suffix, err := parseMethodConfigAndSuffix(config)
    if err != nil {
        return fmt.Errorf("invalid config: %q, %v", config, err)
    }
    hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
    if err != nil {
        return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
    }
    if m == "*" {
        if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
            return fmt.Errorf("invalid config: %v", err)
        }
    } else {
        if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
            return fmt.Errorf("invalid config: %v", err)
        }
    }
    return nil
}

const (
    // TODO: this const is only used by env_config now. But could be useful for
    // other config. Move to binarylog.go if necessary.
    maxUInt = ^uint64(0)

    // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
    // expected output.
    longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`

    // For suffix from above, "{h:123,m:123}". See test for expected output.
    optionalLengthRegexpStr      = `(?::(\d+))?` // Optional ":123".
    headerConfigRegexpStr        = `^{h` + optionalLengthRegexpStr + `}$`
    messageConfigRegexpStr       = `^{m` + optionalLengthRegexpStr + `}$`
    headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
)

var (
    longMethodConfigRegexp    = regexp.MustCompile(longMethodConfigRegexpStr)
    headerConfigRegexp        = regexp.MustCompile(headerConfigRegexpStr)
    messageConfigRegexp       = regexp.MustCompile(messageConfigRegexpStr)
    headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
)

// Turn "service/method{h;m}" into "service", "method", "{h;m}".
func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
    // Regexp result:
    //
    // in: "p.s/m{h:123,m:123}",
    // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
    match := longMethodConfigRegexp.FindStringSubmatch(c)
    if match == nil {
        return "", "", "", fmt.Errorf("%q contains invalid substring", c)
    }
    service = match[1]
    method = match[2]
    suffix = match[3]
    return
}

// Turn "{h:123;m:345}" into 123, 345.
//
// Return maxUInt if length is unspecified.
func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
    if c == "" {
        return maxUInt, maxUInt, nil
    }
    // Header config only.
    if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
        if s := match[1]; s != "" {
            hdrLenStr, err = strconv.ParseUint(s, 10, 64)
            if err != nil {
                return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
            }
            return hdrLenStr, 0, nil
        }
        return maxUInt, 0, nil
    }

    // Message config only.
    if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
        if s := match[1]; s != "" {
            msgLenStr, err = strconv.ParseUint(s, 10, 64)
            if err != nil {
                return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
            }
            return 0, msgLenStr, nil
        }
        return 0, maxUInt, nil
    }

    // Header and message config both.
    if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
        // Both hdr and msg are specified, but one or two of them might be empty.
        hdrLenStr = maxUInt
        msgLenStr = maxUInt
        if s := match[1]; s != "" {
            hdrLenStr, err = strconv.ParseUint(s, 10, 64)
            if err != nil {
                return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
            }
        }
        if s := match[2]; s != "" {
            msgLenStr, err = strconv.ParseUint(s, 10, 64)
            if err != nil {
                return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
            }
        }
        return hdrLenStr, msgLenStr, nil
    }
    return 0, 0, fmt.Errorf("%q contains invalid substring", c)
}
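Usage sketch (not part of this diff): the parser's failure mode is a nil Logger rather than an error value, so callers treat nil as logging disabled. The example filter string is illustrative.

package example

import "google.golang.org/grpc/internal/binarylog"

// parseFilter turns a filter string such as "Foo/*,-Foo/Bar,*{h:64;m:1024}"
// into a Logger; an empty or malformed string yields nil.
func parseFilter(config string) binarylog.Logger {
    return binarylog.NewLoggerFromConfigString(config)
}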
vendor/google.golang.org/grpc/internal/binarylog/method_logger.go (new file, generated, vendored, +435)
@@ -0,0 +1,435 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package binarylog

import (
    "net"
    "strings"
    "sync/atomic"
    "time"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes"
    binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/status"
)

type callIDGenerator struct {
    id uint64
}

func (g *callIDGenerator) next() uint64 {
    id := atomic.AddUint64(&g.id, 1)
    return id
}

// reset is for testing only, and doesn't need to be thread safe.
func (g *callIDGenerator) reset() {
    g.id = 0
}

var idGen callIDGenerator

// MethodLogger is the sub-logger for each method.
type MethodLogger interface {
    Log(LogEntryConfig)
}

// TruncatingMethodLogger is a method logger that truncates headers and messages
// based on configured fields.
type TruncatingMethodLogger struct {
    headerMaxLen, messageMaxLen uint64

    callID          uint64
    idWithinCallGen *callIDGenerator

    sink Sink // TODO(blog): make this pluggable.
}

// NewTruncatingMethodLogger returns a new truncating method logger.
func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
    return &TruncatingMethodLogger{
        headerMaxLen:  h,
        messageMaxLen: m,

        callID:          idGen.next(),
        idWithinCallGen: &callIDGenerator{},

        sink: DefaultSink, // TODO(blog): make it pluggable.
    }
}

// Build is an internal only method for building the proto message out of the
// input event. It's made public to enable other libraries to reuse as much logic
// in TruncatingMethodLogger as possible.
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
    m := c.toProto()
    timestamp, _ := ptypes.TimestampProto(time.Now())
    m.Timestamp = timestamp
    m.CallId = ml.callID
    m.SequenceIdWithinCall = ml.idWithinCallGen.next()

    switch pay := m.Payload.(type) {
    case *binlogpb.GrpcLogEntry_ClientHeader:
        m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
    case *binlogpb.GrpcLogEntry_ServerHeader:
        m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
    case *binlogpb.GrpcLogEntry_Message:
        m.PayloadTruncated = ml.truncateMessage(pay.Message)
    }
    return m
}

// Log creates a proto binary log entry, and logs it to the sink.
func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
    ml.sink.Write(ml.Build(c))
}

func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
    if ml.headerMaxLen == maxUInt {
        return false
    }
    var (
        bytesLimit = ml.headerMaxLen
        index      int
    )
    // At the end of the loop, index will be the first entry where the total
    // size is greater than the limit:
    //
    // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
    for ; index < len(mdPb.Entry); index++ {
        entry := mdPb.Entry[index]
        if entry.Key == "grpc-trace-bin" {
            // "grpc-trace-bin" is a special key. It's kept in the log entry,
            // but not counted towards the size limit.
            continue
        }
        currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
        if currentEntryLen > bytesLimit {
            break
        }
        bytesLimit -= currentEntryLen
    }
    truncated = index < len(mdPb.Entry)
    mdPb.Entry = mdPb.Entry[:index]
    return truncated
}

func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
    if ml.messageMaxLen == maxUInt {
        return false
    }
    if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
        return false
    }
    msgPb.Data = msgPb.Data[:ml.messageMaxLen]
    return true
}

// LogEntryConfig represents the configuration for binary log entry.
type LogEntryConfig interface {
    toProto() *binlogpb.GrpcLogEntry
}

// ClientHeader configs the binary log entry to be a ClientHeader entry.
type ClientHeader struct {
    OnClientSide bool
    Header       metadata.MD
    MethodName   string
    Authority    string
    Timeout      time.Duration
    // PeerAddr is required only when it's on server side.
    PeerAddr net.Addr
}

func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
    // This function doesn't need to set all the fields (e.g. seq ID). The Log
    // function will set the fields when necessary.
    clientHeader := &binlogpb.ClientHeader{
        Metadata:   mdToMetadataProto(c.Header),
        MethodName: c.MethodName,
        Authority:  c.Authority,
    }
    if c.Timeout > 0 {
        clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
    }
    ret := &binlogpb.GrpcLogEntry{
        Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
        Payload: &binlogpb.GrpcLogEntry_ClientHeader{
            ClientHeader: clientHeader,
        },
    }
    if c.OnClientSide {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
    } else {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
    }
    if c.PeerAddr != nil {
        ret.Peer = addrToProto(c.PeerAddr)
    }
    return ret
}

// ServerHeader configs the binary log entry to be a ServerHeader entry.
type ServerHeader struct {
    OnClientSide bool
    Header       metadata.MD
    // PeerAddr is required only when it's on client side.
    PeerAddr net.Addr
}

func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
    ret := &binlogpb.GrpcLogEntry{
        Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
        Payload: &binlogpb.GrpcLogEntry_ServerHeader{
            ServerHeader: &binlogpb.ServerHeader{
                Metadata: mdToMetadataProto(c.Header),
            },
        },
    }
    if c.OnClientSide {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
    } else {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
    }
    if c.PeerAddr != nil {
        ret.Peer = addrToProto(c.PeerAddr)
    }
    return ret
}

// ClientMessage configs the binary log entry to be a ClientMessage entry.
type ClientMessage struct {
    OnClientSide bool
    // Message can be a proto.Message or []byte. Other message formats are not
    // supported.
    Message interface{}
}

func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
    var (
        data []byte
        err  error
    )
    if m, ok := c.Message.(proto.Message); ok {
        data, err = proto.Marshal(m)
        if err != nil {
            grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
        }
    } else if b, ok := c.Message.([]byte); ok {
        data = b
    } else {
        grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
    }
    ret := &binlogpb.GrpcLogEntry{
        Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
        Payload: &binlogpb.GrpcLogEntry_Message{
            Message: &binlogpb.Message{
                Length: uint32(len(data)),
                Data:   data,
            },
        },
    }
    if c.OnClientSide {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
    } else {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
    }
    return ret
}

// ServerMessage configs the binary log entry to be a ServerMessage entry.
type ServerMessage struct {
    OnClientSide bool
    // Message can be a proto.Message or []byte. Other message formats are not
    // supported.
    Message interface{}
}

func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
    var (
        data []byte
        err  error
    )
    if m, ok := c.Message.(proto.Message); ok {
        data, err = proto.Marshal(m)
        if err != nil {
            grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
        }
    } else if b, ok := c.Message.([]byte); ok {
        data = b
    } else {
        grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
    }
    ret := &binlogpb.GrpcLogEntry{
        Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
        Payload: &binlogpb.GrpcLogEntry_Message{
            Message: &binlogpb.Message{
                Length: uint32(len(data)),
                Data:   data,
            },
        },
    }
    if c.OnClientSide {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
    } else {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
    }
    return ret
}

// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
type ClientHalfClose struct {
    OnClientSide bool
}

func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
    ret := &binlogpb.GrpcLogEntry{
        Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
        Payload: nil, // No payload here.
    }
    if c.OnClientSide {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
    } else {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
    }
    return ret
}

// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
type ServerTrailer struct {
    OnClientSide bool
    Trailer      metadata.MD
    // Err is the status error.
    Err error
    // PeerAddr is required only when it's on client side and the RPC is trailer
    // only.
    PeerAddr net.Addr
}

func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
    st, ok := status.FromError(c.Err)
    if !ok {
        grpclogLogger.Info("binarylogging: error in trailer is not a status error")
    }
    var (
        detailsBytes []byte
        err          error
    )
    stProto := st.Proto()
    if stProto != nil && len(stProto.Details) != 0 {
        detailsBytes, err = proto.Marshal(stProto)
        if err != nil {
            grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
        }
    }
    ret := &binlogpb.GrpcLogEntry{
        Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
        Payload: &binlogpb.GrpcLogEntry_Trailer{
            Trailer: &binlogpb.Trailer{
                Metadata:      mdToMetadataProto(c.Trailer),
                StatusCode:    uint32(st.Code()),
                StatusMessage: st.Message(),
                StatusDetails: detailsBytes,
            },
        },
    }
    if c.OnClientSide {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
    } else {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
    }
    if c.PeerAddr != nil {
        ret.Peer = addrToProto(c.PeerAddr)
    }
    return ret
}

// Cancel configs the binary log entry to be a Cancel entry.
type Cancel struct {
    OnClientSide bool
}

func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
    ret := &binlogpb.GrpcLogEntry{
        Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
        Payload: nil,
    }
    if c.OnClientSide {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
    } else {
        ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
    }
    return ret
}

// metadataKeyOmit returns whether the metadata entry with this key should be
// omitted.
func metadataKeyOmit(key string) bool {
    switch key {
    case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
        return true
    case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
        return false
    }
    return strings.HasPrefix(key, "grpc-")
}

func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
    ret := &binlogpb.Metadata{}
    for k, vv := range md {
        if metadataKeyOmit(k) {
            continue
        }
        for _, v := range vv {
            ret.Entry = append(ret.Entry,
                &binlogpb.MetadataEntry{
                    Key:   k,
                    Value: []byte(v),
                },
            )
        }
    }
    return ret
}

func addrToProto(addr net.Addr) *binlogpb.Address {
    ret := &binlogpb.Address{}
    switch a := addr.(type) {
    case *net.TCPAddr:
        if a.IP.To4() != nil {
            ret.Type = binlogpb.Address_TYPE_IPV4
        } else if a.IP.To16() != nil {
            ret.Type = binlogpb.Address_TYPE_IPV6
        } else {
            ret.Type = binlogpb.Address_TYPE_UNKNOWN
            // Do not set address and port fields.
            break
        }
        ret.Address = a.IP.String()
        ret.IpPort = uint32(a.Port)
    case *net.UnixAddr:
        ret.Type = binlogpb.Address_TYPE_UNIX
        ret.Address = a.String()
    default:
        ret.Type = binlogpb.Address_TYPE_UNKNOWN
    }
    return ret
}
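Usage sketch (not part of this diff): emitting a truncated client header and message for one hypothetical RPC; the method name, authority, and metadata are illustrative.

package example

import (
    "google.golang.org/grpc/internal/binarylog"
    "google.golang.org/grpc/metadata"
)

// logClientCall logs a client header (metadata capped at 64 bytes) and a
// client message (capped at 1024 bytes) through a truncating method logger.
func logClientCall() {
    ml := binarylog.NewTruncatingMethodLogger(64, 1024)
    ml.Log(&binarylog.ClientHeader{
        OnClientSide: true,
        MethodName:   "/helloworld.Greeter/SayHello",
        Authority:    "example.com:443",
        Header:       metadata.Pairs("user-id", "42"),
    })
    ml.Log(&binarylog.ClientMessage{OnClientSide: true, Message: []byte("hello")})
}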
vendor/google.golang.org/grpc/internal/binarylog/sink.go (new file, generated, vendored, +170)
@@ -0,0 +1,170 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultSink is the sink where the logs will be written to. It's exported
|
||||
// for the binarylog package to update.
|
||||
DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
|
||||
)
|
||||
|
||||
// Sink writes log entry into the binary log sink.
|
||||
//
|
||||
// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
|
||||
type Sink interface {
|
||||
// Write will be called to write the log entry into the sink.
|
||||
//
|
||||
// It should be thread-safe so it can be called in parallel.
|
||||
Write(*binlogpb.GrpcLogEntry) error
|
||||
// Close will be called when the Sink is replaced by a new Sink.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type noopSink struct{}
|
||||
|
||||
func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
|
||||
func (ns *noopSink) Close() error { return nil }
|
||||
|
||||
// newWriterSink creates a binary log sink with the given writer.
|
||||
//
|
||||
// Write() marshals the proto message and writes it to the given writer. Each
|
||||
// message is prefixed with a 4 byte big endian unsigned integer as the length.
|
||||
//
|
||||
// No buffer is done, Close() doesn't try to close the writer.
|
||||
func newWriterSink(w io.Writer) Sink {
|
||||
return &writerSink{out: w}
|
||||
}
|
||||
|
||||
type writerSink struct {
|
||||
out io.Writer
|
||||
}
|
||||
|
||||
func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
|
||||
b, err := proto.Marshal(e)
|
||||
if err != nil {
|
||||
grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
|
||||
return err
|
||||
}
|
||||
hdr := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
|
||||
if _, err := ws.out.Write(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := ws.out.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ws *writerSink) Close() error { return nil }
|
||||
|
||||
type bufferedSink struct {
|
||||
mu sync.Mutex
|
||||
closer io.Closer
|
||||
out Sink // out is built on buf.
|
||||
buf *bufio.Writer // buf is kept for flush.
|
||||
flusherStarted bool
|
||||
|
||||
writeTicker *time.Ticker
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if !fs.flusherStarted {
|
||||
// Start the write loop when Write is called.
|
||||
fs.startFlushGoroutine()
|
||||
fs.flusherStarted = true
|
||||
}
|
||||
if err := fs.out.Write(e); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
bufFlushDuration = 60 * time.Second
|
||||
)
|
||||
|
||||
func (fs *bufferedSink) startFlushGoroutine() {
|
||||
fs.writeTicker = time.NewTicker(bufFlushDuration)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-fs.done:
|
||||
return
|
||||
case <-fs.writeTicker.C:
|
||||
}
|
||||
fs.mu.Lock()
|
||||
if err := fs.buf.Flush(); err != nil {
|
||||
grpclogLogger.Warningf("failed to flush to Sink: %v", err)
|
||||
}
|
||||
fs.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (fs *bufferedSink) Close() error {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
if fs.writeTicker != nil {
|
||||
fs.writeTicker.Stop()
|
||||
}
|
||||
close(fs.done)
|
||||
if err := fs.buf.Flush(); err != nil {
|
||||
grpclogLogger.Warningf("failed to flush to Sink: %v", err)
|
||||
}
|
||||
if err := fs.closer.Close(); err != nil {
|
||||
grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
|
||||
}
|
||||
if err := fs.out.Close(); err != nil {
|
||||
grpclogLogger.Warningf("failed to close the Sink: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewBufferedSink creates a binary log sink with the given WriteCloser.
|
||||
//
|
||||
// Write() marshals the proto message and writes it to the given writer. Each
|
||||
// message is prefixed with a 4 byte big endian unsigned integer as the length.
|
||||
//
|
||||
// Content is kept in a buffer, and is flushed every 60 seconds.
|
||||
//
|
||||
// Close closes the WriteCloser.
|
||||
func NewBufferedSink(o io.WriteCloser) Sink {
|
||||
bufW := bufio.NewWriter(o)
|
||||
return &bufferedSink{
|
||||
closer: o,
|
||||
out: newWriterSink(bufW),
|
||||
buf: bufW,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
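// Illustrative sketch of wiring the buffered sink to a file; the path, error
// handling, and caller are assumptions for demonstration, not part of this
// package. In gRPC proper the result would typically be installed as the
// package-level sink.
//
//	f, err := os.Create("/tmp/grpc_binarylog.bin") // hypothetical path
//	if err != nil {
//		return err
//	}
//	sink := NewBufferedSink(f)                 // buffers writes, flushes every 60s
//	defer sink.Close()                         // flushes and closes the file
//	err = sink.Write(&binlogpb.GrpcLogEntry{}) // entries come from the binarylog package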
|
85
vendor/google.golang.org/grpc/internal/buffer/unbounded.go
generated
vendored
Normal file
85
vendor/google.golang.org/grpc/internal/buffer/unbounded.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package buffer provides an implementation of an unbounded buffer.
|
||||
package buffer
|
||||
|
||||
import "sync"
|
||||
|
||||
// Unbounded is an implementation of an unbounded buffer which does not use
|
||||
// extra goroutines. This is typically used for passing updates from one entity
|
||||
// to another within gRPC.
|
||||
//
|
||||
// All methods on this type are thread-safe and don't block on anything except
|
||||
// the underlying mutex used for synchronization.
|
||||
//
|
||||
// Unbounded supports values of any type to be stored in it by using a channel
|
||||
// of `interface{}`. This means that a call to Put() incurs an extra memory
|
||||
// allocation, and also that users need a type assertion while reading. For
|
||||
// performance critical code paths, using Unbounded is strongly discouraged and
|
||||
// defining a new type specific implementation of this buffer is preferred. See
|
||||
// internal/transport/transport.go for an example of this.
|
||||
type Unbounded struct {
|
||||
c chan interface{}
|
||||
mu sync.Mutex
|
||||
backlog []interface{}
|
||||
}
|
||||
|
||||
// NewUnbounded returns a new instance of Unbounded.
|
||||
func NewUnbounded() *Unbounded {
|
||||
return &Unbounded{c: make(chan interface{}, 1)}
|
||||
}
|
||||
|
||||
// Put adds t to the unbounded buffer.
|
||||
func (b *Unbounded) Put(t interface{}) {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Load sends the earliest buffered data, if any, onto the read channel
|
||||
// returned by Get(). Users are expected to call this every time they read a
|
||||
// value from the read channel.
|
||||
func (b *Unbounded) Load() {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Get returns a read channel on which values added to the buffer, via Put(),
|
||||
// are sent on.
|
||||
//
|
||||
// Upon reading a value from this channel, users are expected to call Load() to
|
||||
// send the next buffered value onto the channel if there is any.
|
||||
func (b *Unbounded) Get() <-chan interface{} {
|
||||
return b.c
|
||||
}
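// Illustrative sketch of the intended consumption pattern described above
// (the producer goroutine and handle() are assumptions for demonstration):
//
//	b := NewUnbounded()
//
//	// Producer side: Put never blocks.
//	go func() {
//		b.Put("state update 1")
//		b.Put("state update 2")
//	}()
//
//	// Consumer side: read one value, then call Load() to request the next one.
//	for {
//		v := <-b.Get()
//		b.Load()
//		handle(v.(string)) // type assertion is required; handle is hypothetical
//	}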
|
789
vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
Normal file
789
vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
Normal file
@@ -0,0 +1,789 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package channelz defines APIs for enabling channelz service, entry
|
||||
// registration/deletion, and accessing channelz data. It also defines channelz
|
||||
// metric struct formats.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMaxTraceEntry int32 = 30
|
||||
)
|
||||
|
||||
var (
|
||||
db dbWrapper
|
||||
idGen idGenerator
|
||||
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
||||
EntryPerPage = int64(50)
|
||||
curState int32
|
||||
maxTraceEntry = defaultMaxTraceEntry
|
||||
)
|
||||
|
||||
// TurnOn turns on channelz data collection.
|
||||
func TurnOn() {
|
||||
if !IsOn() {
|
||||
db.set(newChannelMap())
|
||||
idGen.reset()
|
||||
atomic.StoreInt32(&curState, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// IsOn returns whether channelz data collection is on.
|
||||
func IsOn() bool {
|
||||
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
||||
}
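// Illustrative sketch: data collection is normally switched on when the
// channelz service is registered; a minimal equivalent from a caller's point
// of view is simply:
//
//	channelz.TurnOn()
//	if channelz.IsOn() {
//		// Registrations below will now populate the channelz database.
//	}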
|
||||
|
||||
// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
|
||||
// Setting it to 0 will disable channel tracing.
|
||||
func SetMaxTraceEntry(i int32) {
|
||||
atomic.StoreInt32(&maxTraceEntry, i)
|
||||
}
|
||||
|
||||
// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
|
||||
func ResetMaxTraceEntryToDefault() {
|
||||
atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
|
||||
}
|
||||
|
||||
func getMaxTraceEntry() int {
|
||||
i := atomic.LoadInt32(&maxTraceEntry)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
// dbWrapper wraps around a reference to internal channelz data storage, and
|
||||
// provides synchronized functionality to set and get the reference.
|
||||
type dbWrapper struct {
|
||||
mu sync.RWMutex
|
||||
DB *channelMap
|
||||
}
|
||||
|
||||
func (d *dbWrapper) set(db *channelMap) {
|
||||
d.mu.Lock()
|
||||
d.DB = db
|
||||
d.mu.Unlock()
|
||||
}
|
||||
|
||||
func (d *dbWrapper) get() *channelMap {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return d.DB
|
||||
}
|
||||
|
||||
// NewChannelzStorageForTesting initializes channelz data storage and id
|
||||
// generator for testing purposes.
|
||||
//
|
||||
// Returns a cleanup function to be invoked by the test, which waits for up to
|
||||
// 10s for all channelz state to be reset by the grpc goroutines when those
|
||||
// entities get closed. This cleanup function helps with ensuring that tests
|
||||
// don't mess up each other.
|
||||
func NewChannelzStorageForTesting() (cleanup func() error) {
|
||||
db.set(newChannelMap())
|
||||
idGen.reset()
|
||||
|
||||
return func() error {
|
||||
cm := db.get()
|
||||
if cm == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
ticker := time.NewTicker(10 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
cm.mu.RLock()
|
||||
topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
|
||||
cm.mu.RUnlock()
|
||||
|
||||
if err := ctx.Err(); err != nil {
|
||||
return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
|
||||
}
|
||||
if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
|
||||
return nil
|
||||
}
|
||||
<-ticker.C
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetTopChannels returns a slice of top channels' ChannelMetric, along with a
|
||||
// boolean indicating whether there are more top channels to be queried for.
|
||||
//
|
||||
// The arg id specifies that only top channels with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of the arg maxResults or
|
||||
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||
return db.get().GetTopChannels(id, maxResults)
|
||||
}
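// Illustrative sketch of paging through every top channel using the contract
// described above (variable names are assumptions):
//
//	var start int64
//	for {
//		metrics, end := channelz.GetTopChannels(start, 0) // 0 means EntryPerPage
//		for _, m := range metrics {
//			// Inspect m.ID, m.RefName, m.ChannelData, m.Trace ...
//		}
//		if end || len(metrics) == 0 {
//			break
//		}
//		start = metrics[len(metrics)-1].ID + 1 // next page starts after the last id seen
//	}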
|
||||
|
||||
// GetServers returns a slice of servers' ServerMetric, along with a
|
||||
// boolean indicating whether there are more servers to be queried for.
|
||||
//
|
||||
// The arg id specifies that only servers with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of the arg maxResults or
|
||||
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
|
||||
return db.get().GetServers(id, maxResults)
|
||||
}
|
||||
|
||||
// GetServerSockets returns a slice of the server's (identified by id) normal sockets'
|
||||
// SocketMetric, along with a boolean indicating whether there are more sockets to
|
||||
// be queried for.
|
||||
//
|
||||
// The arg startID specifies that only sockets with id at or above it will be
|
||||
// included in the result. The returned slice is up to a length of the arg maxResults
|
||||
// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
|
||||
return db.get().GetServerSockets(id, startID, maxResults)
|
||||
}
|
||||
|
||||
// GetChannel returns the ChannelMetric for the channel (identified by id).
|
||||
func GetChannel(id int64) *ChannelMetric {
|
||||
return db.get().GetChannel(id)
|
||||
}
|
||||
|
||||
// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
|
||||
func GetSubChannel(id int64) *SubChannelMetric {
|
||||
return db.get().GetSubChannel(id)
|
||||
}
|
||||
|
||||
// GetSocket returns the SocketInternalMetric for the socket (identified by id).
|
||||
func GetSocket(id int64) *SocketMetric {
|
||||
return db.get().GetSocket(id)
|
||||
}
|
||||
|
||||
// GetServer returns the ServerMetric for the server (identified by id).
|
||||
func GetServer(id int64) *ServerMetric {
|
||||
return db.get().GetServer(id)
|
||||
}
|
||||
|
||||
// RegisterChannel registers the given channel c in the channelz database with
|
||||
// ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid). pid == nil means no parent.
|
||||
//
|
||||
// Returns a unique channelz identifier assigned to this channel.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
|
||||
id := idGen.genID()
|
||||
var parent int64
|
||||
isTopChannel := true
|
||||
if pid != nil {
|
||||
isTopChannel = false
|
||||
parent = pid.Int()
|
||||
}
|
||||
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefChannel, id, pid)
|
||||
}
|
||||
|
||||
cn := &channel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
subChans: make(map[int64]string),
|
||||
nestedChans: make(map[int64]string),
|
||||
id: id,
|
||||
pid: parent,
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
db.get().addChannel(id, cn, isTopChannel, parent)
|
||||
return newIdentifer(RefChannel, id, pid)
|
||||
}
|
||||
|
||||
// RegisterSubChannel registers the given subChannel c in the channelz database
|
||||
// with ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid).
|
||||
//
|
||||
// Returns a unique channelz identifier assigned to this subChannel.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a SubChannel's parent id cannot be nil")
|
||||
}
|
||||
id := idGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefSubChannel, id, pid), nil
|
||||
}
|
||||
|
||||
sc := &subChannel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
sockets: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid.Int(),
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
db.get().addSubChannel(id, sc, pid.Int())
|
||||
return newIdentifer(RefSubChannel, id, pid), nil
|
||||
}
|
||||
|
||||
// RegisterServer registers the given server s in channelz database. It returns
|
||||
// the unique channelz tracking id assigned to this server.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterServer(s Server, ref string) *Identifier {
|
||||
id := idGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefServer, id, nil)
|
||||
}
|
||||
|
||||
svr := &server{
|
||||
refName: ref,
|
||||
s: s,
|
||||
sockets: make(map[int64]string),
|
||||
listenSockets: make(map[int64]string),
|
||||
id: id,
|
||||
}
|
||||
db.get().addServer(id, svr)
|
||||
return newIdentifer(RefServer, id, nil)
|
||||
}
|
||||
|
||||
// RegisterListenSocket registers the given listen socket s in channelz database
|
||||
// with ref as its reference name, and add it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this listen socket.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a ListenSocket's parent id cannot be 0")
|
||||
}
|
||||
id := idGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefListenSocket, id, pid), nil
|
||||
}
|
||||
|
||||
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
|
||||
db.get().addListenSocket(id, ls, pid.Int())
|
||||
return newIdentifer(RefListenSocket, id, pid), nil
|
||||
}
|
||||
|
||||
// RegisterNormalSocket registers the given normal socket s in channelz database
|
||||
// with ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this normal socket.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a NormalSocket's parent id cannot be 0")
|
||||
}
|
||||
id := idGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefNormalSocket, id, pid), nil
|
||||
}
|
||||
|
||||
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
|
||||
db.get().addNormalSocket(id, ns, pid.Int())
|
||||
return newIdentifer(RefNormalSocket, id, pid), nil
|
||||
}
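// Illustrative sketch of how the registration helpers above build the
// parent/child hierarchy. srv, lis and conn are assumed to implement the
// channelz Server and Socket interfaces; they are not defined here.
//
//	srvID := channelz.RegisterServer(srv, "my-server")
//	lsID, _ := channelz.RegisterListenSocket(lis, srvID, "tcp-listener")
//	connID, _ := channelz.RegisterNormalSocket(conn, srvID, "incoming-conn")
//
//	// Entries are removed in child-to-parent order when the objects close.
//	channelz.RemoveEntry(connID)
//	channelz.RemoveEntry(lsID)
//	channelz.RemoveEntry(srvID)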
|
||||
|
||||
// RemoveEntry removes the entry with the given unique channelz tracking id from
|
||||
// the channelz database.
|
||||
//
|
||||
// If channelz is not turned ON, this function is a no-op.
|
||||
func RemoveEntry(id *Identifier) {
|
||||
if !IsOn() {
|
||||
return
|
||||
}
|
||||
db.get().removeEntry(id.Int())
|
||||
}
|
||||
|
||||
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
|
||||
// the event to be added to the channel trace.
|
||||
//
|
||||
// The Parent field is optional. It is used for an event that will be recorded
|
||||
// in the entity's parent trace.
|
||||
type TraceEventDesc struct {
|
||||
Desc string
|
||||
Severity Severity
|
||||
Parent *TraceEventDesc
|
||||
}
|
||||
|
||||
// AddTraceEvent adds a trace event related to the entity with the specified id,
|
||||
// using the provided TraceEventDesc.
|
||||
//
|
||||
// If channelz is not turned ON, this will simply log the event descriptions.
|
||||
func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
|
||||
// Log only the trace description associated with the bottom most entity.
|
||||
switch desc.Severity {
|
||||
case CtUnknown, CtInfo:
|
||||
l.InfoDepth(depth+1, withParens(id)+desc.Desc)
|
||||
case CtWarning:
|
||||
l.WarningDepth(depth+1, withParens(id)+desc.Desc)
|
||||
case CtError:
|
||||
l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
|
||||
}
|
||||
|
||||
if getMaxTraceEntry() == 0 {
|
||||
return
|
||||
}
|
||||
if IsOn() {
|
||||
db.get().traceEvent(id.Int(), desc)
|
||||
}
|
||||
}
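// Illustrative sketch: recording a subchannel event that is also mirrored into
// its parent channel's trace via the Parent field. logger and scID are assumed
// to come from the caller.
//
//	channelz.AddTraceEvent(logger, scID, 0, &channelz.TraceEventDesc{
//		Desc:     "Subchannel created",
//		Severity: channelz.CtInfo,
//		Parent: &channelz.TraceEventDesc{
//			Desc:     "Subchannel created by this channel",
//			Severity: channelz.CtInfo,
//		},
//	})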
|
||||
|
||||
// channelMap is the storage data structure for channelz.
|
||||
// Methods of channelMap can be divided into two categories with respect to locking.
|
||||
// 1. Methods that acquire the global lock.
|
||||
// 2. Methods that can only be called when the global lock is already held.
|
||||
// Methods of the second type must always be called from within a method of the first type.
|
||||
type channelMap struct {
|
||||
mu sync.RWMutex
|
||||
topLevelChannels map[int64]struct{}
|
||||
servers map[int64]*server
|
||||
channels map[int64]*channel
|
||||
subChannels map[int64]*subChannel
|
||||
listenSockets map[int64]*listenSocket
|
||||
normalSockets map[int64]*normalSocket
|
||||
}
|
||||
|
||||
func newChannelMap() *channelMap {
|
||||
return &channelMap{
|
||||
topLevelChannels: make(map[int64]struct{}),
|
||||
channels: make(map[int64]*channel),
|
||||
listenSockets: make(map[int64]*listenSocket),
|
||||
normalSockets: make(map[int64]*normalSocket),
|
||||
servers: make(map[int64]*server),
|
||||
subChannels: make(map[int64]*subChannel),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) addServer(id int64, s *server) {
|
||||
c.mu.Lock()
|
||||
s.cm = c
|
||||
c.servers[id] = s
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
|
||||
c.mu.Lock()
|
||||
cn.cm = c
|
||||
cn.trace.cm = c
|
||||
c.channels[id] = cn
|
||||
if isTopChannel {
|
||||
c.topLevelChannels[id] = struct{}{}
|
||||
} else {
|
||||
c.findEntry(pid).addChild(id, cn)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
|
||||
c.mu.Lock()
|
||||
sc.cm = c
|
||||
sc.trace.cm = c
|
||||
c.subChannels[id] = sc
|
||||
c.findEntry(pid).addChild(id, sc)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
|
||||
c.mu.Lock()
|
||||
ls.cm = c
|
||||
c.listenSockets[id] = ls
|
||||
c.findEntry(pid).addChild(id, ls)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
|
||||
c.mu.Lock()
|
||||
ns.cm = c
|
||||
c.normalSockets[id] = ns
|
||||
c.findEntry(pid).addChild(id, ns)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// removeEntry triggers the removal of an entry, which may not actually delete the entry
|
||||
// if it has to wait on the deletion of its children, or until no other entity's channel trace references it.
|
||||
// It may lead to a chain of entry deletions. For example, deleting the last socket of a gracefully
|
||||
// shutting down server will also lead to the server being deleted.
|
||||
func (c *channelMap) removeEntry(id int64) {
|
||||
c.mu.Lock()
|
||||
c.findEntry(id).triggerDelete()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
func (c *channelMap) decrTraceRefCount(id int64) {
|
||||
e := c.findEntry(id)
|
||||
if v, ok := e.(tracedChannel); ok {
|
||||
v.decrTraceRefCount()
|
||||
e.deleteSelfIfReady()
|
||||
}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller.
|
||||
func (c *channelMap) findEntry(id int64) entry {
|
||||
var v entry
|
||||
var ok bool
|
||||
if v, ok = c.channels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.subChannels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.servers[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.listenSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.normalSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
return &dummyEntry{idNotFound: id}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
// deleteEntry simply deletes an entry from the channelMap. Before calling this
|
||||
// method, the caller must check that this entry is ready to be deleted, i.e. removeEntry()
|
||||
// has been called on it, and no children still exist.
|
||||
// Conditionals are ordered by the expected frequency of deletion of each entity
|
||||
// type, in order to optimize performance.
|
||||
func (c *channelMap) deleteEntry(id int64) {
|
||||
var ok bool
|
||||
if _, ok = c.normalSockets[id]; ok {
|
||||
delete(c.normalSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.subChannels[id]; ok {
|
||||
delete(c.subChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.channels[id]; ok {
|
||||
delete(c.channels, id)
|
||||
delete(c.topLevelChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.listenSockets[id]; ok {
|
||||
delete(c.listenSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.servers[id]; ok {
|
||||
delete(c.servers, id)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
|
||||
c.mu.Lock()
|
||||
child := c.findEntry(id)
|
||||
childTC, ok := child.(tracedChannel)
|
||||
if !ok {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
|
||||
if desc.Parent != nil {
|
||||
parent := c.findEntry(child.getParentID())
|
||||
var chanType RefChannelType
|
||||
switch child.(type) {
|
||||
case *channel:
|
||||
chanType = RefChannel
|
||||
case *subChannel:
|
||||
chanType = RefSubChannel
|
||||
}
|
||||
if parentTC, ok := parent.(tracedChannel); ok {
|
||||
parentTC.getChannelTrace().append(&TraceEvent{
|
||||
Desc: desc.Parent.Desc,
|
||||
Severity: desc.Parent.Severity,
|
||||
Timestamp: time.Now(),
|
||||
RefID: id,
|
||||
RefName: childTC.getRefName(),
|
||||
RefType: chanType,
|
||||
})
|
||||
childTC.incrTraceRefCount()
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (s int64Slice) Len() int { return len(s) }
|
||||
func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
|
||||
func copyMap(m map[int64]string) map[int64]string {
|
||||
n := make(map[int64]string)
|
||||
for k, v := range m {
|
||||
n[k] = v
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func min(a, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
l := int64(len(c.topLevelChannels))
|
||||
ids := make([]int64, 0, l)
|
||||
cns := make([]*channel, 0, min(l, maxResults))
|
||||
|
||||
for k := range c.topLevelChannels {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
var t []*ChannelMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if cn, ok := c.channels[v]; ok {
|
||||
cns = append(cns, cn)
|
||||
t = append(t, &ChannelMetric{
|
||||
NestedChans: copyMap(cn.nestedChans),
|
||||
SubChans: copyMap(cn.subChans),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, cn := range cns {
|
||||
t[i].ChannelData = cn.c.ChannelzMetric()
|
||||
t[i].ID = cn.id
|
||||
t[i].RefName = cn.refName
|
||||
t[i].Trace = cn.trace.dumpData()
|
||||
}
|
||||
return t, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
l := int64(len(c.servers))
|
||||
ids := make([]int64, 0, l)
|
||||
ss := make([]*server, 0, min(l, maxResults))
|
||||
for k := range c.servers {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
var s []*ServerMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if svr, ok := c.servers[v]; ok {
|
||||
ss = append(ss, svr)
|
||||
s = append(s, &ServerMetric{
|
||||
ListenSockets: copyMap(svr.listenSockets),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, svr := range ss {
|
||||
s[i].ServerData = svr.s.ChannelzMetric()
|
||||
s[i].ID = svr.id
|
||||
s[i].RefName = svr.refName
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
var svr *server
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if svr, ok = c.servers[id]; !ok {
|
||||
// server with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil, true
|
||||
}
|
||||
svrskts := svr.sockets
|
||||
l := int64(len(svrskts))
|
||||
ids := make([]int64, 0, l)
|
||||
sks := make([]*normalSocket, 0, min(l, maxResults))
|
||||
for k := range svrskts {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if ns, ok := c.normalSockets[v]; ok {
|
||||
sks = append(sks, ns)
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
s := make([]*SocketMetric, 0, len(sks))
|
||||
for _, ns := range sks {
|
||||
sm := &SocketMetric{}
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
s = append(s, sm)
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetChannel(id int64) *ChannelMetric {
|
||||
cm := &ChannelMetric{}
|
||||
var cn *channel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if cn, ok = c.channels[id]; !ok {
|
||||
// channel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.NestedChans = copyMap(cn.nestedChans)
|
||||
cm.SubChans = copyMap(cn.subChans)
|
||||
// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
|
||||
// holding the lock to prevent potential data race.
|
||||
chanCopy := cn.c
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = chanCopy.ChannelzMetric()
|
||||
cm.ID = cn.id
|
||||
cm.RefName = cn.refName
|
||||
cm.Trace = cn.trace.dumpData()
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
|
||||
cm := &SubChannelMetric{}
|
||||
var sc *subChannel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if sc, ok = c.subChannels[id]; !ok {
|
||||
// subchannel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.Sockets = copyMap(sc.sockets)
|
||||
// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
|
||||
// holding the lock to prevent potential data race.
|
||||
chanCopy := sc.c
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = chanCopy.ChannelzMetric()
|
||||
cm.ID = sc.id
|
||||
cm.RefName = sc.refName
|
||||
cm.Trace = sc.trace.dumpData()
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSocket(id int64) *SocketMetric {
|
||||
sm := &SocketMetric{}
|
||||
c.mu.RLock()
|
||||
if ls, ok := c.listenSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ls.s.ChannelzMetric()
|
||||
sm.ID = ls.id
|
||||
sm.RefName = ls.refName
|
||||
return sm
|
||||
}
|
||||
if ns, ok := c.normalSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
return sm
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServer(id int64) *ServerMetric {
|
||||
sm := &ServerMetric{}
|
||||
var svr *server
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if svr, ok = c.servers[id]; !ok {
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
sm.ListenSockets = copyMap(svr.listenSockets)
|
||||
c.mu.RUnlock()
|
||||
sm.ID = svr.id
|
||||
sm.RefName = svr.refName
|
||||
sm.ServerData = svr.s.ChannelzMetric()
|
||||
return sm
|
||||
}
|
||||
|
||||
type idGenerator struct {
|
||||
id int64
|
||||
}
|
||||
|
||||
func (i *idGenerator) reset() {
|
||||
atomic.StoreInt64(&i.id, 0)
|
||||
}
|
||||
|
||||
func (i *idGenerator) genID() int64 {
|
||||
return atomic.AddInt64(&i.id, 1)
|
||||
}
|
75
vendor/google.golang.org/grpc/internal/channelz/id.go
generated
vendored
Normal file
75
vendor/google.golang.org/grpc/internal/channelz/id.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Identifier is an opaque identifier which uniquely identifies an entity in the
|
||||
// channelz database.
|
||||
type Identifier struct {
|
||||
typ RefChannelType
|
||||
id int64
|
||||
str string
|
||||
pid *Identifier
|
||||
}
|
||||
|
||||
// Type returns the entity type corresponding to id.
|
||||
func (id *Identifier) Type() RefChannelType {
|
||||
return id.typ
|
||||
}
|
||||
|
||||
// Int returns the integer identifier corresponding to id.
|
||||
func (id *Identifier) Int() int64 {
|
||||
return id.id
|
||||
}
|
||||
|
||||
// String returns a string representation of the entity corresponding to id.
|
||||
//
|
||||
// This includes some information about the parent as well. Examples:
|
||||
// Top-level channel: [Channel #channel-number]
|
||||
// Nested channel: [Channel #parent-channel-number Channel #channel-number]
|
||||
// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
|
||||
func (id *Identifier) String() string {
|
||||
return id.str
|
||||
}
|
||||
|
||||
// Equal returns true if other is the same as id.
|
||||
func (id *Identifier) Equal(other *Identifier) bool {
|
||||
if (id != nil) != (other != nil) {
|
||||
return false
|
||||
}
|
||||
if id == nil && other == nil {
|
||||
return true
|
||||
}
|
||||
return id.typ == other.typ && id.id == other.id && id.pid == other.pid
|
||||
}
|
||||
|
||||
// NewIdentifierForTesting returns a new opaque identifier to be used only for
|
||||
// testing purposes.
|
||||
func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
|
||||
return newIdentifer(typ, id, pid)
|
||||
}
|
||||
|
||||
func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
|
||||
str := fmt.Sprintf("%s #%d", typ, id)
|
||||
if pid != nil {
|
||||
str = fmt.Sprintf("%s %s", pid, str)
|
||||
}
|
||||
return &Identifier{typ: typ, id: id, str: str, pid: pid}
|
||||
}
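// Illustrative sketch of the resulting string forms (ids and nesting chosen
// arbitrarily; RefChannel and RefSubChannel are assumed to print as "Channel"
// and "SubChannel" respectively):
//
//	top := NewIdentifierForTesting(RefChannel, 1, nil)    // String(): "Channel #1"
//	sub := NewIdentifierForTesting(RefSubChannel, 2, top) // String(): "Channel #1 SubChannel #2"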
|
79
vendor/google.golang.org/grpc/internal/channelz/logging.go
generated
vendored
Normal file
79
vendor/google.golang.org/grpc/internal/channelz/logging.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("channelz")
|
||||
|
||||
func withParens(id *Identifier) string {
|
||||
return "[" + id.String() + "] "
|
||||
}
|
||||
|
||||
// Info logs and adds a trace event if channelz is on.
|
||||
func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// Infof logs and adds a trace event if channelz is on.
|
||||
func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// Warning logs and adds a trace event if channelz is on.
|
||||
func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
}
|
||||
|
||||
// Warningf logs and adds a trace event if channelz is on.
|
||||
func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
}
|
||||
|
||||
// Error logs and adds a trace event if channelz is on.
|
||||
func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
}
|
||||
|
||||
// Errorf logs and adds a trace event if channelz is on.
|
||||
func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
}
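// Illustrative sketch of how callers inside grpc use these helpers; logger,
// id, addr, err, and the message text are all assumptions for demonstration.
//
//	channelz.Infof(logger, id, "Channel switches to new LB policy %q", "pick_first")
//	channelz.Warningf(logger, id, "Adjusting keepalive ping interval to minimum period of %v", 10*time.Second)
//	channelz.Errorf(logger, id, "Failed to dial %s: %v", addr, err)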
|
722
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
Normal file
722
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
Normal file
@@ -0,0 +1,722 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// entry represents a node in the channelz database.
|
||||
type entry interface {
|
||||
// addChild adds a child e, whose channelz id is id, to the child list.
|
||||
addChild(id int64, e entry)
|
||||
// deleteChild deletes the child with the given channelz id from the child list.
|
||||
deleteChild(id int64)
|
||||
// triggerDelete tries to delete self from channelz database. However, if child
|
||||
// list is not empty, then deletion from the database is on hold until the last
|
||||
// child is deleted from database.
|
||||
triggerDelete()
|
||||
// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the child
|
||||
// list is now empty. If both conditions are met, then delete self from database.
|
||||
deleteSelfIfReady()
|
||||
// getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
|
||||
getParentID() int64
|
||||
}
|
||||
|
||||
// dummyEntry is a fake entry to handle entry not found case.
|
||||
type dummyEntry struct {
|
||||
idNotFound int64
|
||||
}
|
||||
|
||||
func (d *dummyEntry) addChild(id int64, e entry) {
|
||||
// Note: It is possible for a normal program to reach here under a race condition.
|
||||
// For example, there could be a race between ClientConn.Close() info being propagated
|
||||
// to addrConn and http2Client. ClientConn.Close() cancels the context, which causes
|
||||
// http2Client to error out. The error info is then caught by the transport monitor
|
||||
// before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
|
||||
// the addrConn will create a new transport. When registering the new transport in
|
||||
// channelz, its parent addrConn could have already been torn down and deleted
|
||||
// from channelz tracking, and thus the code reaches here.
|
||||
logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) deleteChild(id int64) {
|
||||
// It is possible for a normal program to reach here under race condition.
|
||||
// Refer to the example described in addChild().
|
||||
logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) triggerDelete() {
|
||||
logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
}
|
||||
|
||||
func (*dummyEntry) deleteSelfIfReady() {
|
||||
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||
}
|
||||
|
||||
func (*dummyEntry) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
||||
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ChannelMetric struct {
|
||||
// ID is the channelz id of this channel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this channel.
|
||||
RefName string
|
||||
// ChannelData contains channel internal metric reported by the channel through
|
||||
// ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this channel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this channel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this channel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
// Note: the current grpc implementation doesn't allow a channel to have sockets directly;
|
||||
// therefore, this field is unused.
|
||||
Sockets map[int64]string
|
||||
// Trace contains the most recent traced events.
|
||||
Trace *ChannelTrace
|
||||
}
|
||||
|
||||
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
||||
// which includes ChannelInternalMetric and channelz-specific data, such as
|
||||
// channelz id, child list, etc.
|
||||
type SubChannelMetric struct {
|
||||
// ID is the channelz id of this subchannel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this subchannel.
|
||||
RefName string
|
||||
// ChannelData contains subchannel internal metric reported by the subchannel
|
||||
// through ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this subchannel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
// Note: the current grpc implementation doesn't allow a subchannel to have nested channels
|
||||
// as children; therefore, this field is unused.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this subchannel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
// Note: the current grpc implementation doesn't allow a subchannel to have subchannels
|
||||
// as children; therefore, this field is unused.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this subchannel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
Sockets map[int64]string
|
||||
// Trace contains the most recent traced events.
|
||||
Trace *ChannelTrace
|
||||
}
|
||||
|
||||
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
||||
// should return from ChannelzMetric().
|
||||
type ChannelInternalMetric struct {
|
||||
// current connectivity state of the channel.
|
||||
State connectivity.State
|
||||
// The target this channel originally tried to connect to. May be absent
|
||||
Target string
|
||||
// The number of calls started on the channel.
|
||||
CallsStarted int64
|
||||
// The number of calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of calls that have completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the channel.
|
||||
LastCallStartedTimestamp time.Time
|
||||
}
|
||||
|
||||
// ChannelTrace stores traced events on a channel/subchannel and related info.
|
||||
type ChannelTrace struct {
|
||||
// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
|
||||
EventNum int64
|
||||
// CreationTime is the creation time of the trace.
|
||||
CreationTime time.Time
|
||||
// Events stores the most recent trace events (up to $maxTraceEntry; newer events will overwrite the
|
||||
// oldest one).
|
||||
Events []*TraceEvent
|
||||
}
|
||||
|
||||
// TraceEvent represents a single trace event.
|
||||
type TraceEvent struct {
|
||||
// Desc is a simple description of the trace event.
|
||||
Desc string
|
||||
// Severity states the severity of this trace event.
|
||||
Severity Severity
|
||||
// Timestamp is the event time.
|
||||
Timestamp time.Time
|
||||
// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
|
||||
// involved in this event.
|
||||
// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
|
||||
RefID int64
|
||||
// RefName is the reference name for the entity that gets referenced in the event.
|
||||
RefName string
|
||||
// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
|
||||
RefType RefChannelType
|
||||
}
|
||||
|
||||
// Channel is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Channel or SubChannel.
|
||||
type Channel interface {
|
||||
ChannelzMetric() *ChannelInternalMetric
|
||||
}
|
||||
|
||||
type dummyChannel struct{}
|
||||
|
||||
func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
|
||||
return &ChannelInternalMetric{}
|
||||
}
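// Illustrative sketch of a concrete type satisfying the Channel interface; the
// type name and field values below are assumptions for demonstration only.
//
//	type exampleChannel struct {
//		target string
//		calls  int64
//	}
//
//	func (e *exampleChannel) ChannelzMetric() *ChannelInternalMetric {
//		return &ChannelInternalMetric{
//			State:        connectivity.Ready,
//			Target:       e.target,
//			CallsStarted: atomic.LoadInt64(&e.calls),
//		}
//	}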
|
||||
|
||||
type channel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
nestedChans map[int64]string
|
||||
subChans map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
trace *channelTrace
|
||||
// traceRefCount is the number of trace events that reference this channel.
|
||||
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
|
||||
traceRefCount int32
|
||||
}
|
||||
|
||||
func (c *channel) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *subChannel:
|
||||
c.subChans[id] = v.refName
|
||||
case *channel:
|
||||
c.nestedChans[id] = v.refName
|
||||
default:
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channel) deleteChild(id int64) {
|
||||
delete(c.subChans, id)
|
||||
delete(c.nestedChans, id)
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) triggerDelete() {
|
||||
c.closeCalled = true
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) getParentID() int64 {
|
||||
return c.pid
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
|
||||
// deleting the channel reference from its parent's child list.
|
||||
//
|
||||
// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the
|
||||
// corresponding grpc object has been invoked, and the channel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the channel has been successfully deleted from tree.
|
||||
func (c *channel) deleteSelfFromTree() (deleted bool) {
|
||||
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
|
||||
return false
|
||||
}
|
||||
// not top channel
|
||||
if c.pid != 0 {
|
||||
c.cm.findEntry(c.pid).deleteChild(c.id)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
|
||||
// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
|
||||
// channel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the channel must be 0 in order for it to be deleted from the map. The
|
||||
// channel tracing gRFC specifies that as long as some other trace has a reference to an entity, the
|
||||
// trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the channel can be safely deleted from map.
|
||||
func (c *channel) deleteSelfFromMap() (delete bool) {
|
||||
if c.getTraceRefCount() != 0 {
|
||||
c.c = &dummyChannel{}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return entry not found error.
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
if !c.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !c.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
c.cm.deleteEntry(c.id)
|
||||
c.trace.clear()
|
||||
}
|
||||
|
||||
func (c *channel) getChannelTrace() *channelTrace {
|
||||
return c.trace
|
||||
}
|
||||
|
||||
func (c *channel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (c *channel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (c *channel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&c.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (c *channel) getRefName() string {
|
||||
return c.refName
|
||||
}
|
||||
|
||||
type subChannel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
trace *channelTrace
|
||||
traceRefCount int32
|
||||
}
|
||||
|
||||
func (sc *subChannel) addChild(id int64, e entry) {
|
||||
if v, ok := e.(*normalSocket); ok {
|
||||
sc.sockets[id] = v.refName
|
||||
} else {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *subChannel) deleteChild(id int64) {
|
||||
delete(sc.sockets, id)
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) triggerDelete() {
|
||||
sc.closeCalled = true
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) getParentID() int64 {
|
||||
return sc.pid
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
|
||||
// means deleting the subchannel reference from its parent's child list.
|
||||
//
|
||||
// In order for a subchannel to be deleted from the tree, it must meet two criteria: removal of
|
||||
// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the channel has been successfully deleted from tree.
|
||||
func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
|
||||
if !sc.closeCalled || len(sc.sockets) != 0 {
|
||||
return false
|
||||
}
|
||||
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
|
||||
// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
|
||||
// the subchannel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the subchannel must be 0 in order for it to be deleted from the map. The
|
||||
// channel tracing gRFC specifies that as long as some other trace has a reference to an entity, the
|
||||
// trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the channel can be safely deleted from map.
|
||||
func (sc *subChannel) deleteSelfFromMap() (delete bool) {
|
||||
if sc.getTraceRefCount() != 0 {
|
||||
// free the grpc struct (i.e. addrConn)
|
||||
sc.c = &dummyChannel{}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return entry not found error.
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
if !sc.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !sc.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
sc.cm.deleteEntry(sc.id)
|
||||
sc.trace.clear()
|
||||
}
|
||||
|
||||
func (sc *subChannel) getChannelTrace() *channelTrace {
|
||||
return sc.trace
|
||||
}
|
||||
|
||||
func (sc *subChannel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (sc *subChannel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (sc *subChannel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&sc.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (sc *subChannel) getRefName() string {
|
||||
return sc.refName
|
||||
}
|
||||
|
||||
// SocketMetric defines the info channelz provides for a specific Socket, which
|
||||
// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
|
||||
type SocketMetric struct {
|
||||
// ID is the channelz id of this socket.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this socket.
|
||||
RefName string
|
||||
// SocketData contains socket internal metric reported by the socket through
|
||||
// ChannelzMetric().
|
||||
SocketData *SocketInternalMetric
|
||||
}
|
||||
|
||||
// SocketInternalMetric defines the struct that the implementor of Socket interface
|
||||
// should return from ChannelzMetric().
|
||||
type SocketInternalMetric struct {
|
||||
// The number of streams that have been started.
|
||||
StreamsStarted int64
|
||||
// The number of streams that have ended successfully:
|
||||
// On client side, receiving frame with eos bit set.
|
||||
// On server side, sending frame with eos bit set.
|
||||
StreamsSucceeded int64
|
||||
// The number of streams that have ended unsuccessfully:
|
||||
// On client side, termination without receiving frame with eos bit set.
|
||||
// On server side, termination without sending frame with eos bit set.
|
||||
StreamsFailed int64
|
||||
// The number of messages successfully sent on this socket.
|
||||
MessagesSent int64
|
||||
MessagesReceived int64
|
||||
// The number of keep alives sent. This is typically implemented with HTTP/2
|
||||
// ping messages.
|
||||
KeepAlivesSent int64
|
||||
// The last time a stream was created by this endpoint. Usually unset for
|
||||
// servers.
|
||||
LastLocalStreamCreatedTimestamp time.Time
|
||||
// The last time a stream was created by the remote endpoint. Usually unset
|
||||
// for clients.
|
||||
LastRemoteStreamCreatedTimestamp time.Time
|
||||
// The last time a message was sent by this endpoint.
|
||||
LastMessageSentTimestamp time.Time
|
||||
// The last time a message was received by this endpoint.
|
||||
LastMessageReceivedTimestamp time.Time
|
||||
// The amount of window, granted to the local endpoint by the remote endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
LocalFlowControlWindow int64
|
||||
// The amount of window, granted to the remote endpoint by the local endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
RemoteFlowControlWindow int64
|
||||
// The locally bound address.
|
||||
LocalAddr net.Addr
|
||||
// The remote bound address. May be absent.
|
||||
RemoteAddr net.Addr
|
||||
// Optional, represents the name of the remote endpoint, if different than
|
||||
// the original target name.
|
||||
RemoteName string
|
||||
SocketOptions *SocketOptionData
|
||||
Security credentials.ChannelzSecurityValue
|
||||
}
|
||||
|
||||
// Socket is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Socket.
|
||||
type Socket interface {
|
||||
ChannelzMetric() *SocketInternalMetric
|
||||
}
|
||||
|
||||
type listenSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ls *listenSocket) addChild(id int64, e entry) {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteChild(id int64) {
|
||||
logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) triggerDelete() {
|
||||
ls.cm.deleteEntry(ls.id)
|
||||
ls.cm.findEntry(ls.pid).deleteChild(ls.id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteSelfIfReady() {
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
func (ls *listenSocket) getParentID() int64 {
|
||||
return ls.pid
|
||||
}
|
||||
|
||||
type normalSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ns *normalSocket) addChild(id int64, e entry) {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteChild(id int64) {
|
||||
logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) triggerDelete() {
|
||||
ns.cm.deleteEntry(ns.id)
|
||||
ns.cm.findEntry(ns.pid).deleteChild(ns.id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteSelfIfReady() {
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
}
|
||||
|
||||
func (ns *normalSocket) getParentID() int64 {
|
||||
return ns.pid
|
||||
}
|
||||
|
||||
// ServerMetric defines the info channelz provides for a specific Server, which
|
||||
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ServerMetric struct {
|
||||
// ID is the channelz id of this server.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this server.
|
||||
RefName string
|
||||
// ServerData contains server internal metric reported by the server through
|
||||
// ChannelzMetric().
|
||||
ServerData *ServerInternalMetric
|
||||
// ListenSockets tracks the listener socket type children of this server in the
|
||||
// format of a map from socket channelz id to corresponding reference string.
|
||||
ListenSockets map[int64]string
|
||||
}
|
||||
|
||||
// ServerInternalMetric defines the struct that the implementor of Server interface
|
||||
// should return from ChannelzMetric().
|
||||
type ServerInternalMetric struct {
|
||||
// The number of incoming calls started on the server.
|
||||
CallsStarted int64
|
||||
// The number of incoming calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of incoming calls that have completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the server.
|
||||
LastCallStartedTimestamp time.Time
|
||||
}
|
||||
|
||||
// Server is the interface to be satisfied in order to be tracked by channelz as
|
||||
// Server.
|
||||
type Server interface {
|
||||
ChannelzMetric() *ServerInternalMetric
|
||||
}
|
||||
|
||||
type server struct {
|
||||
refName string
|
||||
s Server
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
listenSockets map[int64]string
|
||||
id int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (s *server) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *normalSocket:
|
||||
s.sockets[id] = v.refName
|
||||
case *listenSocket:
|
||||
s.listenSockets[id] = v.refName
|
||||
default:
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) deleteChild(id int64) {
|
||||
delete(s.sockets, id)
|
||||
delete(s.listenSockets, id)
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) triggerDelete() {
|
||||
s.closeCalled = true
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) deleteSelfIfReady() {
|
||||
if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
|
||||
return
|
||||
}
|
||||
s.cm.deleteEntry(s.id)
|
||||
}
|
||||
|
||||
func (s *server) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type tracedChannel interface {
|
||||
getChannelTrace() *channelTrace
|
||||
incrTraceRefCount()
|
||||
decrTraceRefCount()
|
||||
getRefName() string
|
||||
}
|
||||
|
||||
type channelTrace struct {
|
||||
cm *channelMap
|
||||
createdTime time.Time
|
||||
eventCount int64
|
||||
mu sync.Mutex
|
||||
events []*TraceEvent
|
||||
}
|
||||
|
||||
func (c *channelTrace) append(e *TraceEvent) {
|
||||
c.mu.Lock()
|
||||
if len(c.events) == getMaxTraceEntry() {
|
||||
del := c.events[0]
|
||||
c.events = c.events[1:]
|
||||
if del.RefID != 0 {
|
||||
// start recursive cleanup in a goroutine so as not to block the call originating from grpc.
|
||||
go func() {
|
||||
// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
|
||||
c.cm.mu.Lock()
|
||||
c.cm.decrTraceRefCount(del.RefID)
|
||||
c.cm.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
}
|
||||
e.Timestamp = time.Now()
|
||||
c.events = append(c.events, e)
|
||||
c.eventCount++
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelTrace) clear() {
|
||||
c.mu.Lock()
|
||||
for _, e := range c.events {
|
||||
if e.RefID != 0 {
|
||||
// caller should have already held the c.cm.mu lock.
|
||||
c.cm.decrTraceRefCount(e.RefID)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Severity is the severity level of a trace event.
|
||||
// The canonical enumeration of all valid values is here:
|
||||
// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
|
||||
type Severity int
|
||||
|
||||
const (
|
||||
// CtUnknown indicates unknown severity of a trace event.
|
||||
CtUnknown Severity = iota
|
||||
// CtInfo indicates info level severity of a trace event.
|
||||
CtInfo
|
||||
// CtWarning indicates warning level severity of a trace event.
|
||||
CtWarning
|
||||
// CtError indicates error level severity of a trace event.
|
||||
CtError
|
||||
)
|
||||
|
||||
// RefChannelType is the type of the entity being referenced in a trace event.
|
||||
type RefChannelType int
|
||||
|
||||
const (
|
||||
// RefUnknown indicates an unknown entity type, the zero value for this type.
|
||||
RefUnknown RefChannelType = iota
|
||||
// RefChannel indicates the referenced entity is a Channel.
|
||||
RefChannel
|
||||
// RefSubChannel indicates the referenced entity is a SubChannel.
|
||||
RefSubChannel
|
||||
// RefServer indicates the referenced entity is a Server.
|
||||
RefServer
|
||||
// RefListenSocket indicates the referenced entity is a ListenSocket.
|
||||
RefListenSocket
|
||||
// RefNormalSocket indicates the referenced entity is a NormalSocket.
|
||||
RefNormalSocket
|
||||
)
|
||||
|
||||
var refChannelTypeToString = map[RefChannelType]string{
|
||||
RefUnknown: "Unknown",
|
||||
RefChannel: "Channel",
|
||||
RefSubChannel: "SubChannel",
|
||||
RefServer: "Server",
|
||||
RefListenSocket: "ListenSocket",
|
||||
RefNormalSocket: "NormalSocket",
|
||||
}
|
||||
|
||||
func (r RefChannelType) String() string {
|
||||
return refChannelTypeToString[r]
|
||||
}
|
||||
|
||||
func (c *channelTrace) dumpData() *ChannelTrace {
|
||||
c.mu.Lock()
|
||||
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
|
||||
ct.Events = c.events[:len(c.events)]
|
||||
c.mu.Unlock()
|
||||
return ct
|
||||
}
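A minimal sketch (illustrative only, not part of the vendored file) of how a trace is read back out; it assumes the TraceEvent fields Desc and Severity declared elsewhere in this file.

package channelz

import (
	"fmt"
	"time"
)

// exampleDumpTrace shows that dumpData copies the counters under the trace
// mutex and hands back the events gathered so far.
func exampleDumpTrace() {
	ct := &channelTrace{createdTime: time.Now()}
	ct.events = append(ct.events, &TraceEvent{Desc: "Channel created", Severity: CtInfo})
	ct.eventCount = int64(len(ct.events))

	dump := ct.dumpData()
	fmt.Println(dump.EventNum, len(dump.Events)) // 1 1
}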
|
51
vendor/google.golang.org/grpc/internal/channelz/types_linux.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// SocketOptionData defines the struct to hold socket option data, and related
|
||||
// getter function to obtain info from fd.
|
||||
type SocketOptionData struct {
|
||||
Linger *unix.Linger
|
||||
RecvTimeout *unix.Timeval
|
||||
SendTimeout *unix.Timeval
|
||||
TCPInfo *unix.TCPInfo
|
||||
}
|
||||
|
||||
// Getsockopt defines the function to get socket options requested by channelz.
|
||||
// It is to be passed to syscall.RawConn.Control().
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {
|
||||
if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
|
||||
s.Linger = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
|
||||
s.RecvTimeout = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
|
||||
s.SendTimeout = v
|
||||
}
|
||||
if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
|
||||
s.TCPInfo = v
|
||||
}
|
||||
}
|
43
vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var once sync.Once
|
||||
|
||||
// SocketOptionData defines the struct to hold socket option data, and related
|
||||
// getter function to obtain info from fd.
|
||||
// Socket options are not supported on non-linux environments.
|
||||
type SocketOptionData struct {
|
||||
}
|
||||
|
||||
// Getsockopt defines the function to get socket options requested by channelz.
|
||||
// It is to be passed to syscall.RawConn.Control().
|
||||
// Socket options are not supported on non-linux environments.
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {
|
||||
once.Do(func() {
|
||||
logger.Warning("Channelz: socket options are not supported on non-linux environments")
|
||||
})
|
||||
}
|
37
vendor/google.golang.org/grpc/internal/channelz/util_linux.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// GetSocketOption gets the socket option info of the conn.
|
||||
func GetSocketOption(socket interface{}) *SocketOptionData {
|
||||
c, ok := socket.(syscall.Conn)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
data := &SocketOptionData{}
|
||||
if rawConn, err := c.SyscallConn(); err == nil {
|
||||
rawConn.Control(data.Getsockopt)
|
||||
return data
|
||||
}
|
||||
return nil
|
||||
}
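A usage sketch, linux only and illustrative rather than vendored code (the function name is hypothetical): any connection that implements syscall.Conn, such as *net.TCPConn, can be inspected through GetSocketOption.

package channelz

import (
	"fmt"
	"net"
)

func exampleGetSocketOption(conn *net.TCPConn) {
	data := GetSocketOption(conn) // nil if conn does not implement syscall.Conn
	if data != nil && data.TCPInfo != nil {
		fmt.Println("smoothed RTT (us):", data.TCPInfo.Rtt)
	}
}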
|
27
vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
// GetSocketOption gets the socket option info of the conn.
|
||||
func GetSocketOption(c interface{}) *SocketOptionData {
|
||||
return nil
|
||||
}
|
49
vendor/google.golang.org/grpc/internal/credentials/credentials.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// requestInfoKey is a struct to be used as the key to store RequestInfo in a
|
||||
// context.
|
||||
type requestInfoKey struct{}
|
||||
|
||||
// NewRequestInfoContext creates a context with ri.
|
||||
func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
|
||||
return context.WithValue(ctx, requestInfoKey{}, ri)
|
||||
}
|
||||
|
||||
// RequestInfoFromContext extracts the RequestInfo from ctx.
|
||||
func RequestInfoFromContext(ctx context.Context) interface{} {
|
||||
return ctx.Value(requestInfoKey{})
|
||||
}
|
||||
|
||||
// clientHandshakeInfoKey is a struct used as the key to store
|
||||
// ClientHandshakeInfo in a context.
|
||||
type clientHandshakeInfoKey struct{}
|
||||
|
||||
// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
|
||||
func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
|
||||
return ctx.Value(clientHandshakeInfoKey{})
|
||||
}
|
||||
|
||||
// NewClientHandshakeInfoContext creates a context with chi.
|
||||
func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
|
||||
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
|
||||
}
|
75
vendor/google.golang.org/grpc/internal/credentials/spiffe.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package credentials defines APIs for parsing SPIFFE ID.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net/url"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("credentials")
|
||||
|
||||
// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
|
||||
// is invalid, it returns nil with a warning.
|
||||
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
|
||||
if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return SPIFFEIDFromCert(state.PeerCertificates[0])
|
||||
}
|
||||
|
||||
// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
|
||||
// ID format is invalid, it returns nil with a warning.
|
||||
func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
|
||||
if cert == nil || cert.URIs == nil {
|
||||
return nil
|
||||
}
|
||||
var spiffeID *url.URL
|
||||
for _, uri := range cert.URIs {
|
||||
if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
|
||||
continue
|
||||
}
|
||||
// From this point, we assume the uri is intended for a SPIFFE ID.
|
||||
if len(uri.String()) > 2048 {
|
||||
logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
|
||||
return nil
|
||||
}
|
||||
if len(uri.Host) == 0 || len(uri.Path) == 0 {
|
||||
logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
|
||||
return nil
|
||||
}
|
||||
if len(uri.Host) > 255 {
|
||||
logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
|
||||
return nil
|
||||
}
|
||||
// A valid SPIFFE certificate can only have exactly one URI SAN field.
|
||||
if len(cert.URIs) > 1 {
|
||||
logger.Warning("invalid SPIFFE ID: multiple URI SANs")
|
||||
return nil
|
||||
}
|
||||
spiffeID = uri
|
||||
}
|
||||
return spiffeID
|
||||
}
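An illustrative sketch (same package, hypothetical function name): a certificate whose single URI SAN is a well-formed spiffe:// URI yields its SPIFFE ID; anything else yields nil.

package credentials

import (
	"crypto/x509"
	"fmt"
	"net/url"
)

func exampleSPIFFEID() {
	u, _ := url.Parse("spiffe://example.org/workload/api")
	cert := &x509.Certificate{URIs: []*url.URL{u}}

	if id := SPIFFEIDFromCert(cert); id != nil {
		fmt.Println(id.Host, id.Path) // example.org /workload/api
	}
}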
|
58
vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type sysConn = syscall.Conn
|
||||
|
||||
// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
|
||||
// SyscallConn() (the method in interface syscall.Conn) is explicitly
|
||||
// implemented on this type.
|
||||
//
|
||||
// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
|
||||
// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
|
||||
// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
|
||||
// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
|
||||
// help here).
|
||||
type syscallConn struct {
|
||||
net.Conn
|
||||
// sysConn is a type alias of syscall.Conn. It's necessary because the name
|
||||
// `Conn` collides with `net.Conn`.
|
||||
sysConn
|
||||
}
|
||||
|
||||
// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
|
||||
// implements syscall.Conn. rawConn will be used to support syscall, and newConn
|
||||
// will be used for read/write.
|
||||
//
|
||||
// This function returns newConn if rawConn doesn't implement syscall.Conn.
|
||||
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
|
||||
sysConn, ok := rawConn.(syscall.Conn)
|
||||
if !ok {
|
||||
return newConn
|
||||
}
|
||||
return &syscallConn{
|
||||
Conn: newConn,
|
||||
sysConn: sysConn,
|
||||
}
|
||||
}
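A hedged sketch (hypothetical helper, same package): pairing the raw TCP connection with the TLS connection restores syscall.Conn, so channelz can keep reading socket options after the handshake.

package credentials

import (
	"crypto/tls"
	"net"
	"syscall"
)

func exampleWrapSyscallConn(rawConn *net.TCPConn, cfg *tls.Config) net.Conn {
	tlsConn := tls.Server(rawConn, cfg)          // tls.Conn hides the raw fd
	wrapped := WrapSyscallConn(rawConn, tlsConn) // reads/writes go to tlsConn
	_, ok := wrapped.(syscall.Conn)              // true: syscalls reach rawConn again
	_ = ok
	return wrapped
}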
|
52
vendor/google.golang.org/grpc/internal/credentials/util.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
// AppendH2ToNextProtos appends h2 to next protos.
|
||||
func AppendH2ToNextProtos(ps []string) []string {
|
||||
for _, p := range ps {
|
||||
if p == alpnProtoStrH2 {
|
||||
return ps
|
||||
}
|
||||
}
|
||||
ret := make([]string, 0, len(ps)+1)
|
||||
ret = append(ret, ps...)
|
||||
return append(ret, alpnProtoStrH2)
|
||||
}
|
||||
|
||||
// CloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO: inline this function if possible.
|
||||
func CloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
|
||||
return cfg.Clone()
|
||||
}
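A small illustrative sketch of the two helpers above; the function name is hypothetical.

package credentials

import "fmt"

func exampleALPNHelpers() {
	fmt.Println(AppendH2ToNextProtos([]string{"http/1.1"})) // [http/1.1 h2]
	fmt.Println(AppendH2ToNextProtos([]string{"h2"}))       // [h2] (no duplicate appended)

	cfg := CloneTLSConfig(nil) // nil is safe: a fresh zero tls.Config is returned
	cfg.NextProtos = AppendH2ToNextProtos(cfg.NextProtos)
	_ = cfg
}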
|
62
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package envconfig contains grpc settings configured by environment variables.
|
||||
package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
|
||||
// AdvertiseCompressors is set if registered compressor should be advertised
|
||||
// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
|
||||
AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
|
||||
// RingHashCap indicates the maximum ring size which defaults to 4096
|
||||
// entries but may be overridden by setting the environment variable
|
||||
// "GRPC_RING_HASH_CAP". This does not override the default bounds
|
||||
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
|
||||
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
|
||||
)
|
||||
|
||||
func boolFromEnv(envVar string, def bool) bool {
|
||||
if def {
|
||||
// The default is true; return true unless the variable is "false".
|
||||
return !strings.EqualFold(os.Getenv(envVar), "false")
|
||||
}
|
||||
// The default is false; return false unless the variable is "true".
|
||||
return strings.EqualFold(os.Getenv(envVar), "true")
|
||||
}
|
||||
|
||||
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
|
||||
v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
|
||||
if err != nil {
|
||||
return def
|
||||
}
|
||||
if v < min {
|
||||
return min
|
||||
}
|
||||
if v > max {
|
||||
return max
|
||||
}
|
||||
return v
|
||||
}
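An illustrative sketch of the parsing helpers above (same package, hypothetical env values).

package envconfig

import (
	"fmt"
	"os"
)

func exampleEnvParsing() {
	os.Setenv("GRPC_GO_IGNORE_TXT_ERRORS", "FALSE")
	fmt.Println(boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)) // false (comparison is case-insensitive)

	os.Setenv("GRPC_RING_HASH_CAP", "999999999")
	fmt.Println(uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)) // 8388608 (clamped to max)
}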
|
36
vendor/google.golang.org/grpc/internal/envconfig/observability.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package envconfig
|
||||
|
||||
import "os"
|
||||
|
||||
const (
|
||||
envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG"
|
||||
envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
|
||||
)
|
||||
|
||||
var (
|
||||
// ObservabilityConfig is the json configuration for the gcp/observability
|
||||
// package specified directly in the envObservabilityConfig env var.
|
||||
ObservabilityConfig = os.Getenv(envObservabilityConfig)
|
||||
// ObservabilityConfigFile is the json configuration for the
|
||||
// gcp/observability specified in a file with the location specified in
|
||||
// envObservabilityConfigFile env var.
|
||||
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
|
||||
)
|
92
vendor/google.golang.org/grpc/internal/envconfig/xds.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
|
||||
// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
|
||||
// Do not use this and read from env directly. Its value is read and kept in
|
||||
// variable XDSBootstrapFileName.
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
|
||||
// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
|
||||
// content. Do not use this and read from env directly. Its value is read
|
||||
// and kept in variable XDSBootstrapFileContent.
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
|
||||
)
|
||||
|
||||
var (
|
||||
// XDSBootstrapFileName holds the name of the file which contains xDS
|
||||
// bootstrap configuration. Users can specify the location of the bootstrap
|
||||
// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
|
||||
// XDSBootstrapFileContent holds the content of the xDS bootstrap
|
||||
// configuration. Users can specify the bootstrap config by setting the
|
||||
// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
|
||||
// XDSRingHash indicates whether ring hash support is enabled, which can be
|
||||
// disabled by setting the environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
|
||||
XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
|
||||
// XDSClientSideSecurity is used to control processing of security
|
||||
// configuration on the client-side.
|
||||
//
|
||||
// Note that there is no env var protection for the server-side because we
|
||||
// have a brand new API on the server-side and users explicitly need to use
|
||||
// the new API to get security integration on the server.
|
||||
XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
|
||||
// XDSAggregateAndDNS indicates whether processing of aggregated cluster
|
||||
// and DNS cluster is enabled, which can be enabled by setting the
|
||||
// environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
|
||||
// "true".
|
||||
XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
|
||||
|
||||
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
|
||||
// which can be disabled by setting the environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
|
||||
XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
|
||||
// XDSOutlierDetection indicates whether outlier detection support is
|
||||
// enabled, which can be disabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
|
||||
XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
|
||||
// XDSFederation indicates whether federation support is enabled, which can
|
||||
// be enabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
|
||||
XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false)
|
||||
|
||||
// XDSRLS indicates whether processing of Cluster Specifier plugins and
|
||||
// support for the RLS CLuster Specifier is enabled, which can be enabled by
|
||||
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
|
||||
// "true".
|
||||
XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)
|
||||
|
||||
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
|
||||
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
|
||||
)
|
126
vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpclog (internal) defines depth logging for grpc.
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Logger is the logger used for the non-depth log functions.
|
||||
var Logger LoggerV2
|
||||
|
||||
// DepthLogger is the logger used for the depth log functions.
|
||||
var DepthLogger DepthLoggerV2
|
||||
|
||||
// InfoDepth logs to the INFO log at the specified depth.
|
||||
func InfoDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.InfoDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Infoln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// WarningDepth logs to the WARNING log at the specified depth.
|
||||
func WarningDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.WarningDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Warningln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorDepth logs to the ERROR log at the specified depth.
|
||||
func ErrorDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.ErrorDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Errorln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// FatalDepth logs to the FATAL log at the specified depth.
|
||||
func FatalDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.FatalDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Fatalln(args...)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// LoggerV2 does underlying logging work for grpclog.
|
||||
// This is a copy of the LoggerV2 defined in the external grpclog package. It
|
||||
// is defined here to avoid a circular dependency.
|
||||
type LoggerV2 interface {
|
||||
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
|
||||
Info(args ...interface{})
|
||||
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
Infoln(args ...interface{})
|
||||
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
Infof(format string, args ...interface{})
|
||||
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
|
||||
Warning(args ...interface{})
|
||||
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
Warningln(args ...interface{})
|
||||
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
Warningf(format string, args ...interface{})
|
||||
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
Error(args ...interface{})
|
||||
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
Errorln(args ...interface{})
|
||||
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
Errorf(format string, args ...interface{})
|
||||
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatal(args ...interface{})
|
||||
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalln(args ...interface{})
|
||||
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalf(format string, args ...interface{})
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
V(l int) bool
|
||||
}
|
||||
|
||||
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
|
||||
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
||||
// depth set for trivial functions the logger may ignore.
|
||||
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
|
||||
// It is defined here to avoid a circular dependency.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type DepthLoggerV2 interface {
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
}
|
81
vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// PrefixLogger does logging with a prefix.
|
||||
//
|
||||
// Logging methods on a nil *PrefixLogger log without any prefix.
|
||||
type PrefixLogger struct {
|
||||
logger DepthLoggerV2
|
||||
prefix string
|
||||
}
|
||||
|
||||
// Infof does info logging.
|
||||
func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
// Handle nil, so the tests can pass in a nil logger.
|
||||
format = pl.prefix + format
|
||||
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Warningf does warning logging.
|
||||
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Errorf does error logging.
|
||||
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Debugf does info logging at verbose level 2.
|
||||
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
if !Logger.V(2) {
|
||||
return
|
||||
}
|
||||
if pl != nil {
|
||||
// Handle nil, so the tests can pass in a nil logger.
|
||||
format = pl.prefix + format
|
||||
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// NewPrefixLogger creates a prefix logger with the given prefix.
|
||||
func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
|
||||
return &PrefixLogger{logger: logger, prefix: prefix}
|
||||
}
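A brief illustrative sketch (hypothetical function name): every message logged through the prefix logger carries the fixed prefix.

package grpclog

func examplePrefixLogger(dl DepthLoggerV2) {
	pl := NewPrefixLogger(dl, "[transport] ")
	pl.Infof("connection to %s established", "10.0.0.1:443") // logged as "[transport] connection to 10.0.0.1:443 established"
}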
|
74
vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcrand implements math/rand functions in a concurrent-safe way
|
||||
// with a global random source, independent of math/rand's global source.
|
||||
package grpcrand
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
r = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
mu sync.Mutex
|
||||
)
|
||||
|
||||
// Int implements rand.Int on the grpcrand global source.
|
||||
func Int() int {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int()
|
||||
}
|
||||
|
||||
// Int63n implements rand.Int63n on the grpcrand global source.
|
||||
func Int63n(n int64) int64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int63n(n)
|
||||
}
|
||||
|
||||
// Intn implements rand.Intn on the grpcrand global source.
|
||||
func Intn(n int) int {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Intn(n)
|
||||
}
|
||||
|
||||
// Int31n implements rand.Int31n on the grpcrand global source.
|
||||
func Int31n(n int32) int32 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int31n(n)
|
||||
}
|
||||
|
||||
// Float64 implements rand.Float64 on the grpcrand global source.
|
||||
func Float64() float64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Float64()
|
||||
}
|
||||
|
||||
// Uint64 implements rand.Uint64 on the grpcrand global source.
|
||||
func Uint64() uint64 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Uint64()
|
||||
}
|
61
vendor/google.golang.org/grpc/internal/grpcsync/event.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcsync implements additional synchronization primitives built upon
|
||||
// the sync package.
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Event represents a one-time event that may occur in the future.
|
||||
type Event struct {
|
||||
fired int32
|
||||
c chan struct{}
|
||||
o sync.Once
|
||||
}
|
||||
|
||||
// Fire causes e to complete. It is safe to call multiple times, and
|
||||
// concurrently. It returns true iff this call to Fire caused the signaling
|
||||
// channel returned by Done to close.
|
||||
func (e *Event) Fire() bool {
|
||||
ret := false
|
||||
e.o.Do(func() {
|
||||
atomic.StoreInt32(&e.fired, 1)
|
||||
close(e.c)
|
||||
ret = true
|
||||
})
|
||||
return ret
|
||||
}
|
||||
|
||||
// Done returns a channel that will be closed when Fire is called.
|
||||
func (e *Event) Done() <-chan struct{} {
|
||||
return e.c
|
||||
}
|
||||
|
||||
// HasFired returns true if Fire has been called.
|
||||
func (e *Event) HasFired() bool {
|
||||
return atomic.LoadInt32(&e.fired) == 1
|
||||
}
|
||||
|
||||
// NewEvent returns a new, ready-to-use Event.
|
||||
func NewEvent() *Event {
|
||||
return &Event{c: make(chan struct{})}
|
||||
}
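A short usage sketch (illustrative, same package): only the call that actually closes the channel gets Fire() == true.

package grpcsync

import "fmt"

func exampleEvent() {
	e := NewEvent()
	go func() { e.Fire() }()

	<-e.Done()                // unblocks once Fire has closed the channel
	fmt.Println(e.HasFired()) // true
	fmt.Println(e.Fire())     // false: only the first Fire returns true
}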
|
32
vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// OnceFunc returns a function wrapping f which ensures f is only executed
|
||||
// once even if the returned function is executed multiple times.
|
||||
func OnceFunc(f func()) func() {
|
||||
var once sync.Once
|
||||
return func() {
|
||||
once.Do(f)
|
||||
}
|
||||
}
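A short usage sketch (illustrative): the wrapped function runs at most once, however many times the returned closure is invoked.

package grpcsync

import "fmt"

func exampleOnceFunc() {
	warn := OnceFunc(func() { fmt.Println("logged exactly once") })
	warn()
	warn() // second call is a no-op
}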
|
47
vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
)
|
||||
|
||||
// RegisteredCompressorNames holds names of the registered compressors.
|
||||
var RegisteredCompressorNames []string
|
||||
|
||||
// IsCompressorNameRegistered returns true when name is available in the registry.
|
||||
func IsCompressorNameRegistered(name string) bool {
|
||||
for _, compressor := range RegisteredCompressorNames {
|
||||
if compressor == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RegisteredCompressors returns a string of registered compressor names
|
||||
// separated by comma.
|
||||
func RegisteredCompressors() string {
|
||||
if !envconfig.AdvertiseCompressors {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(RegisteredCompressorNames, ",")
|
||||
}
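An illustrative sketch (same package): the registry is populated directly here, whereas in gRPC it is normally filled in as compressors register themselves.

package grpcutil

import "fmt"

func exampleCompressorRegistry() {
	RegisteredCompressorNames = []string{"gzip", "snappy"} // set directly for illustration

	fmt.Println(IsCompressorNameRegistered("gzip"))   // true
	fmt.Println(IsCompressorNameRegistered("brotli")) // false
	fmt.Println(RegisteredCompressors())              // "gzip,snappy", or "" if advertising is disabled
}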
|
63
vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const maxTimeoutValue int64 = 100000000 - 1
|
||||
|
||||
// div does integer division and rounds up the result. Note that this is
|
||||
// equivalent to (d+r-1)/r but has less chance to overflow.
|
||||
func div(d, r time.Duration) int64 {
|
||||
if d%r > 0 {
|
||||
return int64(d/r + 1)
|
||||
}
|
||||
return int64(d / r)
|
||||
}
|
||||
|
||||
// EncodeDuration encodes the duration to the format grpc-timeout header
|
||||
// accepts.
|
||||
//
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
func EncodeDuration(t time.Duration) string {
|
||||
// TODO: This is simplistic and not bandwidth efficient. Improve it.
|
||||
if t <= 0 {
|
||||
return "0n"
|
||||
}
|
||||
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "n"
|
||||
}
|
||||
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "u"
|
||||
}
|
||||
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "m"
|
||||
}
|
||||
if d := div(t, time.Second); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "S"
|
||||
}
|
||||
if d := div(t, time.Minute); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "M"
|
||||
}
|
||||
// Note that maxTimeoutValue * time.Hour > MaxInt64.
|
||||
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
|
||||
}
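An illustrative sketch of the encoder's unit fallback (same package, hypothetical function name); it always rounds up and keeps the value within eight digits.

package grpcutil

import (
	"fmt"
	"time"
)

func exampleEncodeDuration() {
	fmt.Println(EncodeDuration(0))                       // "0n"
	fmt.Println(EncodeDuration(1500 * time.Microsecond)) // "1500000n" (fits within 8 digits of nanoseconds)
	fmt.Println(EncodeDuration(10 * time.Second))        // "10000000u" (falls back to microseconds)
	fmt.Println(EncodeDuration(2 * time.Hour))           // "7200000m"  (falls back to milliseconds)
}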
|
20
vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcutil provides utility functions used across the gRPC codebase.
|
||||
package grpcutil
|
40
vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
type mdExtraKey struct{}
|
||||
|
||||
// WithExtraMetadata creates a new context with incoming md attached.
|
||||
func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
|
||||
return context.WithValue(ctx, mdExtraKey{}, md)
|
||||
}
|
||||
|
||||
// ExtraMetadata returns the incoming metadata in ctx if it exists. The
|
||||
// returned MD should not be modified. Writing to it may cause races.
|
||||
// Modification should be made to copies of the returned MD.
|
||||
func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
|
||||
md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
|
||||
return
|
||||
}
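A usage sketch (illustrative, same package) of attaching extra metadata to a context and reading it back.

package grpcutil

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func exampleExtraMetadata() {
	md := metadata.Pairs("content-type", "application/grpc+proto")
	ctx := WithExtraMetadata(context.Background(), md)

	got, ok := ExtraMetadata(ctx)
	fmt.Println(ok, got.Get("content-type")) // true [application/grpc+proto]
}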
|
88
vendor/google.golang.org/grpc/internal/grpcutil/method.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseMethod splits service and method from the input. It expects format
|
||||
// "/service/method".
|
||||
func ParseMethod(methodName string) (service, method string, _ error) {
|
||||
if !strings.HasPrefix(methodName, "/") {
|
||||
return "", "", errors.New("invalid method name: should start with /")
|
||||
}
|
||||
methodName = methodName[1:]
|
||||
|
||||
pos := strings.LastIndex(methodName, "/")
|
||||
if pos < 0 {
|
||||
return "", "", errors.New("invalid method name: suffix /method is missing")
|
||||
}
|
||||
return methodName[:pos], methodName[pos+1:], nil
|
||||
}
|
||||
|
||||
// baseContentType is the base content-type for gRPC. This is a valid
|
||||
// content-type on its own, but can also include a content-subtype such as
|
||||
// "proto" as a suffix after "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
const baseContentType = "application/grpc"
|
||||
|
||||
// ContentSubtype returns the content-subtype for the given content-type. The
|
||||
// given content-type must be a valid content-type that starts with
|
||||
// "application/grpc". A content-subtype will follow "application/grpc" after a
|
||||
// "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// If contentType is not a valid content-type for gRPC, the boolean
|
||||
// will be false, otherwise true. If content-type == "application/grpc",
|
||||
// "application/grpc+", or "application/grpc;", the boolean will be true,
|
||||
// but no content-subtype will be returned.
|
||||
//
|
||||
// contentType is assumed to be lowercase already.
|
||||
func ContentSubtype(contentType string) (string, bool) {
|
||||
if contentType == baseContentType {
|
||||
return "", true
|
||||
}
|
||||
if !strings.HasPrefix(contentType, baseContentType) {
|
||||
return "", false
|
||||
}
|
||||
// guaranteed since != baseContentType and has baseContentType prefix
|
||||
switch contentType[len(baseContentType)] {
|
||||
case '+', ';':
|
||||
// this will return true for "application/grpc+" or "application/grpc;"
|
||||
// which the previous validContentType function tested to be valid, so we
|
||||
// just say that no content-subtype is specified in this case
|
||||
return contentType[len(baseContentType)+1:], true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// ContentType builds full content type with the given sub-type.
|
||||
//
|
||||
// contentSubtype is assumed to be lowercase
|
||||
func ContentType(contentSubtype string) string {
|
||||
if contentSubtype == "" {
|
||||
return baseContentType
|
||||
}
|
||||
return baseContentType + "+" + contentSubtype
|
||||
}
|
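Illustrative only, not part of the vendored file: a minimal sketch of how the helpers above combine. The method string and sub-type are made up, and the internal import path means only gRPC's own packages could actually compile it.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	// Split a full method name into its service and method parts.
	service, method, err := grpcutil.ParseMethod("/helloworld.Greeter/SayHello")
	fmt.Println(service, method, err) // helloworld.Greeter SayHello <nil>

	// Extract the content-subtype and rebuild the full content-type.
	subtype, ok := grpcutil.ContentSubtype("application/grpc+proto")
	fmt.Println(subtype, ok)                   // proto true
	fmt.Println(grpcutil.ContentType("proto")) // application/grpc+proto
}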
31
vendor/google.golang.org/grpc/internal/grpcutil/regex.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpcutil

import "regexp"

// FullMatchWithRegex returns whether the full text matches the regex provided.
func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
	if len(text) == 0 {
		return re.MatchString(text)
	}
	re.Longest()
	rem := re.FindString(text)
	return len(rem) == len(text)
}
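Illustrative only, not part of the vendored file: a sketch contrasting FullMatchWithRegex with a plain substring match; the pattern and inputs are made up, and the internal import path would only compile inside gRPC itself.

package main

import (
	"fmt"
	"regexp"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	re := regexp.MustCompile("ab+")
	fmt.Println(re.MatchString("xabby"))                  // true: a substring match is enough
	fmt.Println(grpcutil.FullMatchWithRegex(re, "xabby")) // false: the whole text must match
	fmt.Println(grpcutil.FullMatchWithRegex(re, "abbb"))  // true
}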
160
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
/*
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package internal contains gRPC-internal code, to avoid polluting
// the godoc of the top-level grpc package. It must not import any grpc
// symbols to avoid circular dependencies.
package internal

import (
	"context"
	"time"

	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/serviceconfig"
)

var (
	// WithHealthCheckFunc is set by dialoptions.go
	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
	// HealthCheckFunc is used to provide client-side LB channel health checking
	HealthCheckFunc HealthChecker
	// BalancerUnregister is exported by package balancer to unregister a balancer.
	BalancerUnregister func(name string)
	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
	// default, but tests may wish to set it lower for convenience.
	KeepaliveMinPingTime = 10 * time.Second
	// ParseServiceConfig parses a JSON representation of the service config.
	ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
	// EqualServiceConfigForTesting is for testing service config generation and
	// parsing. Both a and b should be returned by ParseServiceConfig.
	// This function compares the config without rawJSON stripped, in case
	// there's a difference in white space.
	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
	// GetCertificateProviderBuilder returns the registered builder for the
	// given name. This is set by package certprovider for use from xDS
	// bootstrap code while parsing certificate provider configs in the
	// bootstrap file.
	GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
	// stored in the passed in attributes. This is set by
	// credentials/xds/xds.go.
	GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
	// GetServerCredentials returns the transport credentials configured on a
	// gRPC server. An xDS-enabled server needs to know what type of credentials
	// is configured on the underlying gRPC server. This is set by server.go.
	GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
	// DrainServerTransports initiates a graceful close of existing connections
	// on a gRPC server accepted on the provided listener address. An
	// xDS-enabled server invokes this method on a grpc.Server when a particular
	// listener moves to "not-serving" mode.
	DrainServerTransports interface{} // func(*grpc.Server, string)
	// AddGlobalServerOptions adds an array of ServerOption that will be
	// effective globally for newly created servers. The priority will be: 1.
	// user-provided; 2. this method; 3. default values.
	AddGlobalServerOptions interface{} // func(opt ...ServerOption)
	// ClearGlobalServerOptions clears the array of extra ServerOption. This
	// method is useful in testing and benchmarking.
	ClearGlobalServerOptions func()
	// AddGlobalDialOptions adds an array of DialOption that will be effective
	// globally for newly created client channels. The priority will be: 1.
	// user-provided; 2. this method; 3. default values.
	AddGlobalDialOptions interface{} // func(opt ...DialOption)
	// ClearGlobalDialOptions clears the array of extra DialOption. This
	// method is useful in testing and benchmarking.
	ClearGlobalDialOptions func()
	// JoinDialOptions combines the dial options passed as arguments into a
	// single dial option.
	JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
	// JoinServerOptions combines the server options passed as arguments into a
	// single server option.
	JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption

	// WithBinaryLogger returns a DialOption that specifies the binary logger
	// for a ClientConn.
	WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
	// BinaryLogger returns a ServerOption that can set the binary logger for a
	// server.
	BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption

	// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
	// the provided xds bootstrap config instead of the global configuration from
	// the supported environment variables. The resolver.Builder is meant to be
	// used in conjunction with the grpc.WithResolvers DialOption.
	//
	// Testing Only
	//
	// This function should ONLY be used for testing and may not work with some
	// other features, including the CSDS service.
	NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)

	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
	// variable.
	//
	// TODO: Remove this function once the RLS env var is removed.
	RegisterRLSClusterSpecifierPluginForTesting func()

	// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
	// Specifier Plugin for testing purposes. This is needed because there is no way
	// to unregister the RLS Cluster Specifier Plugin after registering it solely
	// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
	//
	// TODO: Remove this function once the RLS env var is removed.
	UnregisterRLSClusterSpecifierPluginForTesting func()

	// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
	// purposes, regardless of the RBAC environment variable.
	//
	// TODO: Remove this function once the RBAC env var is removed.
	RegisterRBACHTTPFilterForTesting func()

	// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
	// testing purposes. This is needed because there is no way to unregister the
	// HTTP Filter after registering it solely for testing purposes using
	// RegisterRBACHTTPFilterForTesting().
	//
	// TODO: Remove this function once the RBAC env var is removed.
	UnregisterRBACHTTPFilterForTesting func()
)

// HealthChecker defines the signature of the client-side LB channel health checking function.
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error

const (
	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
	CredsBundleModeFallback = "fallback"
	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
	// mode.
	CredsBundleModeBalancer = "balancer"
	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
	// that supports backend returned by grpclb balancer.
	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
)

// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
//
// It currently has an experimental suffix which would be removed once
// end-to-end testing of the policy is completed.
const RLSLoadBalancingPolicyName = "rls_experimental"
120
vendor/google.golang.org/grpc/internal/metadata/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package metadata contains functions to set and get metadata from addresses.
//
// This package is experimental.
package metadata

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

type mdKeyType string

const mdKey = mdKeyType("grpc.internal.address.metadata")

type mdValue metadata.MD

func (m mdValue) Equal(o interface{}) bool {
	om, ok := o.(mdValue)
	if !ok {
		return false
	}
	if len(m) != len(om) {
		return false
	}
	for k, v := range m {
		ov := om[k]
		if len(ov) != len(v) {
			return false
		}
		for i, ve := range v {
			if ov[i] != ve {
				return false
			}
		}
	}
	return true
}

// Get returns the metadata of addr.
func Get(addr resolver.Address) metadata.MD {
	attrs := addr.Attributes
	if attrs == nil {
		return nil
	}
	md, _ := attrs.Value(mdKey).(mdValue)
	return metadata.MD(md)
}

// Set sets (overrides) the metadata in addr.
//
// When a SubConn is created with this address, the RPCs sent on it will all
// have this metadata.
func Set(addr resolver.Address, md metadata.MD) resolver.Address {
	addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
	return addr
}

// Validate returns an error if the input md contains invalid keys or values.
//
// If the header is not a pseudo-header, the following items are checked:
// - header names must contain one or more characters from this set [0-9 a-z _ - .].
// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
func Validate(md metadata.MD) error {
	for k, vals := range md {
		// pseudo-header will be ignored
		if k[0] == ':' {
			continue
		}
		// check key: index the string to get bytes directly instead of the
		// runes a range loop would decode
		for i := 0; i < len(k); i++ {
			r := k[i]
			if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
				return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
			}
		}
		if strings.HasSuffix(k, "-bin") {
			continue
		}
		// check value
		for _, val := range vals {
			if hasNotPrintable(val) {
				return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
			}
		}
	}
	return nil
}

// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E
func hasNotPrintable(msg string) bool {
	// index the string to get bytes directly instead of the runes a range loop would decode
	for i := 0; i < len(msg); i++ {
		if msg[i] < 0x20 || msg[i] > 0x7E {
			return true
		}
	}
	return false
}
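Illustrative only, not part of the vendored file: a sketch of attaching per-address metadata and validating user-supplied metadata with the helpers above. The address and keys are made up, and the internal import path would only compile inside gRPC itself.

package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func main() {
	// Attach metadata to a resolver address and read it back.
	addr := resolver.Address{Addr: "10.0.0.1:443"}
	addr = imetadata.Set(addr, metadata.Pairs("foo", "bar"))
	fmt.Println(imetadata.Get(addr)) // map[foo:[bar]]

	// Validate rejects keys with characters outside [0-9a-z-_.], such as a space.
	fmt.Println(imetadata.Validate(metadata.Pairs("bad key", "x"))) // non-nil error
}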
82
vendor/google.golang.org/grpc/internal/pretty/pretty.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package pretty defines helper functions to pretty-print structs for logging.
package pretty

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	protov1 "github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/encoding/protojson"
	protov2 "google.golang.org/protobuf/proto"
)

const jsonIndent = "  "

// ToJSON marshals the input into a json string.
//
// If marshal fails, it falls back to fmt.Sprintf("%+v").
func ToJSON(e interface{}) string {
	switch ee := e.(type) {
	case protov1.Message:
		mm := jsonpb.Marshaler{Indent: jsonIndent}
		ret, err := mm.MarshalToString(ee)
		if err != nil {
			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
			// messages are not imported, and this will fail because the message
			// is not found.
			return fmt.Sprintf("%+v", ee)
		}
		return ret
	case protov2.Message:
		mm := protojson.MarshalOptions{
			Multiline: true,
			Indent:    jsonIndent,
		}
		ret, err := mm.Marshal(ee)
		if err != nil {
			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
			// messages are not imported, and this will fail because the message
			// is not found.
			return fmt.Sprintf("%+v", ee)
		}
		return string(ret)
	default:
		ret, err := json.MarshalIndent(ee, "", jsonIndent)
		if err != nil {
			return fmt.Sprintf("%+v", ee)
		}
		return string(ret)
	}
}

// FormatJSON formats the input json bytes with indentation.
//
// If Indent fails, it returns the unchanged input as string.
func FormatJSON(b []byte) string {
	var out bytes.Buffer
	err := json.Indent(&out, b, "", jsonIndent)
	if err != nil {
		return string(b)
	}
	return out.String()
}
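Illustrative only, not part of the vendored file: ToJSON on a plain (non-proto) value takes the json.MarshalIndent path shown above; the struct here is made up, and the internal import path would only compile inside gRPC itself.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/pretty"
)

func main() {
	cfg := struct {
		Name    string
		Retries int
	}{Name: "example", Retries: 3}

	// Prints the struct as indented JSON; on a marshal error it would fall
	// back to fmt.Sprintf("%+v", ...).
	fmt.Println(pretty.ToJSON(cfg))
}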
167
vendor/google.golang.org/grpc/internal/resolver/config_selector.go
generated
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package resolver provides internal resolver-related functionality.
|
||||
package resolver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/internal/serviceconfig"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// ConfigSelector controls what configuration to use for every RPC.
|
||||
type ConfigSelector interface {
|
||||
// Selects the configuration for the RPC, or terminates it using the error.
|
||||
// This error will be converted by the gRPC library to a status error with
|
||||
// code UNKNOWN if it is not returned as a status error.
|
||||
SelectConfig(RPCInfo) (*RPCConfig, error)
|
||||
}
|
||||
|
||||
// RPCInfo contains RPC information needed by a ConfigSelector.
|
||||
type RPCInfo struct {
|
||||
// Context is the user's context for the RPC and contains headers and
|
||||
// application timeout. It is passed for interception purposes and for
|
||||
// efficiency reasons. SelectConfig should not be blocking.
|
||||
Context context.Context
|
||||
Method string // i.e. "/Service/Method"
|
||||
}
|
||||
|
||||
// RPCConfig describes the configuration to use for each RPC.
|
||||
type RPCConfig struct {
|
||||
// The context to use for the remainder of the RPC; can pass info to LB
|
||||
// policy or affect timeout or metadata.
|
||||
Context context.Context
|
||||
MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
|
||||
OnCommitted func() // Called when the RPC has been committed (retries no longer possible)
|
||||
Interceptor ClientInterceptor
|
||||
}
|
||||
|
||||
// ClientStream is the same as grpc.ClientStream, but defined here for circular
|
||||
// dependency reasons.
|
||||
type ClientStream interface {
|
||||
// Header returns the header metadata received from the server if there
|
||||
// is any. It blocks if the metadata is not ready to read.
|
||||
Header() (metadata.MD, error)
|
||||
// Trailer returns the trailer metadata from the server, if there is any.
|
||||
// It must only be called after stream.CloseAndRecv has returned, or
|
||||
// stream.Recv has returned a non-nil error (including io.EOF).
|
||||
Trailer() metadata.MD
|
||||
// CloseSend closes the send direction of the stream. It closes the stream
|
||||
// when non-nil error is met. It is also not safe to call CloseSend
|
||||
// concurrently with SendMsg.
|
||||
CloseSend() error
|
||||
// Context returns the context for this stream.
|
||||
//
|
||||
// It should not be called until after Header or RecvMsg has returned. Once
|
||||
// called, subsequent client-side retries are disabled.
|
||||
Context() context.Context
|
||||
// SendMsg is generally called by generated code. On error, SendMsg aborts
|
||||
// the stream. If the error was generated by the client, the status is
|
||||
// returned directly; otherwise, io.EOF is returned and the status of
|
||||
// the stream may be discovered using RecvMsg.
|
||||
//
|
||||
// SendMsg blocks until:
|
||||
// - There is sufficient flow control to schedule m with the transport, or
|
||||
// - The stream is done, or
|
||||
// - The stream breaks.
|
||||
//
|
||||
// SendMsg does not wait until the message is received by the server. An
|
||||
// untimely stream closure may result in lost messages. To ensure delivery,
|
||||
// users should ensure the RPC completed successfully using RecvMsg.
|
||||
//
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||
// to call SendMsg on the same stream in different goroutines. It is also
|
||||
// not safe to call CloseSend concurrently with SendMsg.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message into m or the stream is
|
||||
// done. It returns io.EOF when the stream completes successfully. On
|
||||
// any other error, the stream is aborted and the error contains the RPC
|
||||
// status.
|
||||
//
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not
|
||||
// safe to call RecvMsg on the same stream in different goroutines.
|
||||
RecvMsg(m interface{}) error
|
||||
}
|
||||
|
||||
// ClientInterceptor is an interceptor for gRPC client streams.
|
||||
type ClientInterceptor interface {
|
||||
// NewStream produces a ClientStream for an RPC which may optionally use
|
||||
// the provided function to produce a stream for delegation. Note:
|
||||
// RPCInfo.Context should not be used (will be nil).
|
||||
//
|
||||
// done is invoked when the RPC is finished using its connection, or could
|
||||
// not be assigned a connection. RPC operations may still occur on
|
||||
// ClientStream after done is called, since the interceptor is invoked by
|
||||
// application-layer operations. done must never be nil when called.
|
||||
NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
|
||||
}
|
||||
|
||||
// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side.
|
||||
type ServerInterceptor interface {
|
||||
// AllowRPC checks if an incoming RPC is allowed to proceed based on
|
||||
// information about connection RPC was received on, and HTTP Headers. This
|
||||
// information will be piped into context.
|
||||
AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
|
||||
}
|
||||
|
||||
type csKeyType string
|
||||
|
||||
const csKey = csKeyType("grpc.internal.resolver.configSelector")
|
||||
|
||||
// SetConfigSelector sets the config selector in state and returns the new
|
||||
// state.
|
||||
func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
|
||||
state.Attributes = state.Attributes.WithValue(csKey, cs)
|
||||
return state
|
||||
}
|
||||
|
||||
// GetConfigSelector retrieves the config selector from state, if present, and
|
||||
// returns it or nil if absent.
|
||||
func GetConfigSelector(state resolver.State) ConfigSelector {
|
||||
cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
|
||||
return cs
|
||||
}
|
||||
|
||||
// SafeConfigSelector allows for safe switching of ConfigSelector
|
||||
// implementations such that previous values are guaranteed to not be in use
|
||||
// when UpdateConfigSelector returns.
|
||||
type SafeConfigSelector struct {
|
||||
mu sync.RWMutex
|
||||
cs ConfigSelector
|
||||
}
|
||||
|
||||
// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
|
||||
// all uses of the previous ConfigSelector have completed.
|
||||
func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
|
||||
scs.mu.Lock()
|
||||
defer scs.mu.Unlock()
|
||||
scs.cs = cs
|
||||
}
|
||||
|
||||
// SelectConfig defers to the current ConfigSelector in scs.
|
||||
func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
|
||||
scs.mu.RLock()
|
||||
defer scs.mu.RUnlock()
|
||||
return scs.cs.SelectConfig(r)
|
||||
}
|
458
vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
generated
vendored
Normal file
@@ -0,0 +1,458 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package dns implements a dns resolver to be installed as the default resolver
|
||||
// in grpc.
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
|
||||
// addresses from SRV records. Must not be changed after init time.
|
||||
var EnableSRVLookups = false
|
||||
|
||||
var logger = grpclog.Component("dns")
|
||||
|
||||
// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
|
||||
// single variable for testing the resolver?
|
||||
var (
|
||||
newTimer = time.NewTimer
|
||||
newTimerDNSResRate = time.NewTimer
|
||||
)
|
||||
|
||||
func init() {
|
||||
resolver.Register(NewBuilder())
|
||||
}
|
||||
|
||||
const (
|
||||
defaultPort = "443"
|
||||
defaultDNSSvrPort = "53"
|
||||
golang = "GO"
|
||||
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
|
||||
txtPrefix = "_grpc_config."
|
||||
// In DNS, service config is encoded in a TXT record via the mechanism
|
||||
// described in RFC-1464 using the attribute name grpc_config.
|
||||
txtAttribute = "grpc_config="
|
||||
)
|
||||
|
||||
var (
|
||||
errMissingAddr = errors.New("dns resolver: missing address")
|
||||
|
||||
// Addresses ending with a colon that is supposed to be the separator
|
||||
// between host and port is not allowed. E.g. "::" is a valid address as
|
||||
// it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
|
||||
// a colon as the host and port separator
|
||||
errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
|
||||
)
|
||||
|
||||
var (
|
||||
defaultResolver netResolver = net.DefaultResolver
|
||||
// To prevent excessive re-resolution, we enforce a rate limit on DNS
|
||||
// resolution requests.
|
||||
minDNSResRate = 30 * time.Second
|
||||
)
|
||||
|
||||
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
var dialer net.Dialer
|
||||
return dialer.DialContext(ctx, network, authority)
|
||||
}
|
||||
}
|
||||
|
||||
var customAuthorityResolver = func(authority string) (netResolver, error) {
|
||||
host, port, err := parseTarget(authority, defaultDNSSvrPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
authorityWithPort := net.JoinHostPort(host, port)
|
||||
|
||||
return &net.Resolver{
|
||||
PreferGo: true,
|
||||
Dial: customAuthorityDialler(authorityWithPort),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
|
||||
func NewBuilder() resolver.Builder {
|
||||
return &dnsBuilder{}
|
||||
}
|
||||
|
||||
type dnsBuilder struct{}
|
||||
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
host, port, err := parseTarget(target.Endpoint(), defaultPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// IP address.
|
||||
if ipAddr, ok := formatIP(host); ok {
|
||||
addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
|
||||
cc.UpdateState(resolver.State{Addresses: addr})
|
||||
return deadResolver{}, nil
|
||||
}
|
||||
|
||||
// DNS address (non-IP).
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
d := &dnsResolver{
|
||||
host: host,
|
||||
port: port,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cc: cc,
|
||||
rn: make(chan struct{}, 1),
|
||||
disableServiceConfig: opts.DisableServiceConfig,
|
||||
}
|
||||
|
||||
if target.URL.Host == "" {
|
||||
d.resolver = defaultResolver
|
||||
} else {
|
||||
d.resolver, err = customAuthorityResolver(target.URL.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
d.wg.Add(1)
|
||||
go d.watcher()
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Scheme returns the naming scheme of this resolver builder, which is "dns".
|
||||
func (b *dnsBuilder) Scheme() string {
|
||||
return "dns"
|
||||
}
|
||||
|
||||
type netResolver interface {
|
||||
LookupHost(ctx context.Context, host string) (addrs []string, err error)
|
||||
LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
|
||||
LookupTXT(ctx context.Context, name string) (txts []string, err error)
|
||||
}
|
||||
|
||||
// deadResolver is a resolver that does nothing.
|
||||
type deadResolver struct{}
|
||||
|
||||
func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
|
||||
|
||||
func (deadResolver) Close() {}
|
||||
|
||||
// dnsResolver watches for the name resolution update for a non-IP target.
|
||||
type dnsResolver struct {
|
||||
host string
|
||||
port string
|
||||
resolver netResolver
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
// wg is used to enforce Close() to return after the watcher() goroutine has finished.
|
||||
// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
|
||||
// replace the real lookup functions with mocked ones to facilitate testing.
|
||||
// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
|
||||
// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
|
||||
// has data race with replaceNetFunc (WRITE the lookup function pointers).
|
||||
wg sync.WaitGroup
|
||||
disableServiceConfig bool
|
||||
}
|
||||
|
||||
// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
|
||||
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
|
||||
select {
|
||||
case d.rn <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the dnsResolver.
|
||||
func (d *dnsResolver) Close() {
|
||||
d.cancel()
|
||||
d.wg.Wait()
|
||||
}
|
||||
|
||||
func (d *dnsResolver) watcher() {
|
||||
defer d.wg.Done()
|
||||
backoffIndex := 1
|
||||
for {
|
||||
state, err := d.lookup()
|
||||
if err != nil {
|
||||
// Report error to the underlying grpc.ClientConn.
|
||||
d.cc.ReportError(err)
|
||||
} else {
|
||||
err = d.cc.UpdateState(*state)
|
||||
}
|
||||
|
||||
var timer *time.Timer
|
||||
if err == nil {
|
||||
// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
|
||||
// to prevent constantly re-resolving.
|
||||
backoffIndex = 1
|
||||
timer = newTimerDNSResRate(minDNSResRate)
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
timer.Stop()
|
||||
return
|
||||
case <-d.rn:
|
||||
}
|
||||
} else {
|
||||
// Poll on an error found in DNS Resolver or an error received from ClientConn.
|
||||
timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
|
||||
backoffIndex++
|
||||
}
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
timer.Stop()
|
||||
return
|
||||
case <-timer.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||
if !EnableSRVLookups {
|
||||
return nil, nil
|
||||
}
|
||||
var newAddrs []resolver.Address
|
||||
_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
|
||||
if err != nil {
|
||||
err = handleDNSError(err, "SRV") // may become nil
|
||||
return nil, err
|
||||
}
|
||||
for _, s := range srvs {
|
||||
lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
|
||||
if err != nil {
|
||||
err = handleDNSError(err, "A") // may become nil
|
||||
if err == nil {
|
||||
// If there are other SRV records, look them up and ignore this
|
||||
// one that does not exist.
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
for _, a := range lbAddrs {
|
||||
ip, ok := formatIP(a)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
||||
}
|
||||
addr := ip + ":" + strconv.Itoa(int(s.Port))
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
|
||||
}
|
||||
}
|
||||
return newAddrs, nil
|
||||
}
|
||||
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
// Timeouts and temporary errors should be communicated to gRPC to
|
||||
// attempt another DNS query (with backoff). Other errors should be
|
||||
// suppressed (they may represent the absence of a TXT record).
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
|
||||
logger.Info(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
|
||||
if err != nil {
|
||||
if envconfig.TXTErrIgnore {
|
||||
return nil
|
||||
}
|
||||
if err = handleDNSError(err, "TXT"); err != nil {
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
var res string
|
||||
for _, s := range ss {
|
||||
res += s
|
||||
}
|
||||
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
||||
if !strings.HasPrefix(res, txtAttribute) {
|
||||
logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
// This is not an error; it is the equivalent of not having a service config.
|
||||
return nil
|
||||
}
|
||||
sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
|
||||
return d.cc.ParseServiceConfig(sc)
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
|
||||
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
|
||||
if err != nil {
|
||||
err = handleDNSError(err, "A")
|
||||
return nil, err
|
||||
}
|
||||
newAddrs := make([]resolver.Address, 0, len(addrs))
|
||||
for _, a := range addrs {
|
||||
ip, ok := formatIP(a)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
||||
}
|
||||
addr := ip + ":" + d.port
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
|
||||
}
|
||||
return newAddrs, nil
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||
srv, srvErr := d.lookupSRV()
|
||||
addrs, hostErr := d.lookupHost()
|
||||
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
|
||||
return nil, hostErr
|
||||
}
|
||||
|
||||
state := resolver.State{Addresses: addrs}
|
||||
if len(srv) > 0 {
|
||||
state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
|
||||
}
|
||||
if !d.disableServiceConfig {
|
||||
state.ServiceConfig = d.lookupTXT()
|
||||
}
|
||||
return &state, nil
|
||||
}
|
||||
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||
// If addr is an IPv4 address, return the addr and ok = true.
|
||||
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
|
||||
func formatIP(addr string) (addrIP string, ok bool) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
return "", false
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return addr, true
|
||||
}
|
||||
return "[" + addr + "]", true
|
||||
}
|
||||
|
||||
// parseTarget takes the user input target string and default port, returns formatted host and port info.
|
||||
// If target doesn't specify a port, set the port to be the defaultPort.
|
||||
// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
|
||||
// are stripped when setting the host.
|
||||
// examples:
|
||||
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
|
||||
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
|
||||
// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
|
||||
// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
|
||||
func parseTarget(target, defaultPort string) (host, port string, err error) {
|
||||
if target == "" {
|
||||
return "", "", errMissingAddr
|
||||
}
|
||||
if ip := net.ParseIP(target); ip != nil {
|
||||
// target is an IPv4 or IPv6(without brackets) address
|
||||
return target, defaultPort, nil
|
||||
}
|
||||
if host, port, err = net.SplitHostPort(target); err == nil {
|
||||
if port == "" {
|
||||
// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
|
||||
return "", "", errEndsWithColon
|
||||
}
|
||||
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
||||
if host == "" {
|
||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
|
||||
host = "localhost"
|
||||
}
|
||||
return host, port, nil
|
||||
}
|
||||
if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
|
||||
// target doesn't have port
|
||||
return host, port, nil
|
||||
}
|
||||
return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
|
||||
}
|
||||
|
||||
type rawChoice struct {
|
||||
ClientLanguage *[]string `json:"clientLanguage,omitempty"`
|
||||
Percentage *int `json:"percentage,omitempty"`
|
||||
ClientHostName *[]string `json:"clientHostName,omitempty"`
|
||||
ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"`
|
||||
}
|
||||
|
||||
func containsString(a *[]string, b string) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
for _, c := range *a {
|
||||
if c == b {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func chosenByPercentage(a *int) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
return grpcrand.Intn(100)+1 <= *a
|
||||
}
|
||||
|
||||
func canaryingSC(js string) string {
|
||||
if js == "" {
|
||||
return ""
|
||||
}
|
||||
var rcs []rawChoice
|
||||
err := json.Unmarshal([]byte(js), &rcs)
|
||||
if err != nil {
|
||||
logger.Warningf("dns: error parsing service config json: %v", err)
|
||||
return ""
|
||||
}
|
||||
cliHostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
logger.Warningf("dns: error getting client hostname: %v", err)
|
||||
return ""
|
||||
}
|
||||
var sc string
|
||||
for _, c := range rcs {
|
||||
if !containsString(c.ClientLanguage, golang) ||
|
||||
!chosenByPercentage(c.Percentage) ||
|
||||
!containsString(c.ClientHostName, cliHostname) ||
|
||||
c.ServiceConfig == nil {
|
||||
continue
|
||||
}
|
||||
sc = string(*c.ServiceConfig)
|
||||
break
|
||||
}
|
||||
return sc
|
||||
}
|
64
vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package passthrough implements a pass-through resolver. It sends the target
// name without scheme back to gRPC as resolved address.
package passthrough

import (
	"errors"

	"google.golang.org/grpc/resolver"
)

const scheme = "passthrough"

type passthroughBuilder struct{}

func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	if target.Endpoint() == "" && opts.Dialer == nil {
		return nil, errors.New("passthrough: received empty target in Build()")
	}
	r := &passthroughResolver{
		target: target,
		cc:     cc,
	}
	r.start()
	return r, nil
}

func (*passthroughBuilder) Scheme() string {
	return scheme
}

type passthroughResolver struct {
	target resolver.Target
	cc     resolver.ClientConn
}

func (r *passthroughResolver) start() {
	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}

func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}

func (*passthroughResolver) Close() {}

func init() {
	resolver.Register(&passthroughBuilder{})
}
74
vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package unix implements a resolver for unix targets.
package unix

import (
	"fmt"

	"google.golang.org/grpc/internal/transport/networktype"
	"google.golang.org/grpc/resolver"
)

const unixScheme = "unix"
const unixAbstractScheme = "unix-abstract"

type builder struct {
	scheme string
}

func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	if target.URL.Host != "" {
		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
	}

	// gRPC was parsing the dial target manually before PR #4817, and we
	// switched to using url.Parse() in that PR. To avoid breaking existing
	// resolver implementations we ended up stripping the leading "/" from the
	// endpoint. This obviously does not work for the "unix" scheme. Hence we
	// end up using the parsed URL instead.
	endpoint := target.URL.Path
	if endpoint == "" {
		endpoint = target.URL.Opaque
	}
	addr := resolver.Address{Addr: endpoint}
	if b.scheme == unixAbstractScheme {
		// We cannot prepend \0 as C++ gRPC does, because in Golang '@' is used
		// to signify that we do not want a trailing \0 in the address.
		addr.Addr = "@" + addr.Addr
	}
	cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
	return &nopResolver{}, nil
}

func (b *builder) Scheme() string {
	return b.scheme
}

type nopResolver struct {
}

func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}

func (*nopResolver) Close() {}

func init() {
	resolver.Register(&builder{scheme: unixScheme})
	resolver.Register(&builder{scheme: unixAbstractScheme})
}
180
vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
generated
vendored
Normal file
@@ -0,0 +1,180 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package serviceconfig contains utility functions to parse service config.
|
||||
package serviceconfig
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
externalserviceconfig "google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
// BalancerConfig wraps the name and config associated with one load balancing
|
||||
// policy. It corresponds to a single entry of the loadBalancingConfig field
|
||||
// from ServiceConfig.
|
||||
//
|
||||
// It implements the json.Unmarshaler interface.
|
||||
//
|
||||
// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
|
||||
type BalancerConfig struct {
|
||||
Name string
|
||||
Config externalserviceconfig.LoadBalancingConfig
|
||||
}
|
||||
|
||||
type intermediateBalancerConfig []map[string]json.RawMessage
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
//
|
||||
// It marshals the balancer and config into a length-1 slice
|
||||
// ([]map[string]config).
|
||||
func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
|
||||
if bc.Config == nil {
|
||||
// If config is nil, return empty config `{}`.
|
||||
return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
|
||||
}
|
||||
c, err := json.Marshal(bc.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
//
|
||||
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
|
||||
// config. This method iterates through that list in order, and stops at the
|
||||
// first policy that is supported.
|
||||
// - If the config for the first supported policy is invalid, the whole service
|
||||
// config is invalid.
|
||||
// - If the list doesn't contain any supported policy, the whole service config
|
||||
// is invalid.
|
||||
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||
var ir intermediateBalancerConfig
|
||||
err := json.Unmarshal(b, &ir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var names []string
|
||||
for i, lbcfg := range ir {
|
||||
if len(lbcfg) != 1 {
|
||||
return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
|
||||
}
|
||||
|
||||
var (
|
||||
name string
|
||||
jsonCfg json.RawMessage
|
||||
)
|
||||
// Get the key:value pair from the map. We have already made sure that
|
||||
// the map contains a single entry.
|
||||
for name, jsonCfg = range lbcfg {
|
||||
}
|
||||
|
||||
names = append(names, name)
|
||||
builder := balancer.Get(name)
|
||||
if builder == nil {
|
||||
// If the balancer is not registered, move on to the next config.
|
||||
// This is not an error.
|
||||
continue
|
||||
}
|
||||
bc.Name = name
|
||||
|
||||
parser, ok := builder.(balancer.ConfigParser)
|
||||
if !ok {
|
||||
if string(jsonCfg) != "{}" {
|
||||
logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
|
||||
}
|
||||
// Stop at this, though the builder doesn't support parsing config.
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg, err := parser.ParseConfig(jsonCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
|
||||
}
|
||||
bc.Config = cfg
|
||||
return nil
|
||||
}
|
||||
// This is reached when the for loop iterates over all entries, but didn't
|
||||
// return. This means we had a loadBalancingConfig slice but did not
|
||||
// encounter a registered policy. The config is considered invalid in this
|
||||
// case.
|
||||
return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
|
||||
}
|
||||
|
||||
// MethodConfig defines the configuration recommended by the service providers for a
|
||||
// particular method.
|
||||
type MethodConfig struct {
|
||||
// WaitForReady indicates whether RPCs sent to this method should wait until
|
||||
// the connection is ready by default (!failfast). The value specified via the
|
||||
// gRPC client API will override the value set here.
|
||||
WaitForReady *bool
|
||||
// Timeout is the default timeout for RPCs sent to this method. The actual
|
||||
// deadline used will be the minimum of the value specified here and the value
|
||||
// set by the application via the gRPC client API. If either one is not set,
|
||||
// then the other will be used. If neither is set, then the RPC has no deadline.
|
||||
Timeout *time.Duration
|
||||
// MaxReqSize is the maximum allowed payload size for an individual request in a
|
||||
// stream (client->server) in bytes. The size which is measured is the serialized
|
||||
// payload after per-message compression (but before stream compression) in bytes.
|
||||
// The actual value used is the minimum of the value specified here and the value set
|
||||
// by the application via the gRPC client API. If either one is not set, then the other
|
||||
// will be used. If neither is set, then the built-in default is used.
|
||||
MaxReqSize *int
|
||||
// MaxRespSize is the maximum allowed payload size for an individual response in a
|
||||
// stream (server->client) in bytes.
|
||||
MaxRespSize *int
|
||||
// RetryPolicy configures retry options for the method.
|
||||
RetryPolicy *RetryPolicy
|
||||
}
|
||||
|
||||
// RetryPolicy defines the go-native version of the retry policy defined by the
|
||||
// service config here:
|
||||
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
|
||||
type RetryPolicy struct {
|
||||
// MaxAttempts is the maximum number of attempts, including the original RPC.
|
||||
//
|
||||
// This field is required and must be two or greater.
|
||||
MaxAttempts int
|
||||
|
||||
// Exponential backoff parameters. The initial retry attempt will occur at
|
||||
// random(0, initialBackoff). In general, the nth attempt will occur at
|
||||
// random(0,
|
||||
// min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
|
||||
//
|
||||
// These fields are required and must be greater than zero.
|
||||
InitialBackoff time.Duration
|
||||
MaxBackoff time.Duration
|
||||
BackoffMultiplier float64
|
||||
|
||||
// The set of status codes which may be retried.
|
||||
//
|
||||
// Status codes are specified as strings, e.g., "UNAVAILABLE".
|
||||
//
|
||||
// This field is required and must be non-empty.
|
||||
// Note: a set is used to store this for easy lookup.
|
||||
RetryableStatusCodes map[codes.Code]bool
|
||||
}
|
176
vendor/google.golang.org/grpc/internal/status/status.go
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package status implements errors returned by gRPC. These errors are
|
||||
// serialized and transmitted on the wire between server and client, and allow
|
||||
// for additional data to be transmitted via the Details field in the status
|
||||
// proto. gRPC service handlers should return an error created by this
|
||||
// package, and gRPC clients should expect a corresponding error to be
|
||||
// returned from the RPC call.
|
||||
//
|
||||
// This package upholds the invariants that a non-nil error may not
|
||||
// contain an OK code, and an OK code must result in a nil error.
|
||||
package status
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// Status represents an RPC status code, message, and details. It is immutable
|
||||
// and should be created with New, Newf, or FromProto.
|
||||
type Status struct {
|
||||
s *spb.Status
|
||||
}
|
||||
|
||||
// New returns a Status representing c and msg.
|
||||
func New(c codes.Code, msg string) *Status {
|
||||
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
|
||||
}
|
||||
|
||||
// Newf returns New(c, fmt.Sprintf(format, a...)).
|
||||
func Newf(c codes.Code, format string, a ...interface{}) *Status {
|
||||
return New(c, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// FromProto returns a Status representing s.
|
||||
func FromProto(s *spb.Status) *Status {
|
||||
return &Status{s: proto.Clone(s).(*spb.Status)}
|
||||
}
|
||||
|
||||
// Err returns an error representing c and msg. If c is OK, returns nil.
|
||||
func Err(c codes.Code, msg string) error {
|
||||
return New(c, msg).Err()
|
||||
}
|
||||
|
||||
// Errorf returns Error(c, fmt.Sprintf(format, a...)).
|
||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||
return Err(c, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
// Code returns the status code contained in s.
|
||||
func (s *Status) Code() codes.Code {
|
||||
if s == nil || s.s == nil {
|
||||
return codes.OK
|
||||
}
|
||||
return codes.Code(s.s.Code)
|
||||
}
|
||||
|
||||
// Message returns the message contained in s.
|
||||
func (s *Status) Message() string {
|
||||
if s == nil || s.s == nil {
|
||||
return ""
|
||||
}
|
||||
return s.s.Message
|
||||
}
|
||||
|
||||
// Proto returns s's status as an spb.Status proto message.
|
||||
func (s *Status) Proto() *spb.Status {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return proto.Clone(s.s).(*spb.Status)
|
||||
}
|
||||
|
||||
// Err returns an immutable error representing s; returns nil if s.Code() is OK.
|
||||
func (s *Status) Err() error {
|
||||
if s.Code() == codes.OK {
|
||||
return nil
|
||||
}
|
||||
return &Error{s: s}
|
||||
}
|
||||
|
||||
// WithDetails returns a new status with the provided details messages appended to the status.
|
||||
// If any errors are encountered, it returns nil and the first error encountered.
|
||||
func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
|
||||
if s.Code() == codes.OK {
|
||||
return nil, errors.New("no error details for status with code OK")
|
||||
}
|
||||
// s.Code() != OK implies that s.Proto() != nil.
|
||||
p := s.Proto()
|
||||
for _, detail := range details {
|
||||
any, err := ptypes.MarshalAny(detail)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Details = append(p.Details, any)
|
||||
}
|
||||
return &Status{s: p}, nil
|
||||
}
|
||||
|
||||
// Details returns a slice of details messages attached to the status.
|
||||
// If a detail cannot be decoded, the error is returned in place of the detail.
|
||||
func (s *Status) Details() []interface{} {
|
||||
if s == nil || s.s == nil {
|
||||
return nil
|
||||
}
|
||||
details := make([]interface{}, 0, len(s.s.Details))
|
||||
for _, any := range s.s.Details {
|
||||
detail := &ptypes.DynamicAny{}
|
||||
if err := ptypes.UnmarshalAny(any, detail); err != nil {
|
||||
details = append(details, err)
|
||||
continue
|
||||
}
|
||||
details = append(details, detail.Message)
|
||||
}
|
||||
return details
|
||||
}
|
||||
|
||||
func (s *Status) String() string {
|
||||
return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
|
||||
}
|
||||
|
||||
// Error wraps a pointer of a status proto. It implements error and Status,
|
||||
// and a nil *Error should never be returned by this package.
|
||||
type Error struct {
|
||||
s *Status
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return e.s.String()
|
||||
}
|
||||
|
||||
// GRPCStatus returns the Status represented by se.
|
||||
func (e *Error) GRPCStatus() *Status {
|
||||
return e.s
|
||||
}
|
||||
|
||||
// Is implements future error.Is functionality.
|
||||
// A Error is equivalent if the code and message are identical.
|
||||
func (e *Error) Is(target error) bool {
|
||||
tse, ok := target.(*Error)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return proto.Equal(e.s.s, tse.s.s)
|
||||
}
|
||||
|
||||
// IsRestrictedControlPlaneCode returns whether the status includes a code
|
||||
// restricted for control plane usage as defined by gRFC A54.
|
||||
func IsRestrictedControlPlaneCode(s *Status) bool {
|
||||
switch s.Code() {
|
||||
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
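
The internal package above backs the public google.golang.org/grpc/status API. As a rough sketch of how these helpers compose (the detail message, code, and strings are invented for the example):

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Build a NotFound status and attach a typed detail message.
	st := status.New(codes.NotFound, "user not found")
	st, err := st.WithDetails(&errdetails.ErrorInfo{Reason: "USER_MISSING", Domain: "example.org"})
	if err != nil {
		panic(err)
	}

	// A handler would return st.Err(); a client can recover the Status from it.
	rpcErr := st.Err()
	recovered, _ := status.FromError(rpcErr)
	fmt.Println(recovered.Code(), recovered.Message(), len(recovered.Details()))
}
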
112
vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package syscall provides functionalities that grpc uses to get low-level operating system
|
||||
// stats/info.
|
||||
package syscall
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
// GetCPUTime returns how much CPU time has passed since the start of this process.
|
||||
func GetCPUTime() int64 {
|
||||
var ts unix.Timespec
|
||||
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
return ts.Nano()
|
||||
}
|
||||
|
||||
// Rusage is an alias for syscall.Rusage under linux environment.
|
||||
type Rusage = syscall.Rusage
|
||||
|
||||
// GetRusage returns the resource usage of current process.
|
||||
func GetRusage() *Rusage {
|
||||
rusage := new(Rusage)
|
||||
syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
|
||||
return rusage
|
||||
}
|
||||
|
||||
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
|
||||
// between two Rusage structs.
|
||||
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
|
||||
var (
|
||||
utimeDiffs = latest.Utime.Sec - first.Utime.Sec
|
||||
utimeDiffus = latest.Utime.Usec - first.Utime.Usec
|
||||
stimeDiffs = latest.Stime.Sec - first.Stime.Sec
|
||||
stimeDiffus = latest.Stime.Usec - first.Stime.Usec
|
||||
)
|
||||
|
||||
uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
|
||||
sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
|
||||
|
||||
return uTimeElapsed, sTimeElapsed
|
||||
}
|
||||
|
||||
// SetTCPUserTimeout sets the TCP user timeout on a connection's socket
|
||||
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
|
||||
tcpconn, ok := conn.(*net.TCPConn)
|
||||
if !ok {
|
||||
// not a TCP connection. exit early
|
||||
return nil
|
||||
}
|
||||
rawConn, err := tcpconn.SyscallConn()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting raw connection: %v", err)
|
||||
}
|
||||
err = rawConn.Control(func(fd uintptr) {
|
||||
err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error setting option on socket: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTCPUserTimeout gets the TCP user timeout on a connection's socket
|
||||
func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
|
||||
tcpconn, ok := conn.(*net.TCPConn)
|
||||
if !ok {
|
||||
err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
|
||||
return
|
||||
}
|
||||
rawConn, err := tcpconn.SyscallConn()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error getting raw connection: %v", err)
|
||||
return
|
||||
}
|
||||
err = rawConn.Control(func(fd uintptr) {
|
||||
opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
|
||||
})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error getting option on socket: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
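
A usage sketch of the two TCP helpers above; it assumes code living inside the grpc module itself (internal packages cannot be imported from elsewhere), and the endpoint is made up.

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc/internal/syscall"
)

func main() {
	conn, err := net.Dial("tcp", "example.org:443") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// On Linux this sets TCP_USER_TIMEOUT on the underlying socket; the
	// non-linux build (next file) turns both calls into no-ops.
	if err := syscall.SetTCPUserTimeout(conn, 20*time.Second); err != nil {
		log.Fatal(err)
	}
	if v, err := syscall.GetTCPUserTimeout(conn); err == nil {
		log.Printf("TCP_USER_TIMEOUT is %d ms", v)
	}
}
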
77
vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package syscall provides functionalities that grpc uses to get low-level
|
||||
// operating system stats/info.
|
||||
package syscall
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var once sync.Once
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
func log() {
|
||||
once.Do(func() {
|
||||
logger.Info("CPU time info is unavailable on non-linux environments.")
|
||||
})
|
||||
}
|
||||
|
||||
// GetCPUTime returns how much CPU time has passed since the start of this
|
||||
// process. It always returns 0 under non-linux environments.
|
||||
func GetCPUTime() int64 {
|
||||
log()
|
||||
return 0
|
||||
}
|
||||
|
||||
// Rusage is an empty struct under non-linux environments.
|
||||
type Rusage struct{}
|
||||
|
||||
// GetRusage is a no-op function under non-linux environments.
|
||||
func GetRusage() *Rusage {
|
||||
log()
|
||||
return nil
|
||||
}
|
||||
|
||||
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
|
||||
// between two Rusage structs. It is a no-op function under non-linux environments.
|
||||
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
|
||||
log()
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// SetTCPUserTimeout is a no-op function under non-linux environments.
|
||||
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
|
||||
log()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTCPUserTimeout is a no-op function under non-linux environments.
|
||||
// A negative return value indicates the operation is not supported.
|
||||
func GetTCPUserTimeout(conn net.Conn) (int, error) {
|
||||
log()
|
||||
return -1, nil
|
||||
}
|
141
vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// bdpLimit is the maximum value the flow control windows will be increased
|
||||
// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
|
||||
// Since this is only a limit, it is safe to make it optimistic.
|
||||
bdpLimit = (1 << 20) * 16
|
||||
// alpha is a constant factor used to keep a moving average
|
||||
// of RTTs.
|
||||
alpha = 0.9
|
||||
// If the current bdp sample is greater than or equal to
|
||||
// our beta * our estimated bdp and the current bandwidth
|
||||
// sample is the maximum bandwidth observed so far, we
|
||||
// increase our bdp estimate by a factor of gamma.
|
||||
beta = 0.66
|
||||
// To keep our bdp estimate smaller than or equal to twice the real BDP,
// we should multiply the current sample by 4/3; however, to round things out,
// we use 2 as the multiplication factor.
|
||||
gamma = 2
|
||||
)
|
||||
|
||||
// Adding arbitrary data to ping so that its ack can be identified.
|
||||
// Easter-egg: what does the ping message say?
|
||||
var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
|
||||
|
||||
type bdpEstimator struct {
|
||||
// sentAt is the time when the ping was sent.
|
||||
sentAt time.Time
|
||||
|
||||
mu sync.Mutex
|
||||
// bdp is the current bdp estimate.
|
||||
bdp uint32
|
||||
// sample is the number of bytes received in one measurement cycle.
|
||||
sample uint32
|
||||
// bwMax is the maximum bandwidth noted so far (bytes/sec).
|
||||
bwMax float64
|
||||
// bool to keep track of the beginning of a new measurement cycle.
|
||||
isSent bool
|
||||
// Callback to update the window sizes.
|
||||
updateFlowControl func(n uint32)
|
||||
// sampleCount is the number of samples taken so far.
|
||||
sampleCount uint64
|
||||
// round trip time (seconds)
|
||||
rtt float64
|
||||
}
|
||||
|
||||
// timesnap registers the time bdp ping was sent out so that
|
||||
// network rtt can be calculated when its ack is received.
|
||||
// It is called (by controller) when the bdpPing is
|
||||
// being written on the wire.
|
||||
func (b *bdpEstimator) timesnap(d [8]byte) {
|
||||
if bdpPing.data != d {
|
||||
return
|
||||
}
|
||||
b.sentAt = time.Now()
|
||||
}
|
||||
|
||||
// add adds bytes to the current sample for calculating bdp.
|
||||
// It returns true only if a ping must be sent. This can be used
|
||||
// by the caller (handleData) to make decision about batching
|
||||
// a window update with it.
|
||||
func (b *bdpEstimator) add(n uint32) bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.bdp == bdpLimit {
|
||||
return false
|
||||
}
|
||||
if !b.isSent {
|
||||
b.isSent = true
|
||||
b.sample = n
|
||||
b.sentAt = time.Time{}
|
||||
b.sampleCount++
|
||||
return true
|
||||
}
|
||||
b.sample += n
|
||||
return false
|
||||
}
|
||||
|
||||
// calculate is called when an ack for a bdp ping is received.
|
||||
// Here we calculate the current bdp and bandwidth sample and
|
||||
// decide if the flow control windows should go up.
|
||||
func (b *bdpEstimator) calculate(d [8]byte) {
|
||||
// Check if the ping acked for was the bdp ping.
|
||||
if bdpPing.data != d {
|
||||
return
|
||||
}
|
||||
b.mu.Lock()
|
||||
rttSample := time.Since(b.sentAt).Seconds()
|
||||
if b.sampleCount < 10 {
|
||||
// Bootstrap rtt with an average of first 10 rtt samples.
|
||||
b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
|
||||
} else {
|
||||
// Weight the recent past more heavily.
|
||||
b.rtt += (rttSample - b.rtt) * float64(alpha)
|
||||
}
|
||||
b.isSent = false
|
||||
// The number of bytes accumulated so far in the sample is smaller
|
||||
// than or equal to 1.5 times the real BDP on a saturated connection.
|
||||
bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
|
||||
if bwCurrent > b.bwMax {
|
||||
b.bwMax = bwCurrent
|
||||
}
|
||||
// If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is
|
||||
// greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we
|
||||
// should update our perception of the network BDP.
|
||||
if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
|
||||
sampleFloat := float64(b.sample)
|
||||
b.bdp = uint32(gamma * sampleFloat)
|
||||
if b.bdp > bdpLimit {
|
||||
b.bdp = bdpLimit
|
||||
}
|
||||
bdp := b.bdp
|
||||
b.mu.Unlock()
|
||||
b.updateFlowControl(bdp)
|
||||
return
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
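
The calculate method above keeps an exponentially weighted moving average of the RTT: a plain running average while bootstrapping, then an alpha-weighted update. A minimal standalone sketch of that update rule, with invented sample values and a smaller bootstrap window (the real estimator averages the first 10 samples):

package main

import "fmt"

func main() {
	const alpha = 0.9   // weight given to the newest sample, as in the estimator above
	const bootstrap = 3 // the real estimator averages the first 10 samples
	samples := []float64{0.050, 0.052, 0.047, 0.120, 0.049} // seconds, hypothetical

	var rtt float64
	for i, s := range samples {
		if i < bootstrap {
			rtt += (s - rtt) / float64(i+1) // running average while bootstrapping
		} else {
			rtt += (s - rtt) * alpha // afterwards, weight the recent past more heavily
		}
		fmt.Printf("sample %d: rtt estimate %.4fs\n", i+1, rtt)
	}
}
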
998
vendor/google.golang.org/grpc/internal/transport/controlbuf.go
generated
vendored
Normal file
@@ -0,0 +1,998 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
|
||||
e.SetMaxDynamicTableSizeLimit(v)
|
||||
}
|
||||
|
||||
type itemNode struct {
|
||||
it interface{}
|
||||
next *itemNode
|
||||
}
|
||||
|
||||
type itemList struct {
|
||||
head *itemNode
|
||||
tail *itemNode
|
||||
}
|
||||
|
||||
func (il *itemList) enqueue(i interface{}) {
|
||||
n := &itemNode{it: i}
|
||||
if il.tail == nil {
|
||||
il.head, il.tail = n, n
|
||||
return
|
||||
}
|
||||
il.tail.next = n
|
||||
il.tail = n
|
||||
}
|
||||
|
||||
// peek returns the first item in the list without removing it from the
|
||||
// list.
|
||||
func (il *itemList) peek() interface{} {
|
||||
return il.head.it
|
||||
}
|
||||
|
||||
func (il *itemList) dequeue() interface{} {
|
||||
if il.head == nil {
|
||||
return nil
|
||||
}
|
||||
i := il.head.it
|
||||
il.head = il.head.next
|
||||
if il.head == nil {
|
||||
il.tail = nil
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (il *itemList) dequeueAll() *itemNode {
|
||||
h := il.head
|
||||
il.head, il.tail = nil, nil
|
||||
return h
|
||||
}
|
||||
|
||||
func (il *itemList) isEmpty() bool {
|
||||
return il.head == nil
|
||||
}
|
||||
|
||||
// The following defines various control items which could flow through
|
||||
// the control buffer of transport. They represent different aspects of
|
||||
// control tasks, e.g., flow control, settings, stream resetting, etc.
|
||||
|
||||
// maxQueuedTransportResponseFrames is the most queued "transport response"
|
||||
// frames we will buffer before preventing new reads from occurring on the
|
||||
// transport. These are control frames sent in response to client requests,
|
||||
// such as RST_STREAM due to bad headers or settings acks.
|
||||
const maxQueuedTransportResponseFrames = 50
|
||||
|
||||
type cbItem interface {
|
||||
isTransportResponseFrame() bool
|
||||
}
|
||||
|
||||
// registerStream is used to register an incoming stream with loopy writer.
|
||||
type registerStream struct {
|
||||
streamID uint32
|
||||
wq *writeQuota
|
||||
}
|
||||
|
||||
func (*registerStream) isTransportResponseFrame() bool { return false }
|
||||
|
||||
// headerFrame is also used to register stream on the client-side.
|
||||
type headerFrame struct {
|
||||
streamID uint32
|
||||
hf []hpack.HeaderField
|
||||
endStream bool // Valid on server side.
|
||||
initStream func(uint32) error // Used only on the client side.
|
||||
onWrite func()
|
||||
wq *writeQuota // write quota for the stream created.
|
||||
cleanup *cleanupStream // Valid on the server side.
|
||||
onOrphaned func(error) // Valid on client-side
|
||||
}
|
||||
|
||||
func (h *headerFrame) isTransportResponseFrame() bool {
|
||||
return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
|
||||
}
|
||||
|
||||
type cleanupStream struct {
|
||||
streamID uint32
|
||||
rst bool
|
||||
rstCode http2.ErrCode
|
||||
onWrite func()
|
||||
}
|
||||
|
||||
func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
|
||||
|
||||
type earlyAbortStream struct {
|
||||
httpStatus uint32
|
||||
streamID uint32
|
||||
contentSubtype string
|
||||
status *status.Status
|
||||
rst bool
|
||||
}
|
||||
|
||||
func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type dataFrame struct {
|
||||
streamID uint32
|
||||
endStream bool
|
||||
h []byte
|
||||
d []byte
|
||||
// onEachWrite is called every time
|
||||
// a part of d is written out.
|
||||
onEachWrite func()
|
||||
}
|
||||
|
||||
func (*dataFrame) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type incomingWindowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type outgoingWindowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
|
||||
return false // window updates are throttled by thresholds
|
||||
}
|
||||
|
||||
type incomingSettings struct {
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
|
||||
|
||||
type outgoingSettings struct {
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
func (*outgoingSettings) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type incomingGoAway struct {
|
||||
}
|
||||
|
||||
func (*incomingGoAway) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn error // if set, loopyWriter will exit, resulting in conn closure
|
||||
}
|
||||
|
||||
func (*goAway) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (*ping) isTransportResponseFrame() bool { return true }
|
||||
|
||||
type outFlowControlSizeRequest struct {
|
||||
resp chan uint32
|
||||
}
|
||||
|
||||
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
|
||||
|
||||
// closeConnection is an instruction to tell the loopy writer to flush the
|
||||
// framer and exit, which will cause the transport's connection to be closed
|
||||
// (by the client or server). The transport itself will close after the reader
|
||||
// encounters the EOF caused by the connection closure.
|
||||
type closeConnection struct{}
|
||||
|
||||
func (closeConnection) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type outStreamState int
|
||||
|
||||
const (
|
||||
active outStreamState = iota
|
||||
empty
|
||||
waitingOnStreamQuota
|
||||
)
|
||||
|
||||
type outStream struct {
|
||||
id uint32
|
||||
state outStreamState
|
||||
itl *itemList
|
||||
bytesOutStanding int
|
||||
wq *writeQuota
|
||||
|
||||
next *outStream
|
||||
prev *outStream
|
||||
}
|
||||
|
||||
func (s *outStream) deleteSelf() {
|
||||
if s.prev != nil {
|
||||
s.prev.next = s.next
|
||||
}
|
||||
if s.next != nil {
|
||||
s.next.prev = s.prev
|
||||
}
|
||||
s.next, s.prev = nil, nil
|
||||
}
|
||||
|
||||
type outStreamList struct {
|
||||
// Following are sentinel objects that mark the
|
||||
// beginning and end of the list. They do not
|
||||
// contain any item lists. All valid objects are
|
||||
// inserted in between them.
|
||||
// This is needed so that an outStream object can
|
||||
// deleteSelf() in O(1) time without knowing which
|
||||
// list it belongs to.
|
||||
head *outStream
|
||||
tail *outStream
|
||||
}
|
||||
|
||||
func newOutStreamList() *outStreamList {
|
||||
head, tail := new(outStream), new(outStream)
|
||||
head.next = tail
|
||||
tail.prev = head
|
||||
return &outStreamList{
|
||||
head: head,
|
||||
tail: tail,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *outStreamList) enqueue(s *outStream) {
|
||||
e := l.tail.prev
|
||||
e.next = s
|
||||
s.prev = e
|
||||
s.next = l.tail
|
||||
l.tail.prev = s
|
||||
}
|
||||
|
||||
// remove from the beginning of the list.
|
||||
func (l *outStreamList) dequeue() *outStream {
|
||||
b := l.head.next
|
||||
if b == l.tail {
|
||||
return nil
|
||||
}
|
||||
b.deleteSelf()
|
||||
return b
|
||||
}
|
||||
|
||||
// controlBuffer is a way to pass information to loopy.
|
||||
// Information is passed as specific struct types called control frames.
|
||||
// A control frame not only represents data, messages or headers to be sent out
|
||||
// but can also be used to instruct loopy to update its internal state.
|
||||
// It shouldn't be confused with an HTTP2 frame, although some of the control frames
|
||||
// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
|
||||
type controlBuffer struct {
|
||||
ch chan struct{}
|
||||
done <-chan struct{}
|
||||
mu sync.Mutex
|
||||
consumerWaiting bool
|
||||
list *itemList
|
||||
err error
|
||||
|
||||
// transportResponseFrames counts the number of queued items that represent
|
||||
// the response of an action initiated by the peer. trfChan is created
|
||||
// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
|
||||
// closed and nilled when transportResponseFrames drops below the
|
||||
// threshold. Both fields are protected by mu.
|
||||
transportResponseFrames int
|
||||
trfChan atomic.Value // chan struct{}
|
||||
}
|
||||
|
||||
func newControlBuffer(done <-chan struct{}) *controlBuffer {
|
||||
return &controlBuffer{
|
||||
ch: make(chan struct{}, 1),
|
||||
list: &itemList{},
|
||||
done: done,
|
||||
}
|
||||
}
|
||||
|
||||
// throttle blocks if there are too many incomingSettings/cleanupStreams in the
|
||||
// controlbuf.
|
||||
func (c *controlBuffer) throttle() {
|
||||
ch, _ := c.trfChan.Load().(chan struct{})
|
||||
if ch != nil {
|
||||
select {
|
||||
case <-ch:
|
||||
case <-c.done:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controlBuffer) put(it cbItem) error {
|
||||
_, err := c.executeAndPut(nil, it)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
|
||||
var wakeUp bool
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return false, c.err
|
||||
}
|
||||
if f != nil {
|
||||
if !f(it) { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
if c.consumerWaiting {
|
||||
wakeUp = true
|
||||
c.consumerWaiting = false
|
||||
}
|
||||
c.list.enqueue(it)
|
||||
if it.isTransportResponseFrame() {
|
||||
c.transportResponseFrames++
|
||||
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
|
||||
// We are adding the frame that puts us over the threshold; create
|
||||
// a throttling channel.
|
||||
c.trfChan.Store(make(chan struct{}))
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
if wakeUp {
|
||||
select {
|
||||
case c.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Note argument f should never be nil.
|
||||
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return false, c.err
|
||||
}
|
||||
if !f(it) { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
c.mu.Unlock()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
for {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return nil, c.err
|
||||
}
|
||||
if !c.list.isEmpty() {
|
||||
h := c.list.dequeue().(cbItem)
|
||||
if h.isTransportResponseFrame() {
|
||||
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
|
||||
// We are removing the frame that put us over the
|
||||
// threshold; close and clear the throttling channel.
|
||||
ch := c.trfChan.Load().(chan struct{})
|
||||
close(ch)
|
||||
c.trfChan.Store((chan struct{})(nil))
|
||||
}
|
||||
c.transportResponseFrames--
|
||||
}
|
||||
c.mu.Unlock()
|
||||
return h, nil
|
||||
}
|
||||
if !block {
|
||||
c.mu.Unlock()
|
||||
return nil, nil
|
||||
}
|
||||
c.consumerWaiting = true
|
||||
c.mu.Unlock()
|
||||
select {
|
||||
case <-c.ch:
|
||||
case <-c.done:
|
||||
return nil, errors.New("transport closed by client")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controlBuffer) finish() {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
c.err = ErrConnClosing
|
||||
// There may be headers for streams in the control buffer.
|
||||
// These streams need to be cleaned out since the transport
|
||||
// is still not aware of these yet.
|
||||
for head := c.list.dequeueAll(); head != nil; head = head.next {
|
||||
hdr, ok := head.it.(*headerFrame)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if hdr.onOrphaned != nil { // It will be nil on the server-side.
|
||||
hdr.onOrphaned(ErrConnClosing)
|
||||
}
|
||||
}
|
||||
// In case throttle() is currently in flight, it needs to be unblocked.
|
||||
// Otherwise, the transport may not close, since the transport is closed by
|
||||
// the reader encountering the connection error.
|
||||
ch, _ := c.trfChan.Load().(chan struct{})
|
||||
if ch != nil {
|
||||
close(ch)
|
||||
}
|
||||
c.trfChan.Store((chan struct{})(nil))
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
type side int
|
||||
|
||||
const (
|
||||
clientSide side = iota
|
||||
serverSide
|
||||
)
|
||||
|
||||
// Loopy receives frames from the control buffer.
|
||||
// Each frame is handled individually; most of the work done by loopy goes
|
||||
// into handling data frames. Loopy maintains a queue of active streams, and each
|
||||
// stream maintains a queue of data frames; as loopy receives data frames
|
||||
// they get added to the queue of the relevant stream.
|
||||
// Loopy goes over this list of active streams by processing one node every iteration,
|
||||
// thereby closely resembling round-robin scheduling over all streams. While
|
||||
// processing a stream, loopy writes out data bytes from this stream capped by the min
|
||||
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
|
||||
type loopyWriter struct {
|
||||
side side
|
||||
cbuf *controlBuffer
|
||||
sendQuota uint32
|
||||
oiws uint32 // outbound initial window size.
|
||||
// estdStreams is a map of all established streams that are not cleaned up yet.
|
||||
// On client-side, this is all streams whose headers were sent out.
|
||||
// On server-side, this is all streams whose headers were received.
|
||||
estdStreams map[uint32]*outStream // Established streams.
|
||||
// activeStreams is a linked-list of all streams that have data to send and some
|
||||
// stream-level flow control quota.
|
||||
// Each of these streams internally has a list of data items (and perhaps trailers
// on the server-side) to be sent out.
|
||||
activeStreams *outStreamList
|
||||
framer *framer
|
||||
hBuf *bytes.Buffer // The buffer for HPACK encoding.
|
||||
hEnc *hpack.Encoder // HPACK encoder.
|
||||
bdpEst *bdpEstimator
|
||||
draining bool
|
||||
|
||||
// Side-specific handlers
|
||||
ssGoAwayHandler func(*goAway) (bool, error)
|
||||
}
|
||||
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
||||
var buf bytes.Buffer
|
||||
l := &loopyWriter{
|
||||
side: s,
|
||||
cbuf: cbuf,
|
||||
sendQuota: defaultWindowSize,
|
||||
oiws: defaultWindowSize,
|
||||
estdStreams: make(map[uint32]*outStream),
|
||||
activeStreams: newOutStreamList(),
|
||||
framer: fr,
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
const minBatchSize = 1000
|
||||
|
||||
// run should be run in a separate goroutine.
|
||||
// It reads control frames from controlBuf and processes them by:
|
||||
// 1. Updating loopy's internal state, or/and
|
||||
// 2. Writing out HTTP2 frames on the wire.
|
||||
//
|
||||
// Loopy keeps all active streams with data to send in a linked-list.
|
||||
// All streams in the activeStreams linked-list must have both:
|
||||
// 1. Data to send, and
|
||||
// 2. Stream level flow control quota available.
|
||||
//
|
||||
// In each iteration of run loop, other than processing the incoming control
|
||||
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
|
||||
// This results in writing of HTTP2 frames into an underlying write buffer.
|
||||
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
|
||||
// As an optimization, to increase the batch size for each flush, loopy yields the processor once,
// if the batch size is too low, to give stream goroutines a chance to fill it up.
|
||||
func (l *loopyWriter) run() (err error) {
|
||||
// Always flush the writer before exiting in case there are pending frames
|
||||
// to be sent.
|
||||
defer l.framer.writer.Flush()
|
||||
for {
|
||||
it, err := l.cbuf.get(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = l.handle(it); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = l.processData(); err != nil {
|
||||
return err
|
||||
}
|
||||
gosched := true
|
||||
hasdata:
|
||||
for {
|
||||
it, err := l.cbuf.get(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if it != nil {
|
||||
if err = l.handle(it); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = l.processData(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue hasdata
|
||||
}
|
||||
isEmpty, err := l.processData()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isEmpty {
|
||||
continue hasdata
|
||||
}
|
||||
if gosched {
|
||||
gosched = false
|
||||
if l.framer.writer.offset < minBatchSize {
|
||||
runtime.Gosched()
|
||||
continue hasdata
|
||||
}
|
||||
}
|
||||
l.framer.writer.Flush()
|
||||
break hasdata
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
|
||||
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
||||
// Otherwise update the quota.
|
||||
if w.streamID == 0 {
|
||||
l.sendQuota += w.increment
|
||||
return nil
|
||||
}
|
||||
// Find the stream and update it.
|
||||
if str, ok := l.estdStreams[w.streamID]; ok {
|
||||
str.bytesOutStanding -= int(w.increment)
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
return l.framer.fr.WriteSettings(s.ss...)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
||||
if err := l.applySettings(s.ss); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.framer.fr.WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
l.estdStreams[h.streamID] = str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
if l.side == serverSide {
|
||||
str, ok := l.estdStreams[h.streamID]
|
||||
if !ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Case 1.A: Server is responding back with headers.
|
||||
if !h.endStream {
|
||||
return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
|
||||
}
|
||||
// else: Case 1.B: Server wants to close stream.
|
||||
|
||||
if str.state != empty { // either active or waiting on stream quota.
|
||||
// add it to str's list of items.
|
||||
str.itl.enqueue(h)
|
||||
return nil
|
||||
}
|
||||
if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
return l.cleanupStreamHandler(h.cleanup)
|
||||
}
|
||||
// Case 2: Client wants to originate stream.
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
return l.originateStream(str, h)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
|
||||
// l.draining is set when handling a GoAway, in which case we want to avoid
// creating new streams.
|
||||
if l.draining {
|
||||
// TODO: provide a better error with the reason we are in draining.
|
||||
hdr.onOrphaned(errStreamDrain)
|
||||
return nil
|
||||
}
|
||||
if err := hdr.initStream(str.id); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
l.estdStreams[str.id] = str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
|
||||
if onWrite != nil {
|
||||
onWrite()
|
||||
}
|
||||
l.hBuf.Reset()
|
||||
for _, f := range hf {
|
||||
if err := l.hEnc.WriteField(f); err != nil {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
var (
|
||||
err error
|
||||
endHeaders, first bool
|
||||
)
|
||||
first = true
|
||||
for !endHeaders {
|
||||
size := l.hBuf.Len()
|
||||
if size > http2MaxFrameLen {
|
||||
size = http2MaxFrameLen
|
||||
} else {
|
||||
endHeaders = true
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
|
||||
StreamID: streamID,
|
||||
BlockFragment: l.hBuf.Next(size),
|
||||
EndStream: endStream,
|
||||
EndHeaders: endHeaders,
|
||||
})
|
||||
} else {
|
||||
err = l.framer.fr.WriteContinuation(
|
||||
streamID,
|
||||
endHeaders,
|
||||
l.hBuf.Next(size),
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
str, ok := l.estdStreams[df.streamID]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
// If we got data for a stream it means that
|
||||
// stream was originated and the headers were sent out.
|
||||
str.itl.enqueue(df)
|
||||
if str.state == empty {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
if !p.ack {
|
||||
l.bdpEst.timesnap(p.data)
|
||||
}
|
||||
return l.framer.fr.WritePing(p.ack, p.data)
|
||||
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
||||
o.resp <- l.sendQuota
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
c.onWrite()
|
||||
if str, ok := l.estdStreams[c.streamID]; ok {
|
||||
// On the server side it could be a trailers-only response or
|
||||
// a RST_STREAM before stream initialization thus the stream might
|
||||
// not be established yet.
|
||||
delete(l.estdStreams, c.streamID)
|
||||
str.deleteSelf()
|
||||
}
|
||||
if c.rst { // If RST_STREAM needs to be sent.
|
||||
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l.draining && len(l.estdStreams) == 0 {
|
||||
return errors.New("finished processing active streams while in draining mode")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
|
||||
if l.side == clientSide {
|
||||
return errors.New("earlyAbortStream not handled on client")
|
||||
}
|
||||
// In case the caller forgets to set the http status, default to 200.
|
||||
if eas.httpStatus == 0 {
|
||||
eas.httpStatus = 200
|
||||
}
|
||||
headerFields := []hpack.HeaderField{
|
||||
{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
|
||||
{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
|
||||
{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
|
||||
{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
|
||||
}
|
||||
|
||||
if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if eas.rst {
|
||||
if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||||
if l.side == clientSide {
|
||||
l.draining = true
|
||||
if len(l.estdStreams) == 0 {
|
||||
return errors.New("received GOAWAY with no active streams")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||||
// Handling of outgoing GoAway is very specific to side.
|
||||
if l.ssGoAwayHandler != nil {
|
||||
draining, err := l.ssGoAwayHandler(g)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.draining = draining
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) closeConnectionHandler() error {
|
||||
// Exit loopyWriter entirely by returning an error here. This will lead to
|
||||
// the transport closing the connection, and, ultimately, transport
|
||||
// closure.
|
||||
return ErrConnClosing
|
||||
}
|
||||
|
||||
func (l *loopyWriter) handle(i interface{}) error {
|
||||
switch i := i.(type) {
|
||||
case *incomingWindowUpdate:
|
||||
return l.incomingWindowUpdateHandler(i)
|
||||
case *outgoingWindowUpdate:
|
||||
return l.outgoingWindowUpdateHandler(i)
|
||||
case *incomingSettings:
|
||||
return l.incomingSettingsHandler(i)
|
||||
case *outgoingSettings:
|
||||
return l.outgoingSettingsHandler(i)
|
||||
case *headerFrame:
|
||||
return l.headerHandler(i)
|
||||
case *registerStream:
|
||||
return l.registerStreamHandler(i)
|
||||
case *cleanupStream:
|
||||
return l.cleanupStreamHandler(i)
|
||||
case *earlyAbortStream:
|
||||
return l.earlyAbortStreamHandler(i)
|
||||
case *incomingGoAway:
|
||||
return l.incomingGoAwayHandler(i)
|
||||
case *dataFrame:
|
||||
return l.preprocessData(i)
|
||||
case *ping:
|
||||
return l.pingHandler(i)
|
||||
case *goAway:
|
||||
return l.goAwayHandler(i)
|
||||
case *outFlowControlSizeRequest:
|
||||
return l.outFlowControlSizeRequestHandler(i)
|
||||
case closeConnection:
|
||||
return l.closeConnectionHandler()
|
||||
default:
|
||||
return fmt.Errorf("transport: unknown control message type %T", i)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
for _, s := range ss {
|
||||
switch s.ID {
|
||||
case http2.SettingInitialWindowSize:
|
||||
o := l.oiws
|
||||
l.oiws = s.Val
|
||||
if o < l.oiws {
|
||||
// If the new limit is greater make all depleted streams active.
|
||||
for _, stream := range l.estdStreams {
|
||||
if stream.state == waitingOnStreamQuota {
|
||||
stream.state = active
|
||||
l.activeStreams.enqueue(stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
case http2.SettingHeaderTableSize:
|
||||
updateHeaderTblSize(l.hEnc, s.Val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processData removes the first stream from active streams, writes out at most 16KB
|
||||
// of its data and then puts it at the end of activeStreams if there's still more data
|
||||
// to be sent and the stream has some stream-level flow control quota left.
|
||||
func (l *loopyWriter) processData() (bool, error) {
|
||||
if l.sendQuota == 0 {
|
||||
return true, nil
|
||||
}
|
||||
str := l.activeStreams.dequeue() // Remove the first stream.
|
||||
if str == nil {
|
||||
return true, nil
|
||||
}
|
||||
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item in this stream.
|
||||
// A data item is represented by a dataFrame, since it later translates into
|
||||
// multiple HTTP2 data frames.
|
||||
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
|
||||
// As an optimization to keep wire traffic low, data from d is copied into h to make the frame as
// big as possible, up to the maximum HTTP2 frame size.
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
|
||||
// Client sends out empty data frame with endStream = true
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
|
||||
return false, err
|
||||
}
|
||||
str.itl.dequeue() // remove the empty data item from stream
|
||||
if str.itl.isEmpty() {
|
||||
str.state = empty
|
||||
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
|
||||
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
var (
|
||||
buf []byte
|
||||
)
|
||||
// Figure out the maximum size we can send
|
||||
maxSize := http2MaxFrameLen
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
|
||||
str.state = waitingOnStreamQuota
|
||||
return false, nil
|
||||
} else if maxSize > strQuota {
|
||||
maxSize = strQuota
|
||||
}
|
||||
if maxSize > int(l.sendQuota) { // connection-level flow control.
|
||||
maxSize = int(l.sendQuota)
|
||||
}
|
||||
// Compute how much of the header and data we can send within quota and max frame length
|
||||
hSize := min(maxSize, len(dataItem.h))
|
||||
dSize := min(maxSize-hSize, len(dataItem.d))
|
||||
if hSize != 0 {
|
||||
if dSize == 0 {
|
||||
buf = dataItem.h
|
||||
} else {
|
||||
// We can add some data to grpc message header to distribute bytes more equally across frames.
|
||||
// Copy on the stack to avoid generating garbage
|
||||
var localBuf [http2MaxFrameLen]byte
|
||||
copy(localBuf[:hSize], dataItem.h)
|
||||
copy(localBuf[hSize:], dataItem.d[:dSize])
|
||||
buf = localBuf[:hSize+dSize]
|
||||
}
|
||||
} else {
|
||||
buf = dataItem.d
|
||||
}
|
||||
|
||||
size := hSize + dSize
|
||||
|
||||
// Now that outgoing flow controls are checked we can replenish str's write quota
|
||||
str.wq.replenish(size)
|
||||
var endStream bool
|
||||
// If this is the last data message on this stream and all of it can be written in this iteration.
|
||||
if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
|
||||
endStream = true
|
||||
}
|
||||
if dataItem.onEachWrite != nil {
|
||||
dataItem.onEachWrite()
|
||||
}
|
||||
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
|
||||
return false, err
|
||||
}
|
||||
str.bytesOutStanding += size
|
||||
l.sendQuota -= uint32(size)
|
||||
dataItem.h = dataItem.h[hSize:]
|
||||
dataItem.d = dataItem.d[dSize:]
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
|
||||
str.itl.dequeue()
|
||||
}
|
||||
if str.itl.isEmpty() {
|
||||
str.state = empty
|
||||
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
|
||||
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
|
||||
str.state = waitingOnStreamQuota
|
||||
} else { // Otherwise add it back to the list of active streams.
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
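
One non-obvious piece of controlBuffer above is its throttling of "transport response" frames: once maxQueuedTransportResponseFrames items are queued, a channel is published through an atomic.Value and throttle() blocks on it until the count drops back below the threshold. A stripped-down, single-goroutine sketch of that gate pattern (the type name and threshold are invented):

package main

import (
	"fmt"
	"sync/atomic"
)

// gate sketches the throttling pattern used by controlBuffer above: once the
// number of queued response frames reaches the threshold, a channel is
// published via atomic.Value; callers of throttle block on it until the
// consumer drains below the threshold and closes the channel.
type gate struct {
	queued    int
	threshold int
	ch        atomic.Value // chan struct{}
}

func (g *gate) enqueue() {
	g.queued++
	if g.queued == g.threshold {
		g.ch.Store(make(chan struct{}))
	}
}

func (g *gate) dequeue() {
	if g.queued == g.threshold {
		close(g.ch.Load().(chan struct{}))
		g.ch.Store((chan struct{})(nil))
	}
	g.queued--
}

// throttle blocks while the gate channel is open, mirroring controlBuffer.throttle.
func (g *gate) throttle() {
	if ch, _ := g.ch.Load().(chan struct{}); ch != nil {
		<-ch
	}
}

func main() {
	g := &gate{threshold: 2}
	g.enqueue()
	g.enqueue() // threshold reached; throttle() would now block
	g.dequeue() // drops below threshold; blocked callers are released
	g.throttle()
	fmt.Println("not throttled")
}
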
55
vendor/google.golang.org/grpc/internal/transport/defaults.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// The default value of flow control window size in HTTP2 spec.
|
||||
defaultWindowSize = 65535
|
||||
// The initial window size for flow control.
|
||||
initialWindowSize = defaultWindowSize // for an RPC
|
||||
infinity = time.Duration(math.MaxInt64)
|
||||
defaultClientKeepaliveTime = infinity
|
||||
defaultClientKeepaliveTimeout = 20 * time.Second
|
||||
defaultMaxStreamsClient = 100
|
||||
defaultMaxConnectionIdle = infinity
|
||||
defaultMaxConnectionAge = infinity
|
||||
defaultMaxConnectionAgeGrace = infinity
|
||||
defaultServerKeepaliveTime = 2 * time.Hour
|
||||
defaultServerKeepaliveTimeout = 20 * time.Second
|
||||
defaultKeepalivePolicyMinTime = 5 * time.Minute
|
||||
// max window limit set by HTTP2 Specs.
|
||||
maxWindowSize = math.MaxInt32
|
||||
// defaultWriteQuota is the default value for number of data
|
||||
// bytes that each stream can schedule before some of it being
|
||||
// flushed out.
|
||||
defaultWriteQuota = 64 * 1024
|
||||
defaultClientMaxHeaderListSize = uint32(16 << 20)
|
||||
defaultServerMaxHeaderListSize = uint32(16 << 20)
|
||||
)
|
||||
|
||||
// MaxStreamID is the upper bound for the stream ID before the current
|
||||
// transport gracefully closes and new transport is created for subsequent RPCs.
|
||||
// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
|
||||
// integer. It's exported so that tests can override it.
|
||||
var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
|
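
As a quick check of the comment above, the constant works out as follows (plain constant arithmetic, printed here only for illustration):

package main

import (
	"fmt"
	"math"
)

func main() {
	// 75% of the maximum 31-bit stream ID; matches the MaxStreamID default above.
	fmt.Println(uint32(math.MaxInt32 * 3 / 4)) // 1610612735
}
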
215
vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
generated
vendored
Normal file
@@ -0,0 +1,215 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// writeQuota is a soft limit on the amount of data a stream can
|
||||
// schedule before some of it is written out.
|
||||
type writeQuota struct {
|
||||
quota int32
|
||||
// get blocks on a read from ch when quota goes less than or equal to zero.
|
||||
// replenish writes on it when quota goes positive again.
|
||||
ch chan struct{}
|
||||
// done is triggered in error case.
|
||||
done <-chan struct{}
|
||||
// replenish is called by loopyWriter to give quota back.
|
||||
// It is implemented as a field so that it can be updated
|
||||
// by tests.
|
||||
replenish func(n int)
|
||||
}
|
||||
|
||||
func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
|
||||
w := &writeQuota{
|
||||
quota: sz,
|
||||
ch: make(chan struct{}, 1),
|
||||
done: done,
|
||||
}
|
||||
w.replenish = w.realReplenish
|
||||
return w
|
||||
}
|
||||
|
||||
func (w *writeQuota) get(sz int32) error {
|
||||
for {
|
||||
if atomic.LoadInt32(&w.quota) > 0 {
|
||||
atomic.AddInt32(&w.quota, -sz)
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-w.ch:
|
||||
continue
|
||||
case <-w.done:
|
||||
return errStreamDone
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *writeQuota) realReplenish(n int) {
|
||||
sz := int32(n)
|
||||
a := atomic.AddInt32(&w.quota, sz)
|
||||
b := a - sz
|
||||
if b <= 0 && a > 0 {
|
||||
select {
|
||||
case w.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type trInFlow struct {
|
||||
limit uint32
|
||||
unacked uint32
|
||||
effectiveWindowSize uint32
|
||||
}
|
||||
|
||||
func (f *trInFlow) newLimit(n uint32) uint32 {
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.updateEffectiveWindowSize()
|
||||
return d
|
||||
}
|
||||
|
||||
func (f *trInFlow) onData(n uint32) uint32 {
|
||||
f.unacked += n
|
||||
if f.unacked >= f.limit/4 {
|
||||
w := f.unacked
|
||||
f.unacked = 0
|
||||
f.updateEffectiveWindowSize()
|
||||
return w
|
||||
}
|
||||
f.updateEffectiveWindowSize()
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *trInFlow) reset() uint32 {
|
||||
w := f.unacked
|
||||
f.unacked = 0
|
||||
f.updateEffectiveWindowSize()
|
||||
return w
|
||||
}
|
||||
|
||||
func (f *trInFlow) updateEffectiveWindowSize() {
|
||||
atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
|
||||
}
|
||||
|
||||
func (f *trInFlow) getSize() uint32 {
|
||||
return atomic.LoadUint32(&f.effectiveWindowSize)
|
||||
}
|
||||
|
||||
// TODO(mmukhi): Simplify this code.
|
||||
// inFlow deals with inbound flow control
|
||||
type inFlow struct {
|
||||
mu sync.Mutex
|
||||
// The inbound flow control limit for pending data.
|
||||
limit uint32
|
||||
// pendingData is the overall data which have been received but not been
|
||||
// consumed by applications.
|
||||
pendingData uint32
|
||||
// The amount of data the application has consumed but grpc has not sent
|
||||
// window update for them. Used to reduce window update frequency.
|
||||
pendingUpdate uint32
|
||||
// delta is the extra window update given by receiver when an application
|
||||
// is reading data bigger in size than the inFlow limit.
|
||||
delta uint32
|
||||
}
|
||||
|
||||
// newLimit updates the inflow window to a new value n.
|
||||
// It assumes that n is always greater than the old limit.
|
||||
func (f *inFlow) newLimit(n uint32) {
|
||||
f.mu.Lock()
|
||||
f.limit = n
|
||||
f.mu.Unlock()
|
||||
}
|
||||
|
||||
func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
||||
if n > uint32(math.MaxInt32) {
|
||||
n = uint32(math.MaxInt32)
|
||||
}
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
// estSenderQuota is the receiver's view of the maximum number of bytes the sender
|
||||
// can send without a window update.
|
||||
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
|
||||
// estUntransmittedData is the maximum number of bytes the sender might not have put
|
||||
// on the wire yet. A value of 0 or less means that we have already received all or
|
||||
// more bytes than the application is requesting to read.
|
||||
estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
|
||||
// This implies that unless we send a window update, the sender won't be able to send all the bytes
|
||||
// for this message. Therefore we must send an update over the limit since there's an active read
|
||||
// request from the application.
|
||||
if estUntransmittedData > estSenderQuota {
|
||||
// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
|
||||
if f.limit+n > maxWindowSize {
|
||||
f.delta = maxWindowSize - f.limit
|
||||
} else {
|
||||
// Send a window update for the whole message and not just the difference between
|
||||
// estUntransmittedData and estSenderQuota. This will be helpful in case the message
|
||||
// is padded; We will fallback on the current available window(at least a 1/4th of the limit).
|
||||
f.delta = n
|
||||
}
|
||||
return f.delta
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// onData is invoked when some data frame is received. It updates pendingData.
|
||||
func (f *inFlow) onData(n uint32) error {
|
||||
f.mu.Lock()
|
||||
f.pendingData += n
|
||||
if f.pendingData+f.pendingUpdate > f.limit+f.delta {
|
||||
limit := f.limit
|
||||
rcvd := f.pendingData + f.pendingUpdate
|
||||
f.mu.Unlock()
|
||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// onRead is invoked when the application reads the data. It returns the window size
|
||||
// to be sent to the peer.
|
||||
func (f *inFlow) onRead(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
if f.pendingData == 0 {
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
|
||||
f.pendingData -= n
|
||||
if n > f.delta {
|
||||
n -= f.delta
|
||||
f.delta = 0
|
||||
} else {
|
||||
f.delta -= n
|
||||
n = 0
|
||||
}
|
||||
f.pendingUpdate += n
|
||||
if f.pendingUpdate >= f.limit/4 {
|
||||
wu := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
f.mu.Unlock()
|
||||
return wu
|
||||
}
|
||||
f.mu.Unlock()
|
||||
return 0
|
||||
}
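// Editorial sketch, not part of the upstream file: a minimal in-package
// illustration of the writeQuota defined above (function name invented here).
// It shows the "soft limit" behaviour: a get that starts while quota is still
// positive always succeeds, and only a later get blocks until loopyWriter
// replenishes the quota.
func sketchWriteQuota() error {
	done := make(chan struct{})
	wq := newWriteQuota(10, done)

	_ = wq.get(8) // quota 10 -> 2
	_ = wq.get(5) // still allowed: quota was positive, now -3 (soft limit)

	// A further get would park on wq.ch, so hand quota back the way
	// loopyWriter does after flushing frames onto the wire.
	go wq.replenish(10) // -3 + 10 = 7, signals wq.ch
	return wq.get(1)    // proceeds once quota is positive again
}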
|
477
vendor/google.golang.org/grpc/internal/transport/handler_server.go
generated
vendored
Normal file
@@ -0,0 +1,477 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// This file is the implementation of a gRPC server using HTTP/2 which
|
||||
// uses the standard Go http2 Server implementation (via the
|
||||
// http.Handler interface), rather than speaking low-level HTTP/2
|
||||
// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
|
||||
// inside an http.Handler, or writes an HTTP error to w and returns an error.
|
||||
// It requires that the http Server supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
msg := "gRPC requires HTTP/2"
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
// TODO: do we assume contentType is lowercase? we did before
|
||||
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
|
||||
if !validContentType {
|
||||
msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
|
||||
http.Error(w, msg, http.StatusUnsupportedMediaType)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
msg := "gRPC requires a ResponseWriter supporting http.Flusher"
|
||||
http.Error(w, msg, http.StatusInternalServerError)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
st := &serverHandlerTransport{
|
||||
rw: w,
|
||||
req: r,
|
||||
closedCh: make(chan struct{}),
|
||||
writes: make(chan func()),
|
||||
contentType: contentType,
|
||||
contentSubtype: contentSubtype,
|
||||
stats: stats,
|
||||
}
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := decodeTimeout(v)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, status.Error(codes.Internal, msg)
|
||||
}
|
||||
st.timeoutSet = true
|
||||
st.timeout = to
|
||||
}
|
||||
|
||||
metakv := []string{"content-type", contentType}
|
||||
if r.Host != "" {
|
||||
metakv = append(metakv, ":authority", r.Host)
|
||||
}
|
||||
for k, vv := range r.Header {
|
||||
k = strings.ToLower(k)
|
||||
if isReservedHeader(k) && !isWhitelistedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
v, err := decodeMetadataHeader(k, v)
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, status.Error(codes.Internal, msg)
|
||||
}
|
||||
metakv = append(metakv, k, v)
|
||||
}
|
||||
}
|
||||
st.headerMD = metadata.Pairs(metakv...)
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// serverHandlerTransport is an implementation of ServerTransport
|
||||
// which replies to exactly one gRPC request (exactly one HTTP request),
|
||||
// using the net/http.Handler interface. This http.Handler is guaranteed
|
||||
// at this point to be speaking over HTTP/2, so it's able to speak valid
|
||||
// gRPC.
|
||||
type serverHandlerTransport struct {
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
|
||||
headerMD metadata.MD
|
||||
|
||||
closeOnce sync.Once
|
||||
closedCh chan struct{} // closed on Close
|
||||
|
||||
// writes is a channel of code to run serialized in the
|
||||
// ServeHTTP (HandleStreams) goroutine. The channel is closed
|
||||
// when WriteStatus is called.
|
||||
writes chan func()
|
||||
|
||||
// block concurrent WriteStatus calls
|
||||
// e.g. grpc/(*serverStream).SendMsg/RecvMsg
|
||||
writeStatusMu sync.Mutex
|
||||
|
||||
// we just mirror the request content-type
|
||||
contentType string
|
||||
// we store both contentType and contentSubtype so we don't keep recreating them
|
||||
// TODO make sure this is consistent across handler_server and http2_server
|
||||
contentSubtype string
|
||||
|
||||
stats []stats.Handler
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close(err error) {
|
||||
ht.closeOnce.Do(func() {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Closing serverHandlerTransport: %v", err)
|
||||
}
|
||||
close(ht.closedCh)
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||
|
||||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||||
// the empty string if unknown.
|
||||
type strAddr string
|
||||
|
||||
func (a strAddr) Network() string {
|
||||
if a != "" {
|
||||
// Per the documentation on net/http.Request.RemoteAddr, if this is
|
||||
// set, it's set to the IP:port of the peer (hence, TCP):
|
||||
// https://golang.org/pkg/net/http/#Request
|
||||
//
|
||||
// If we want to support Unix sockets later, we can
|
||||
// add our own grpc-specific convention within the
|
||||
// grpc codebase to set RemoteAddr to a different
|
||||
// format, or probably better: we can attach it to the
|
||||
// context and use that from serverHandlerTransport.RemoteAddr.
|
||||
return "tcp"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (a strAddr) String() string { return string(a) }
|
||||
|
||||
// do runs fn in the ServeHTTP goroutine.
|
||||
func (ht *serverHandlerTransport) do(fn func()) error {
|
||||
select {
|
||||
case <-ht.closedCh:
|
||||
return ErrConnClosing
|
||||
case ht.writes <- fn:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
|
||||
ht.writeStatusMu.Lock()
|
||||
defer ht.writeStatusMu.Unlock()
|
||||
|
||||
headersWritten := s.updateHeaderSent()
|
||||
err := ht.do(func() {
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
|
||||
// And flush, in case no header or body has been sent yet.
|
||||
// This forces a separation of headers and trailers if this is the
|
||||
// first call (for example, in end2end tests' TestNoService).
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
|
||||
h := ht.rw.Header()
|
||||
h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
|
||||
if m := st.Message(); m != "" {
|
||||
h.Set("Grpc-Message", encodeGrpcMessage(m))
|
||||
}
|
||||
|
||||
if p := st.Proto(); p != nil && len(p.Details) > 0 {
|
||||
stBytes, err := proto.Marshal(p)
|
||||
if err != nil {
|
||||
// TODO: return error instead, when callers are able to handle it.
|
||||
panic(err)
|
||||
}
|
||||
|
||||
h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
|
||||
}
|
||||
|
||||
if md := s.Trailer(); len(md) > 0 {
|
||||
for k, vv := range md {
|
||||
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
// http2 ResponseWriter mechanism to send undeclared Trailers after
|
||||
// the headers have possibly been written.
|
||||
h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
if err == nil { // transport has not been closed
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
for _, sh := range ht.stats {
|
||||
sh.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
}
|
||||
ht.Close(errors.New("finished writing status"))
|
||||
return err
|
||||
}
|
||||
|
||||
// writePendingHeaders sets common and custom headers on the first
|
||||
// write call (Write, WriteHeader, or WriteStatus)
|
||||
func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
|
||||
ht.writeCommonHeaders(s)
|
||||
ht.writeCustomHeaders(s)
|
||||
}
|
||||
|
||||
// writeCommonHeaders sets common headers on the first write
|
||||
// call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
h := ht.rw.Header()
|
||||
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
|
||||
h.Set("Content-Type", ht.contentType)
|
||||
|
||||
// Predeclare trailers we'll set later in WriteStatus (after the body).
|
||||
// This is a SHOULD in the HTTP RFC, and the way you add (known)
|
||||
// Trailers per the net/http.ResponseWriter contract.
|
||||
// See https://golang.org/pkg/net/http/#ResponseWriter
|
||||
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
|
||||
h.Add("Trailer", "Grpc-Status")
|
||||
h.Add("Trailer", "Grpc-Message")
|
||||
h.Add("Trailer", "Grpc-Status-Details-Bin")
|
||||
|
||||
if s.sendCompress != "" {
|
||||
h.Set("Grpc-Encoding", s.sendCompress)
|
||||
}
|
||||
}
|
||||
|
||||
// writeCustomHeaders sets custom headers set on the stream via SetHeader
|
||||
// on the first write call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
|
||||
h := ht.rw.Header()
|
||||
|
||||
s.hdrMu.Lock()
|
||||
for k, vv := range s.header {
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
h.Add(k, encodeMetadataHeader(k, v))
|
||||
}
|
||||
}
|
||||
|
||||
s.hdrMu.Unlock()
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
|
||||
headersWritten := s.updateHeaderSent()
|
||||
return ht.do(func() {
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
ht.rw.Write(hdr)
|
||||
ht.rw.Write(data)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if err := s.SetHeader(md); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headersWritten := s.updateHeaderSent()
|
||||
err := ht.do(func() {
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
|
||||
ht.rw.WriteHeader(200)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
for _, sh := range ht.stats {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
sh.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
Header: md.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
})
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
|
||||
// With this transport type there will be exactly 1 stream: this HTTP request.
|
||||
|
||||
ctx := ht.req.Context()
|
||||
var cancel context.CancelFunc
|
||||
if ht.timeoutSet {
|
||||
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
|
||||
} else {
|
||||
ctx, cancel = context.WithCancel(ctx)
|
||||
}
|
||||
|
||||
// requestOver is closed when the status has been written via WriteStatus.
|
||||
requestOver := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-requestOver:
|
||||
case <-ht.closedCh:
|
||||
case <-ht.req.Context().Done():
|
||||
}
|
||||
cancel()
|
||||
ht.Close(errors.New("request is done processing"))
|
||||
}()
|
||||
|
||||
req := ht.req
|
||||
|
||||
s := &Stream{
|
||||
id: 0, // irrelevant
|
||||
requestRead: func(int) {},
|
||||
cancel: cancel,
|
||||
buf: newRecvBuffer(),
|
||||
st: ht,
|
||||
method: req.URL.Path,
|
||||
recvCompress: req.Header.Get("grpc-encoding"),
|
||||
contentSubtype: ht.contentSubtype,
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
for _, sh := range ht.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: ht.RemoteAddr(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.trReader = &transportReader{
|
||||
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
|
||||
windowHandler: func(int) {},
|
||||
}
|
||||
|
||||
// readerDone is closed when the Body.Read-ing goroutine exits.
|
||||
readerDone := make(chan struct{})
|
||||
go func() {
|
||||
defer close(readerDone)
|
||||
|
||||
// TODO: minimize garbage, optimize recvBuffer code/ownership
|
||||
const readSize = 8196
|
||||
for buf := make([]byte, readSize); ; {
|
||||
n, err := req.Body.Read(buf)
|
||||
if n > 0 {
|
||||
s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
|
||||
buf = buf[n:]
|
||||
}
|
||||
if err != nil {
|
||||
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
|
||||
return
|
||||
}
|
||||
if len(buf) == 0 {
|
||||
buf = make([]byte, readSize)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// startStream is provided by the *grpc.Server's serveStreams.
|
||||
// It starts a goroutine serving s and exits immediately.
|
||||
// The goroutine that is started is the one that then calls
|
||||
// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
|
||||
startStream(s)
|
||||
|
||||
ht.runStream()
|
||||
close(requestOver)
|
||||
|
||||
// Wait for reading goroutine to finish.
|
||||
req.Body.Close()
|
||||
<-readerDone
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) runStream() {
|
||||
for {
|
||||
select {
|
||||
case fn := <-ht.writes:
|
||||
fn()
|
||||
case <-ht.closedCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) IncrMsgSent() {}
|
||||
|
||||
func (ht *serverHandlerTransport) IncrMsgRecv() {}
|
||||
|
||||
func (ht *serverHandlerTransport) Drain() {
|
||||
panic("Drain() is not implemented")
|
||||
}
|
||||
|
||||
// mapRecvMsgError maps the non-nil err into the appropriate
|
||||
// error value as expected by callers of *grpc.parser.recvMsg.
|
||||
// In particular, it can only be:
|
||||
// - io.EOF
|
||||
// - io.ErrUnexpectedEOF
|
||||
// - of type transport.ConnectionError
|
||||
// - an error from the status package
|
||||
func mapRecvMsgError(err error) error {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return err
|
||||
}
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if code, ok := http2ErrConvTab[se.Code]; ok {
|
||||
return status.Error(code, se.Error())
|
||||
}
|
||||
}
|
||||
if strings.Contains(err.Error(), "body closed by handler") {
|
||||
return status.Error(codes.Canceled, err.Error())
|
||||
}
|
||||
return connectionErrorf(true, err, err.Error())
|
||||
}
|
1800
vendor/google.golang.org/grpc/internal/transport/http2_client.go
generated
vendored
Normal file
File diff suppressed because it is too large
1461
vendor/google.golang.org/grpc/internal/transport/http2_server.go
generated
vendored
Normal file
File diff suppressed because it is too large
412
vendor/google.golang.org/grpc/internal/transport/http_util.go
generated
vendored
Normal file
@@ -0,0 +1,412 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
// http2MaxFrameLen specifies the max length of a HTTP2 frame.
|
||||
http2MaxFrameLen = 16384 // 16KB frame
|
||||
// https://httpwg.org/specs/rfc7540.html#SettingValues
|
||||
http2InitHeaderTableSize = 4096
|
||||
)
|
||||
|
||||
var (
|
||||
clientPreface = []byte(http2.ClientPreface)
|
||||
http2ErrConvTab = map[http2.ErrCode]codes.Code{
|
||||
http2.ErrCodeNo: codes.Internal,
|
||||
http2.ErrCodeProtocol: codes.Internal,
|
||||
http2.ErrCodeInternal: codes.Internal,
|
||||
http2.ErrCodeFlowControl: codes.ResourceExhausted,
|
||||
http2.ErrCodeSettingsTimeout: codes.Internal,
|
||||
http2.ErrCodeStreamClosed: codes.Internal,
|
||||
http2.ErrCodeFrameSize: codes.Internal,
|
||||
http2.ErrCodeRefusedStream: codes.Unavailable,
|
||||
http2.ErrCodeCancel: codes.Canceled,
|
||||
http2.ErrCodeCompression: codes.Internal,
|
||||
http2.ErrCodeConnect: codes.Internal,
|
||||
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
|
||||
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
|
||||
http2.ErrCodeHTTP11Required: codes.Internal,
|
||||
}
|
||||
// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
|
||||
HTTPStatusConvTab = map[int]codes.Code{
|
||||
// 400 Bad Request - INTERNAL.
|
||||
http.StatusBadRequest: codes.Internal,
|
||||
// 401 Unauthorized - UNAUTHENTICATED.
|
||||
http.StatusUnauthorized: codes.Unauthenticated,
|
||||
// 403 Forbidden - PERMISSION_DENIED.
|
||||
http.StatusForbidden: codes.PermissionDenied,
|
||||
// 404 Not Found - UNIMPLEMENTED.
|
||||
http.StatusNotFound: codes.Unimplemented,
|
||||
// 429 Too Many Requests - UNAVAILABLE.
|
||||
http.StatusTooManyRequests: codes.Unavailable,
|
||||
// 502 Bad Gateway - UNAVAILABLE.
|
||||
http.StatusBadGateway: codes.Unavailable,
|
||||
// 503 Service Unavailable - UNAVAILABLE.
|
||||
http.StatusServiceUnavailable: codes.Unavailable,
|
||||
// 504 Gateway timeout - UNAVAILABLE.
|
||||
http.StatusGatewayTimeout: codes.Unavailable,
|
||||
}
|
||||
logger = grpclog.Component("transport")
|
||||
)
|
||||
|
||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||
// reserved by gRPC protocol. Any other headers are classified as the
|
||||
// user-specified metadata.
|
||||
func isReservedHeader(hdr string) bool {
|
||||
if hdr != "" && hdr[0] == ':' {
|
||||
return true
|
||||
}
|
||||
switch hdr {
|
||||
case "content-type",
|
||||
"user-agent",
|
||||
"grpc-message-type",
|
||||
"grpc-encoding",
|
||||
"grpc-message",
|
||||
"grpc-status",
|
||||
"grpc-timeout",
|
||||
"grpc-status-details-bin",
|
||||
// Intentionally exclude grpc-previous-rpc-attempts and
|
||||
// grpc-retry-pushback-ms, which are "reserved", but their API
|
||||
// intentionally works via metadata.
|
||||
"te":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitelistedHeader checks whether hdr should be propagated into metadata
|
||||
// visible to users, even though it is classified as "reserved", above.
|
||||
func isWhitelistedHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case ":authority", "user-agent":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
const binHdrSuffix = "-bin"
|
||||
|
||||
func encodeBinHeader(v []byte) string {
|
||||
return base64.RawStdEncoding.EncodeToString(v)
|
||||
}
|
||||
|
||||
func decodeBinHeader(v string) ([]byte, error) {
|
||||
if len(v)%4 == 0 {
|
||||
// Input was padded, or padding was not necessary.
|
||||
return base64.StdEncoding.DecodeString(v)
|
||||
}
|
||||
return base64.RawStdEncoding.DecodeString(v)
|
||||
}
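// Editorial sketch, not part of the upstream file: a hedged in-package
// illustration (function name invented here) of the "-bin" metadata round trip
// implemented by encodeBinHeader/decodeBinHeader above. Values are written
// without base64 padding, but padded input is still accepted on decode.
func sketchBinHeader() ([]byte, error) {
	raw := []byte{0x00, 0x01, 0xFE, 0xFF}
	wire := encodeBinHeader(raw) // "AAH+/w" (no '=' padding on the wire)
	return decodeBinHeader(wire) // the padded form "AAH+/w==" would decode too
}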
|
||||
|
||||
func encodeMetadataHeader(k, v string) string {
|
||||
if strings.HasSuffix(k, binHdrSuffix) {
|
||||
return encodeBinHeader(([]byte)(v))
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func decodeMetadataHeader(k, v string) (string, error) {
|
||||
if strings.HasSuffix(k, binHdrSuffix) {
|
||||
b, err := decodeBinHeader(v)
|
||||
return string(b), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
|
||||
v, err := decodeBinHeader(rawDetails)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
st := &spb.Status{}
|
||||
if err = proto.Unmarshal(v, st); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return status.FromProto(st), nil
|
||||
}
|
||||
|
||||
type timeoutUnit uint8
|
||||
|
||||
const (
|
||||
hour timeoutUnit = 'H'
|
||||
minute timeoutUnit = 'M'
|
||||
second timeoutUnit = 'S'
|
||||
millisecond timeoutUnit = 'm'
|
||||
microsecond timeoutUnit = 'u'
|
||||
nanosecond timeoutUnit = 'n'
|
||||
)
|
||||
|
||||
func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
|
||||
switch u {
|
||||
case hour:
|
||||
return time.Hour, true
|
||||
case minute:
|
||||
return time.Minute, true
|
||||
case second:
|
||||
return time.Second, true
|
||||
case millisecond:
|
||||
return time.Millisecond, true
|
||||
case microsecond:
|
||||
return time.Microsecond, true
|
||||
case nanosecond:
|
||||
return time.Nanosecond, true
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func decodeTimeout(s string) (time.Duration, error) {
|
||||
size := len(s)
|
||||
if size < 2 {
|
||||
return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
|
||||
}
|
||||
if size > 9 {
|
||||
// Spec allows for 8 digits plus the unit.
|
||||
return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
|
||||
}
|
||||
unit := timeoutUnit(s[size-1])
|
||||
d, ok := timeoutUnitToDuration(unit)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
|
||||
}
|
||||
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
const maxHours = math.MaxInt64 / int64(time.Hour)
|
||||
if d == time.Hour && t > maxHours {
|
||||
// This timeout would overflow math.MaxInt64; clamp it.
|
||||
return time.Duration(math.MaxInt64), nil
|
||||
}
|
||||
return d * time.Duration(t), nil
|
||||
}
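// Editorial sketch, not part of the upstream file: a hedged in-package
// illustration (function name invented here) of the grpc-timeout wire format
// handled by decodeTimeout above — up to eight ASCII digits followed by a
// single unit letter (H, M, S, m, u, n).
func sketchTimeouts() (time.Duration, time.Duration, error) {
	d1, _ := decodeTimeout("100m") // 100 * time.Millisecond
	d2, _ := decodeTimeout("7S")   // 7 * time.Second
	_, err := decodeTimeout("5")   // error: too short, the unit letter is required
	return d1, d2, err
}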
|
||||
|
||||
const (
|
||||
spaceByte = ' '
|
||||
tildeByte = '~'
|
||||
percentByte = '%'
|
||||
)
|
||||
|
||||
// encodeGrpcMessage is used to encode status code in header field
|
||||
// "grpc-message". It does percent encoding and also replaces invalid utf-8
|
||||
// characters with Unicode replacement character.
|
||||
//
|
||||
// It checks to see if each individual byte in msg is an allowable byte, and
|
||||
// then either percent encoding or passing it through. When percent encoding,
|
||||
// the byte is converted into hexadecimal notation with a '%' prepended.
|
||||
func encodeGrpcMessage(msg string) string {
|
||||
if msg == "" {
|
||||
return ""
|
||||
}
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
|
||||
return encodeGrpcMessageUnchecked(msg)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func encodeGrpcMessageUnchecked(msg string) string {
|
||||
var sb strings.Builder
|
||||
for len(msg) > 0 {
|
||||
r, size := utf8.DecodeRuneInString(msg)
|
||||
for _, b := range []byte(string(r)) {
|
||||
if size > 1 {
|
||||
// If size > 1, r is not ascii. Always do percent encoding.
|
||||
fmt.Fprintf(&sb, "%%%02X", b)
|
||||
continue
|
||||
}
|
||||
|
||||
// The for loop is necessary even if size == 1. r could be
|
||||
// utf8.RuneError.
|
||||
//
|
||||
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
|
||||
if b >= spaceByte && b <= tildeByte && b != percentByte {
|
||||
sb.WriteByte(b)
|
||||
} else {
|
||||
fmt.Fprintf(&sb, "%%%02X", b)
|
||||
}
|
||||
}
|
||||
msg = msg[size:]
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
|
||||
func decodeGrpcMessage(msg string) string {
|
||||
if msg == "" {
|
||||
return ""
|
||||
}
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
if msg[i] == percentByte && i+2 < lenMsg {
|
||||
return decodeGrpcMessageUnchecked(msg)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func decodeGrpcMessageUnchecked(msg string) string {
|
||||
var sb strings.Builder
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if c == percentByte && i+2 < lenMsg {
|
||||
parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
|
||||
if err != nil {
|
||||
sb.WriteByte(c)
|
||||
} else {
|
||||
sb.WriteByte(byte(parsed))
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
sb.WriteByte(c)
|
||||
}
|
||||
}
|
||||
return sb.String()
|
||||
}
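// Editorial sketch, not part of the upstream file: a hedged in-package
// illustration (function name invented here) of the grpc-message percent
// encoding above. Printable ASCII other than '%' passes through; everything
// else becomes %XX escapes, and decodeGrpcMessage reverses the encoding.
func sketchGrpcMessage() (string, bool) {
	msg := "résumé 100%"
	enc := encodeGrpcMessage(msg) // "r%C3%A9sum%C3%A9 100%25"
	dec := decodeGrpcMessage(enc)
	return enc, dec == msg // true: valid UTF-8 round-trips exactly
}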
|
||||
|
||||
type bufWriter struct {
|
||||
buf []byte
|
||||
offset int
|
||||
batchSize int
|
||||
conn net.Conn
|
||||
err error
|
||||
}
|
||||
|
||||
func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
|
||||
return &bufWriter{
|
||||
buf: make([]byte, batchSize*2),
|
||||
batchSize: batchSize,
|
||||
conn: conn,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
if w.batchSize == 0 { // Buffer has been disabled.
|
||||
return w.conn.Write(b)
|
||||
}
|
||||
for len(b) > 0 {
|
||||
nn := copy(w.buf[w.offset:], b)
|
||||
b = b[nn:]
|
||||
w.offset += nn
|
||||
n += nn
|
||||
if w.offset >= w.batchSize {
|
||||
err = w.Flush()
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *bufWriter) Flush() error {
|
||||
if w.err != nil {
|
||||
return w.err
|
||||
}
|
||||
if w.offset == 0 {
|
||||
return nil
|
||||
}
|
||||
_, w.err = w.conn.Write(w.buf[:w.offset])
|
||||
w.offset = 0
|
||||
return w.err
|
||||
}
|
||||
|
||||
type framer struct {
|
||||
writer *bufWriter
|
||||
fr *http2.Framer
|
||||
}
|
||||
|
||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
|
||||
if writeBufferSize < 0 {
|
||||
writeBufferSize = 0
|
||||
}
|
||||
var r io.Reader = conn
|
||||
if readBufferSize > 0 {
|
||||
r = bufio.NewReaderSize(r, readBufferSize)
|
||||
}
|
||||
w := newBufWriter(conn, writeBufferSize)
|
||||
f := &framer{
|
||||
writer: w,
|
||||
fr: http2.NewFramer(w, r),
|
||||
}
|
||||
f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
|
||||
// Opt-in to Frame reuse API on framer to reduce garbage.
|
||||
// Frames aren't safe to read from after a subsequent call to ReadFrame.
|
||||
f.fr.SetReuseFrames()
|
||||
f.fr.MaxHeaderListSize = maxHeaderListSize
|
||||
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
|
||||
return f
|
||||
}
|
||||
|
||||
// parseDialTarget returns the network and address to pass to dialer.
|
||||
func parseDialTarget(target string) (string, string) {
|
||||
net := "tcp"
|
||||
m1 := strings.Index(target, ":")
|
||||
m2 := strings.Index(target, ":/")
|
||||
// handle unix:addr which will fail with url.Parse
|
||||
if m1 >= 0 && m2 < 0 {
|
||||
if n := target[0:m1]; n == "unix" {
|
||||
return n, target[m1+1:]
|
||||
}
|
||||
}
|
||||
if m2 >= 0 {
|
||||
t, err := url.Parse(target)
|
||||
if err != nil {
|
||||
return net, target
|
||||
}
|
||||
scheme := t.Scheme
|
||||
addr := t.Path
|
||||
if scheme == "unix" {
|
||||
if addr == "" {
|
||||
addr = t.Host
|
||||
}
|
||||
return scheme, addr
|
||||
}
|
||||
}
|
||||
return net, target
|
||||
}
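// Editorial sketch, not part of the upstream file: a hedged in-package
// illustration (function name invented here) of how parseDialTarget above
// splits a dial target into the network and address handed to the dialer.
func sketchDialTargets() (string, string, string, string) {
	n1, a1 := parseDialTarget("unix:/tmp/grpc.sock") // "unix", "/tmp/grpc.sock"
	n2, a2 := parseDialTarget("example.org:443")     // "tcp", "example.org:443"
	return n1, a1, n2, a2
}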
|
46
vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package networktype declares the network type to be used in the default
|
||||
// dialer. Attribute of a resolver.Address.
|
||||
package networktype
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// keyType is the key to use for storing State in Attributes.
|
||||
type keyType string
|
||||
|
||||
const key = keyType("grpc.internal.transport.networktype")
|
||||
|
||||
// Set returns a copy of the provided address with attributes containing networkType.
|
||||
func Set(address resolver.Address, networkType string) resolver.Address {
|
||||
address.Attributes = address.Attributes.WithValue(key, networkType)
|
||||
return address
|
||||
}
|
||||
|
||||
// Get returns the network type in the resolver.Address and true, or "", false
|
||||
// if not present.
|
||||
func Get(address resolver.Address) (string, bool) {
|
||||
v := address.Attributes.Value(key)
|
||||
if v == nil {
|
||||
return "", false
|
||||
}
|
||||
return v.(string), true
|
||||
}
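// Editorial sketch, not part of the upstream file: a hedged illustration
// (function name invented here) of the Set/Get round trip above, the way a
// resolver would tag an address so the default dialer knows its network type.
func sketchNetworkType() (string, bool) {
	addr := resolver.Address{Addr: "/tmp/grpc.sock"}
	addr = Set(addr, "unix") // attach the network type as an attribute
	return Get(addr)         // "unix", true
}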
|
142
vendor/google.golang.org/grpc/internal/transport/proxy.go
generated
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
const proxyAuthHeaderKey = "Proxy-Authorization"
|
||||
|
||||
var (
|
||||
// The following variable will be overwritten in the tests.
|
||||
httpProxyFromEnvironment = http.ProxyFromEnvironment
|
||||
)
|
||||
|
||||
func mapAddress(address string) (*url.URL, error) {
|
||||
req := &http.Request{
|
||||
URL: &url.URL{
|
||||
Scheme: "https",
|
||||
Host: address,
|
||||
},
|
||||
}
|
||||
url, err := httpProxyFromEnvironment(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return url, nil
|
||||
}
|
||||
|
||||
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
|
||||
// It's possible that this reader reads more than what's needed for the response and stores
|
||||
// those bytes in the buffer.
|
||||
// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
|
||||
// bytes in the buffer.
|
||||
type bufConn struct {
|
||||
net.Conn
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (c *bufConn) Read(b []byte) (int, error) {
|
||||
return c.r.Read(b)
|
||||
}
|
||||
|
||||
func basicAuth(username, password string) string {
|
||||
auth := username + ":" + password
|
||||
return base64.StdEncoding.EncodeToString([]byte(auth))
|
||||
}
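// Editorial sketch, not part of the upstream file: a hedged in-package
// illustration (function name and credentials invented here) of the
// Proxy-Authorization value built from basicAuth above when the proxy URL
// carries userinfo.
func sketchProxyAuth() string {
	return "Basic " + basicAuth("alice", "s3cret") // "Basic YWxpY2U6czNjcmV0"
}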
|
||||
|
||||
func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
req := &http.Request{
|
||||
Method: http.MethodConnect,
|
||||
URL: &url.URL{Host: backendAddr},
|
||||
Header: map[string][]string{"User-Agent": {grpcUA}},
|
||||
}
|
||||
if t := proxyURL.User; t != nil {
|
||||
u := t.Username()
|
||||
p, _ := t.Password()
|
||||
req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
|
||||
}
|
||||
|
||||
if err := sendHTTPRequest(ctx, req, conn); err != nil {
|
||||
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
|
||||
}
|
||||
|
||||
r := bufio.NewReader(conn)
|
||||
resp, err := http.ReadResponse(r, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading server HTTP response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
dump, err := httputil.DumpResponse(resp, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
|
||||
}
|
||||
|
||||
return &bufConn{Conn: conn, r: r}, nil
|
||||
}
|
||||
|
||||
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
|
||||
// is necessary, dials, does the HTTP CONNECT handshake, and returns the
|
||||
// connection.
|
||||
func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
|
||||
newAddr := addr
|
||||
proxyURL, err := mapAddress(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
newAddr = proxyURL.Host
|
||||
}
|
||||
|
||||
conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if proxyURL != nil {
|
||||
// proxy is disabled if proxyURL is nil.
|
||||
conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
|
||||
req = req.WithContext(ctx)
|
||||
if err := req.Write(conn); err != nil {
|
||||
return fmt.Errorf("failed to write the HTTP request: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
823
vendor/google.golang.org/grpc/internal/transport/transport.go
generated
vendored
Normal file
@@ -0,0 +1,823 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package transport defines and implements message oriented communication
|
||||
// channel to complete various transactions (e.g., an RPC). It is meant for
|
||||
// grpc-internal usage and is not intended to be imported directly by users.
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/tap"
|
||||
)
|
||||
|
||||
// ErrNoHeaders is used as a signal that a trailers only response was received,
|
||||
// and is not a real error.
|
||||
var ErrNoHeaders = errors.New("stream has no headers")
|
||||
|
||||
const logLevel = 2
|
||||
|
||||
type bufferPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
func newBufferPool() *bufferPool {
|
||||
return &bufferPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *bufferPool) get() *bytes.Buffer {
|
||||
return p.pool.Get().(*bytes.Buffer)
|
||||
}
|
||||
|
||||
func (p *bufferPool) put(b *bytes.Buffer) {
|
||||
p.pool.Put(b)
|
||||
}
|
||||
|
||||
// recvMsg represents the received msg from the transport. All transport
|
||||
// protocol specific info has been removed.
|
||||
type recvMsg struct {
|
||||
buffer *bytes.Buffer
|
||||
// nil: received some data
|
||||
// io.EOF: stream is completed. data is nil.
|
||||
// other non-nil error: transport failure. data is nil.
|
||||
err error
|
||||
}
|
||||
|
||||
// recvBuffer is an unbounded channel of recvMsg structs.
|
||||
//
|
||||
// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
|
||||
// holds a channel of recvMsg structs instead of objects implementing "item"
|
||||
// interface. recvBuffer is written to much more often and using strict recvMsg
|
||||
// structs helps avoid allocation in "recvBuffer.put"
|
||||
type recvBuffer struct {
|
||||
c chan recvMsg
|
||||
mu sync.Mutex
|
||||
backlog []recvMsg
|
||||
err error
|
||||
}
|
||||
|
||||
func newRecvBuffer() *recvBuffer {
|
||||
b := &recvBuffer{
|
||||
c: make(chan recvMsg, 1),
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *recvBuffer) put(r recvMsg) {
|
||||
b.mu.Lock()
|
||||
if b.err != nil {
|
||||
b.mu.Unlock()
|
||||
// An error had occurred earlier, don't accept more
|
||||
// data or errors.
|
||||
return
|
||||
}
|
||||
b.err = r.err
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- r:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, r)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
func (b *recvBuffer) load() {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = recvMsg{}
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
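// Editorial sketch, not part of the upstream file: a hedged in-package
// illustration (function name invented here) of the unbounded-buffer handshake
// above. put never blocks; the reader receives from get() and then calls
// load() to promote the next backlog entry onto the channel.
func sketchRecvBuffer() string {
	b := newRecvBuffer()
	b.put(recvMsg{buffer: bytes.NewBufferString("frame-1")}) // lands on the channel
	b.put(recvMsg{buffer: bytes.NewBufferString("frame-2")}) // channel full: goes to backlog
	m := <-b.get()
	b.load() // moves "frame-2" from the backlog onto the channel
	m2 := <-b.get()
	return m.buffer.String() + "," + m2.buffer.String() // "frame-1,frame-2"
}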
|
||||
|
||||
// get returns the channel that receives a recvMsg in the buffer.
|
||||
//
|
||||
// Upon receipt of a recvMsg, the caller should call load to send another
|
||||
// recvMsg onto the channel if there is any.
|
||||
func (b *recvBuffer) get() <-chan recvMsg {
|
||||
return b.c
|
||||
}
|
||||
|
||||
// recvBufferReader implements io.Reader interface to read the data from
|
||||
// recvBuffer.
|
||||
type recvBufferReader struct {
|
||||
closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
|
||||
ctx context.Context
|
||||
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
|
||||
recv *recvBuffer
|
||||
last *bytes.Buffer // Stores the remaining data in the previous calls.
|
||||
err error
|
||||
freeBuffer func(*bytes.Buffer)
|
||||
}
|
||||
|
||||
// Read reads the next len(p) bytes from last. If last is drained, it tries to
|
||||
// read additional data from recv. It blocks if there is no additional data available
|
||||
// in recv. If Read returns any non-nil error, it will continue to return that error.
|
||||
func (r *recvBufferReader) Read(p []byte) (n int, err error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
if r.last != nil {
|
||||
// Read remaining data left in last call.
|
||||
copied, _ := r.last.Read(p)
|
||||
if r.last.Len() == 0 {
|
||||
r.freeBuffer(r.last)
|
||||
r.last = nil
|
||||
}
|
||||
return copied, nil
|
||||
}
|
||||
if r.closeStream != nil {
|
||||
n, r.err = r.readClient(p)
|
||||
} else {
|
||||
n, r.err = r.read(p)
|
||||
}
|
||||
return n, r.err
|
||||
}
|
||||
|
||||
func (r *recvBufferReader) read(p []byte) (n int, err error) {
|
||||
select {
|
||||
case <-r.ctxDone:
|
||||
return 0, ContextErr(r.ctx.Err())
|
||||
case m := <-r.recv.get():
|
||||
return r.readAdditional(m, p)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
|
||||
// If the context is canceled, then closes the stream with nil metadata.
|
||||
// closeStream writes its error parameter to r.recv as a recvMsg.
|
||||
// r.readAdditional acts on that message and returns the necessary error.
|
||||
select {
|
||||
case <-r.ctxDone:
|
||||
// Note that this adds the ctx error to the end of recv buffer, and
|
||||
// reads from the head. This will delay the error until recv buffer is
|
||||
// empty, thus will delay ctx cancellation in Recv().
|
||||
//
|
||||
// It's done this way to fix a race between ctx cancel and trailer. The
|
||||
// race was, stream.Recv() may return ctx error if ctxDone wins the
|
||||
// race, but stream.Trailer() may return a non-nil md because the stream
|
||||
// was not marked as done when trailer is received. This closeStream
|
||||
// call will mark stream as done, thus fix the race.
|
||||
//
|
||||
// TODO: delaying ctx error seems like an unnecessary side effect. What
|
||||
// we really want is to mark the stream as done, and return ctx error
|
||||
// faster.
|
||||
r.closeStream(ContextErr(r.ctx.Err()))
|
||||
m := <-r.recv.get()
|
||||
return r.readAdditional(m, p)
|
||||
case m := <-r.recv.get():
|
||||
return r.readAdditional(m, p)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
|
||||
r.recv.load()
|
||||
if m.err != nil {
|
||||
return 0, m.err
|
||||
}
|
||||
copied, _ := m.buffer.Read(p)
|
||||
if m.buffer.Len() == 0 {
|
||||
r.freeBuffer(m.buffer)
|
||||
r.last = nil
|
||||
} else {
|
||||
r.last = m.buffer
|
||||
}
|
||||
return copied, nil
|
||||
}
|
||||
|
||||
type streamState uint32
|
||||
|
||||
const (
|
||||
streamActive streamState = iota
|
||||
streamWriteDone // EndStream sent
|
||||
streamReadDone // EndStream received
|
||||
streamDone // the entire stream is finished.
|
||||
)
|
||||
|
||||
// Stream represents an RPC in the transport layer.
|
||||
type Stream struct {
|
||||
id uint32
|
||||
st ServerTransport // nil for client side Stream
|
||||
ct *http2Client // nil for server side Stream
|
||||
ctx context.Context // the associated context of the stream
|
||||
cancel context.CancelFunc // always nil for client side Stream
|
||||
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
|
||||
doneFunc func() // invoked at the end of stream on client side.
|
||||
ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
|
||||
method string // the associated RPC method of the stream
|
||||
recvCompress string
|
||||
sendCompress string
|
||||
buf *recvBuffer
|
||||
trReader io.Reader
|
||||
fc *inFlow
|
||||
wq *writeQuota
|
||||
|
||||
// Callback to state application's intentions to read data. This
|
||||
// is used to adjust flow control, if needed.
|
||||
requestRead func(int)
|
||||
|
||||
headerChan chan struct{} // closed to indicate the end of header metadata.
|
||||
headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
||||
// headerValid indicates whether a valid header was received. Only
|
||||
// meaningful after headerChan is closed (always call waitOnHeader() before
|
||||
// reading its value). Not valid on server side.
|
||||
headerValid bool
|
||||
|
||||
// hdrMu protects header and trailer metadata on the server-side.
|
||||
hdrMu sync.Mutex
|
||||
// On client side, header keeps the received header metadata.
|
||||
//
|
||||
// On server side, header keeps the header set by SetHeader(). The complete
|
||||
// header will be merged into this after t.WriteHeader() is called.
|
||||
header metadata.MD
|
||||
trailer metadata.MD // the key-value map of trailer metadata.
|
||||
|
||||
noHeaders bool // set if the client never received headers (set only after the stream is done).
|
||||
|
||||
// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
|
||||
headerSent uint32
|
||||
|
||||
state streamState
|
||||
|
||||
// On client-side it is the status error received from the server.
|
||||
// On server-side it is unused.
|
||||
status *status.Status
|
||||
|
||||
bytesReceived uint32 // indicates whether any bytes have been received on this stream
|
||||
unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
|
||||
|
||||
// contentSubtype is the content-subtype for requests.
|
||||
// this must be lowercase or the behavior is undefined.
|
||||
contentSubtype string
|
||||
}
|
||||
|
||||
// isHeaderSent is only valid on the server-side.
|
||||
func (s *Stream) isHeaderSent() bool {
|
||||
return atomic.LoadUint32(&s.headerSent) == 1
|
||||
}
|
||||
|
||||
// updateHeaderSent updates headerSent and returns true
|
||||
// if it was already set. It is valid only on server-side.
|
||||
func (s *Stream) updateHeaderSent() bool {
|
||||
return atomic.SwapUint32(&s.headerSent, 1) == 1
|
||||
}
|
||||
|
||||
func (s *Stream) swapState(st streamState) streamState {
|
||||
return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
|
||||
}
|
||||
|
||||
func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
|
||||
return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
|
||||
}
|
||||
|
||||
func (s *Stream) getState() streamState {
|
||||
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
|
||||
}
|
||||
|
||||
func (s *Stream) waitOnHeader() {
|
||||
if s.headerChan == nil {
|
||||
// On the server headerChan is always nil since a stream originates
|
||||
// only after having received headers.
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
// Close the stream to prevent headers/trailers from changing after
|
||||
// this function returns.
|
||||
s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
|
||||
// headerChan could possibly not be closed yet if closeStream raced
|
||||
// with operateHeaders; wait until it is closed explicitly here.
|
||||
<-s.headerChan
|
||||
case <-s.headerChan:
|
||||
}
|
||||
}
|
||||
|
||||
// RecvCompress returns the compression algorithm applied to the inbound
|
||||
// message. It is an empty string if there is no compression applied.
|
||||
func (s *Stream) RecvCompress() string {
|
||||
s.waitOnHeader()
|
||||
return s.recvCompress
|
||||
}
|
||||
|
||||
// SetSendCompress sets the compression algorithm to the stream.
|
||||
func (s *Stream) SetSendCompress(str string) {
|
||||
s.sendCompress = str
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed when it receives the final status
|
||||
// from the server.
|
||||
func (s *Stream) Done() <-chan struct{} {
|
||||
return s.done
|
||||
}
|
||||
|
||||
// Header returns the header metadata of the stream.
|
||||
//
|
||||
// On client side, it acquires the key-value pairs of header metadata once it is
|
||||
// available. It blocks until i) the metadata is ready or ii) there is no header
|
||||
// metadata or iii) the stream is canceled/expired.
|
||||
//
|
||||
// On server side, it returns the out header after t.WriteHeader is called. It
|
||||
// does not block and must not be called until after WriteHeader.
|
||||
func (s *Stream) Header() (metadata.MD, error) {
|
||||
if s.headerChan == nil {
|
||||
// On server side, return the header in stream. It will be the out
|
||||
// header after t.WriteHeader is called.
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
s.waitOnHeader()
|
||||
|
||||
if !s.headerValid {
|
||||
return nil, s.status.Err()
|
||||
}
|
||||
|
||||
if s.noHeaders {
|
||||
return nil, ErrNoHeaders
|
||||
}
|
||||
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
|
||||
// TrailersOnly blocks until a header or trailers-only frame is received and
|
||||
// then returns true if the stream was trailers-only. If the stream ends
|
||||
// before headers are received, returns true. Client-side only.
|
||||
func (s *Stream) TrailersOnly() bool {
|
||||
s.waitOnHeader()
|
||||
return s.noHeaders
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metadata. Note that if it is not called
|
||||
// after the entire stream is done, it could return an empty MD. Client
|
||||
// side only.
|
||||
// It can be safely read only after the stream has ended, that is, after either read
|
||||
// or write has returned io.EOF.
|
||||
func (s *Stream) Trailer() metadata.MD {
|
||||
c := s.trailer.Copy()
|
||||
return c
|
||||
}
|
||||
|
||||
// ContentSubtype returns the content-subtype for a request. For example, a
|
||||
// content-subtype of "proto" will result in a content-type of
|
||||
// "application/grpc+proto". This will always be lowercase. See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
func (s *Stream) ContentSubtype() string {
|
||||
return s.contentSubtype
|
||||
}
|
||||
|
||||
// Context returns the context of the stream.
|
||||
func (s *Stream) Context() context.Context {
|
||||
return s.ctx
|
||||
}
|
||||
|
||||
// Method returns the method for the stream.
|
||||
func (s *Stream) Method() string {
|
||||
return s.method
|
||||
}
|
||||
|
||||
// Status returns the status received from the server.
|
||||
// Status can be read safely only after the stream has ended,
|
||||
// that is, after Done() is closed.
|
||||
func (s *Stream) Status() *status.Status {
|
||||
return s.status
|
||||
}
|
||||
|
||||
// SetHeader sets the header metadata. This can be called multiple times.
// Server side only.
// This should not be called in parallel to other data writes.
func (s *Stream) SetHeader(md metadata.MD) error {
	if md.Len() == 0 {
		return nil
	}
	if s.isHeaderSent() || s.getState() == streamDone {
		return ErrIllegalHeaderWrite
	}
	s.hdrMu.Lock()
	s.header = metadata.Join(s.header, md)
	s.hdrMu.Unlock()
	return nil
}

// SendHeader sends the given header metadata. The given metadata is
// combined with any metadata set by previous calls to SetHeader and
// then written to the transport stream.
func (s *Stream) SendHeader(md metadata.MD) error {
	return s.st.WriteHeader(s, md)
}

// SetTrailer sets the trailer metadata which will be sent with the RPC status
// by the server. This can be called multiple times. Server side only.
// This should not be called in parallel to other data writes.
func (s *Stream) SetTrailer(md metadata.MD) error {
	if md.Len() == 0 {
		return nil
	}
	if s.getState() == streamDone {
		return ErrIllegalHeaderWrite
	}
	s.hdrMu.Lock()
	s.trailer = metadata.Join(s.trailer, md)
	s.hdrMu.Unlock()
	return nil
}

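// Illustrative sketch, not part of the vendored gRPC sources: a server-side
// handler built on this package would typically stage metadata with
// SetHeader/SetTrailer and flush headers explicitly with SendHeader; the
// trailer then travels with the final status written by WriteStatus. The
// function name and metadata values are hypothetical.
func exampleServerMetadata(s *Stream) error {
	// Stage header metadata; nothing is written to the wire yet.
	if err := s.SetHeader(metadata.Pairs("cache-control", "no-store")); err != nil {
		return err
	}
	// Flush the accumulated headers to the client.
	if err := s.SendHeader(nil); err != nil {
		return err
	}
	// Trailer metadata is sent together with the final RPC status.
	return s.SetTrailer(metadata.Pairs("server-duration", "42ms"))
}
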
func (s *Stream) write(m recvMsg) {
	s.buf.put(m)
}

// Read reads exactly len(p) bytes from the wire for this stream; it blocks
// until that much data has been received or an error occurs.
func (s *Stream) Read(p []byte) (n int, err error) {
	// Don't request a read if there was an error earlier
	if er := s.trReader.(*transportReader).er; er != nil {
		return 0, er
	}
	s.requestRead(len(p))
	return io.ReadFull(s.trReader, p)
}

// transportReader reads all the data available for this Stream from the transport and
// passes it into the decoder, which converts it into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
	reader io.Reader
	// The handler to control the window update procedure for both this
	// particular stream and the associated transport.
	windowHandler func(int)
	er            error
}

func (t *transportReader) Read(p []byte) (n int, err error) {
	n, err = t.reader.Read(p)
	if err != nil {
		t.er = err
		return
	}
	t.windowHandler(n)
	return
}

// BytesReceived indicates whether any bytes have been received on this stream.
func (s *Stream) BytesReceived() bool {
	return atomic.LoadUint32(&s.bytesReceived) == 1
}

// Unprocessed indicates whether the server did not process this stream --
// i.e. it sent a refused stream or GOAWAY including this stream ID.
func (s *Stream) Unprocessed() bool {
	return atomic.LoadUint32(&s.unprocessed) == 1
}

// GoString is implemented by Stream so context.String() won't
// race when printing %#v.
func (s *Stream) GoString() string {
	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
}

// state of transport
type transportState int

const (
	reachable transportState = iota
	closing
	draining
)

// ServerConfig consists of all the configurations to establish a server transport.
type ServerConfig struct {
	MaxStreams            uint32
	ConnectionTimeout     time.Duration
	Credentials           credentials.TransportCredentials
	InTapHandle           tap.ServerInHandle
	StatsHandlers         []stats.Handler
	KeepaliveParams       keepalive.ServerParameters
	KeepalivePolicy       keepalive.EnforcementPolicy
	InitialWindowSize     int32
	InitialConnWindowSize int32
	WriteBufferSize       int
	ReadBufferSize        int
	ChannelzParentID      *channelz.Identifier
	MaxHeaderListSize     *uint32
	HeaderTableSize       *uint32
}

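// Illustrative sketch, not part of the vendored gRPC sources: roughly how a
// server might populate ServerConfig before creating a server transport for
// an accepted connection. The function name and all values are hypothetical.
func exampleServerConfig(creds credentials.TransportCredentials) *ServerConfig {
	return &ServerConfig{
		MaxStreams:        100,               // cap on concurrent streams per connection
		ConnectionTimeout: 120 * time.Second, // time allowed to finish connection setup
		Credentials:       creds,
		KeepaliveParams: keepalive.ServerParameters{
			Time:    2 * time.Hour, // ping an idle client after this long
			Timeout: 20 * time.Second,
		},
		InitialWindowSize:     64 * 1024, // per-stream flow-control window
		InitialConnWindowSize: 1 << 20,   // per-connection flow-control window
		WriteBufferSize:       32 * 1024,
		ReadBufferSize:        32 * 1024,
	}
}
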
// ConnectOptions covers all relevant options for communicating with the server.
type ConnectOptions struct {
	// UserAgent is the application user agent.
	UserAgent string
	// Dialer specifies how to dial a network address.
	Dialer func(context.Context, string) (net.Conn, error)
	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
	FailOnNonTempDialError bool
	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
	PerRPCCredentials []credentials.PerRPCCredentials
	// TransportCredentials stores the Authenticator required to setup a client
	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
	TransportCredentials credentials.TransportCredentials
	// CredsBundle is the credentials bundle to be used. Only one of
	// TransportCredentials and CredsBundle is non-nil.
	CredsBundle credentials.Bundle
	// KeepaliveParams stores the keepalive parameters.
	KeepaliveParams keepalive.ClientParameters
	// StatsHandlers stores the handlers for stats.
	StatsHandlers []stats.Handler
	// InitialWindowSize sets the initial window size for a stream.
	InitialWindowSize int32
	// InitialConnWindowSize sets the initial window size for a connection.
	InitialConnWindowSize int32
	// WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it's written on the wire.
	WriteBufferSize int
	// ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
	ReadBufferSize int
	// ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
	ChannelzParentID *channelz.Identifier
	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
	MaxHeaderListSize *uint32
	// UseProxy specifies if a proxy should be used.
	UseProxy bool
}

// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
	return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
}

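// Illustrative sketch, not part of the vendored gRPC sources: roughly how the
// grpc package wires ConnectOptions into NewClientTransport and then opens a
// stream. The address, method name, user agent and onClose behavior are
// hypothetical.
func exampleDial(connectCtx context.Context, creds credentials.TransportCredentials) (*Stream, error) {
	opts := ConnectOptions{
		UserAgent:            "example-client/0.1",
		TransportCredentials: creds,
		KeepaliveParams:      keepalive.ClientParameters{Time: 30 * time.Second},
	}
	addr := resolver.Address{Addr: "127.0.0.1:50051"}
	onClose := func(r GoAwayReason) {
		// React to the transport shutting down, e.g. trigger a reconnect.
	}
	// connectCtx bounds connection establishment; the second context lives
	// for the whole lifetime of the transport.
	ct, err := NewClientTransport(connectCtx, context.Background(), addr, opts, onClose)
	if err != nil {
		return nil, err
	}
	return ct.NewStream(connectCtx, &CallHdr{
		Host:   "127.0.0.1:50051",
		Method: "/example.Echo/UnaryCall",
	})
}
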
// Options provides additional hints and information for message
// transmission.
type Options struct {
	// Last indicates whether this write is the last piece for
	// this stream.
	Last bool
}

// CallHdr carries the information of a particular RPC.
type CallHdr struct {
	// Host specifies the peer's host.
	Host string

	// Method specifies the operation to perform.
	Method string

	// SendCompress specifies the compression algorithm applied on
	// outbound message.
	SendCompress string

	// Creds specifies credentials.PerRPCCredentials for a call.
	Creds credentials.PerRPCCredentials

	// ContentSubtype specifies the content-subtype for a request. For example, a
	// content-subtype of "proto" will result in a content-type of
	// "application/grpc+proto". The value of ContentSubtype must be all
	// lowercase, otherwise the behavior is undefined. See
	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
	// for more details.
	ContentSubtype string

	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set

	DoneFunc func() // called when the stream is finished
}

// ClientTransport is the common interface for all gRPC client-side transport
// implementations.
type ClientTransport interface {
	// Close tears down this transport. Once it returns, the transport
	// should not be accessed any more. The caller must make sure this
	// is called only once.
	Close(err error)

	// GracefulClose starts to tear down the transport: the transport will stop
	// accepting new RPCs and NewStream will return an error. Once all streams are
	// finished, the transport will close.
	//
	// It does not block.
	GracefulClose()

	// Write sends the data for the given stream. A nil stream indicates
	// the write is to be performed on the transport as a whole.
	Write(s *Stream, hdr []byte, data []byte, opts *Options) error

	// NewStream creates a Stream for an RPC.
	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)

	// CloseStream clears the footprint of a stream when the stream is
	// not needed any more. The err indicates the error incurred when
	// CloseStream is called. Must be called when a stream is finished
	// unless the associated transport is closing.
	CloseStream(stream *Stream, err error)

	// Error returns a channel that is closed when some I/O error
	// happens. Typically the caller should have a goroutine to monitor
	// this in order to take action (e.g., close the current transport
	// and create a new one) in the error case. It should not return nil
	// once the transport is initiated.
	Error() <-chan struct{}

	// GoAway returns a channel that is closed when ClientTransport
	// receives the draining signal from the server (e.g., GOAWAY frame in
	// HTTP/2).
	GoAway() <-chan struct{}

	// GetGoAwayReason returns the reason why GoAway frame was received, along
	// with a human readable string with debug info.
	GetGoAwayReason() (GoAwayReason, string)

	// RemoteAddr returns the remote network address.
	RemoteAddr() net.Addr

	// IncrMsgSent increments the number of messages sent through this transport.
	IncrMsgSent()

	// IncrMsgRecv increments the number of messages received through this transport.
	IncrMsgRecv()
}

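// Illustrative sketch, not part of the vendored gRPC sources: a typical owner
// of a ClientTransport watches the Error and GoAway channels in a dedicated
// goroutine and reacts by reconnecting or draining. The function name and the
// reconnect/drain callbacks are hypothetical.
func exampleMonitorTransport(ct ClientTransport, reconnect, drain func()) {
	go func() {
		select {
		case <-ct.Error():
			// An I/O error killed the connection: close it and dial a new one.
			ct.Close(ErrConnClosing)
			reconnect()
		case <-ct.GoAway():
			// The server asked us to drain: stop accepting new RPCs but let
			// the in-flight ones finish.
			reason, debug := ct.GetGoAwayReason()
			_ = reason
			_ = debug
			ct.GracefulClose()
			drain()
		}
	}()
}
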
// ServerTransport is the common interface for all gRPC server-side transport
// implementations.
//
// Methods may be called concurrently from multiple goroutines, but
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
	// HandleStreams receives incoming streams using the given handler.
	HandleStreams(func(*Stream), func(context.Context, string) context.Context)

	// WriteHeader sends the header metadata for the given stream.
	// WriteHeader may not be called on all streams.
	WriteHeader(s *Stream, md metadata.MD) error

	// Write sends the data for the given stream.
	// Write may not be called on all streams.
	Write(s *Stream, hdr []byte, data []byte, opts *Options) error

	// WriteStatus sends the status of a stream to the client. WriteStatus is
	// the final call made on a stream and always occurs.
	WriteStatus(s *Stream, st *status.Status) error

	// Close tears down the transport. Once it is called, the transport
	// should not be accessed any more. All the pending streams and their
	// handlers will be terminated asynchronously.
	Close(err error)

	// RemoteAddr returns the remote network address.
	RemoteAddr() net.Addr

	// Drain notifies the client that this ServerTransport stops accepting new RPCs.
	Drain()

	// IncrMsgSent increments the number of messages sent through this transport.
	IncrMsgSent()

	// IncrMsgRecv increments the number of messages received through this transport.
	IncrMsgRecv()
}

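// Illustrative sketch, not part of the vendored gRPC sources: the shape of the
// loop a server runs against a ServerTransport. HandleStreams blocks and
// invokes the stream handler for every new RPC, and each RPC is finished with
// WriteStatus. The function name and the handleRPC callback are hypothetical.
func exampleServe(st ServerTransport, handleRPC func(ServerTransport, *Stream) *status.Status) {
	st.HandleStreams(
		func(s *Stream) {
			// One goroutine per stream; WriteStatus is always the final call.
			go func() {
				st.WriteStatus(s, handleRPC(st, s))
			}()
		},
		// The second callback lets callers decorate the stream context,
		// e.g. for tracing; here it is a no-op.
		func(ctx context.Context, method string) context.Context { return ctx },
	)
}
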
// connectionErrorf creates a ConnectionError with the specified error description.
func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
	return ConnectionError{
		Desc: fmt.Sprintf(format, a...),
		temp: temp,
		err:  e,
	}
}

// ConnectionError is an error that results in the termination of the
// entire connection and the retry of all the active streams.
type ConnectionError struct {
	Desc string
	temp bool
	err  error
}

func (e ConnectionError) Error() string {
	return fmt.Sprintf("connection error: desc = %q", e.Desc)
}

// Temporary indicates if this connection error is temporary or fatal.
func (e ConnectionError) Temporary() bool {
	return e.temp
}

// Origin returns the original error of this connection error.
func (e ConnectionError) Origin() error {
	// Never return a nil error here.
	// If the original error is nil, return the ConnectionError itself.
	if e.err == nil {
		return e
	}
	return e.err
}

// Unwrap returns the original error of this connection error or nil when the
// origin is nil.
func (e ConnectionError) Unwrap() error {
	return e.err
}

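// Illustrative sketch, not part of the vendored gRPC sources: callers usually
// decide whether a failed operation is worth retrying by checking whether the
// error is a ConnectionError marked temporary. The function name is
// hypothetical.
func exampleShouldRetry(err error) bool {
	var ce ConnectionError
	if errors.As(err, &ce) {
		// Retry only connection errors flagged as temporary, e.g.
		// ErrConnClosing produced while the transport is shutting down.
		return ce.Temporary()
	}
	return false
}
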
var (
	// ErrConnClosing indicates that the transport is closing.
	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
	// errStreamDrain indicates that the stream is rejected because the
	// connection is draining. This could be caused by goaway or balancer
	// removing the address.
	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
	// errStreamDone is returned from write at the client side to indicate
	// to the application layer that the stream is done.
	errStreamDone = errors.New("the stream is done")
	// statusGoAway indicates that the server sent a GOAWAY that included this
	// stream's ID in unprocessed RPCs.
	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
)

// GoAwayReason contains the reason for the GoAway frame received.
type GoAwayReason uint8

const (
	// GoAwayInvalid indicates that no GoAway frame is received.
	GoAwayInvalid GoAwayReason = 0
	// GoAwayNoReason is the default value when GoAway frame is received.
	GoAwayNoReason GoAwayReason = 1
	// GoAwayTooManyPings indicates that a GoAway frame with
	// ErrCodeEnhanceYourCalm was received and that the debug data said
	// "too_many_pings".
	GoAwayTooManyPings GoAwayReason = 2
)

// channelzData is used to store channelz related data for http2Client and http2Server.
// These fields cannot be embedded in the original structs (e.g. http2Client), since
// performing atomic operations on an int64 variable on a 32-bit machine requires the
// user to enforce memory alignment.
// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
type channelzData struct {
	kpCount int64
	// The number of streams that have started, including already finished ones.
	streamsStarted int64
	// Client side: The number of streams that have ended successfully by receiving
	// a frame with the EoS bit set from the server.
	// Server side: The number of streams that have ended successfully by sending
	// a frame with the EoS bit set.
	streamsSucceeded int64
	streamsFailed    int64
	// lastStreamCreatedTime stores the timestamp at which the last stream was created.
	// It is of int64 type instead of time.Time since it's more costly to atomically
	// update a time.Time variable than an int64 variable. The same goes for
	// lastMsgSentTime and lastMsgRecvTime.
	lastStreamCreatedTime int64
	msgSent               int64
	msgRecv               int64
	lastMsgSentTime       int64
	lastMsgRecvTime       int64
}

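// Illustrative sketch, not part of the vendored gRPC sources: because
// channelzData groups all of its int64 counters in one struct, they stay
// 8-byte aligned and can be updated atomically even on 32-bit platforms, as
// in this hypothetical helper.
func exampleRecordMsgSent(cz *channelzData) {
	atomic.AddInt64(&cz.msgSent, 1)
	atomic.StoreInt64(&cz.lastMsgSentTime, time.Now().UnixNano())
}
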
// ContextErr converts an error from the context package into a status error.
func ContextErr(err error) error {
	switch err {
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.Error(codes.Canceled, err.Error())
	}
	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
}
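// Illustrative sketch, not part of the vendored gRPC sources: ContextErr is
// how a cancelled or expired stream context is turned into the status the
// client ultimately sees. The function name is hypothetical.
func exampleStreamContextError(s *Stream) error {
	if err := s.Context().Err(); err != nil {
		// Maps context.Canceled -> codes.Canceled and
		// context.DeadlineExceeded -> codes.DeadlineExceeded; anything else
		// becomes codes.Internal.
		return ContextErr(err)
	}
	return nil
}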
40
vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
generated
vendored
Normal file
40
vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
/*
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package internal

import (
	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

// handshakeClusterNameKey is the type used as the key to store cluster name in
// the Attributes field of resolver.Address.
type handshakeClusterNameKey struct{}

// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
// is updated with the cluster name.
func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
	return addr
}

// GetXDSHandshakeClusterName returns the cluster name stored in attr.
func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
	v := attr.Value(handshakeClusterNameKey{})
	name, ok := v.(string)
	return name, ok
}
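// Illustrative sketch, not part of the vendored gRPC sources: the two helpers
// above are meant to be used as a pair, with one component stamping the
// cluster name onto a resolver.Address and another reading it back during the
// handshake. The function name and address value are hypothetical.
func exampleClusterNameRoundTrip() (string, bool) {
	addr := resolver.Address{Addr: "10.0.0.1:443"}
	addr = SetXDSHandshakeClusterName(addr, "cluster-a")
	return GetXDSHandshakeClusterName(addr.Attributes)
}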