Mirror of https://github.com/superseriousbusiness/gotosocial (synced 2025-06-05 21:59:39 +02:00)
[chore] update dependencies (#4196)

- go.opentelemetry.io/contrib/exporters/autoexport v0.60.0 -> v0.61.0
- go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0 -> v0.61.0

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4196
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
go.mod (50 changed lines)

@@ -75,8 +75,8 @@ require (
 	github.com/uptrace/bun/extra/bunotel v1.2.11
 	github.com/wagslane/go-password-validator v0.3.0
 	github.com/yuin/goldmark v1.7.12
-	go.opentelemetry.io/contrib/exporters/autoexport v0.60.0
-	go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0
+	go.opentelemetry.io/contrib/exporters/autoexport v0.61.0
+	go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0
 	go.opentelemetry.io/otel v1.36.0
 	go.opentelemetry.io/otel/metric v1.36.0
 	go.opentelemetry.io/otel/sdk v1.36.0
@@ -110,7 +110,7 @@ require (
 	github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
 	github.com/bytedance/sonic v1.13.2 // indirect
 	github.com/bytedance/sonic/loader v0.2.4 // indirect
-	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/cloudwego/base64x v0.1.5 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -154,7 +154,7 @@ require (
 	github.com/gorilla/handlers v1.5.2 // indirect
 	github.com/gorilla/securecookie v1.1.2 // indirect
 	github.com/gorilla/sessions v1.4.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
 	github.com/huandu/xstrings v1.4.0 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -187,9 +187,9 @@ require (
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.22.0 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.64.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
 	github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
@@ -211,30 +211,30 @@ require (
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
 	go.mongodb.org/mongo-driver v1.17.3 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/contrib/bridges/prometheus v0.60.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect
-	go.opentelemetry.io/otel/exporters/prometheus v0.57.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 // indirect
-	go.opentelemetry.io/otel/log v0.11.0 // indirect
-	go.opentelemetry.io/otel/sdk/log v0.11.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.5.0 // indirect
+	go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/prometheus v0.58.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 // indirect
+	go.opentelemetry.io/otel/log v0.12.2 // indirect
+	go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect
+	go.opentelemetry.io/proto/otlp v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/arch v0.16.0 // indirect
 	golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
 	golang.org/x/mod v0.24.0 // indirect
 	golang.org/x/sync v0.14.0 // indirect
 	golang.org/x/tools v0.33.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
-	google.golang.org/grpc v1.71.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
+	google.golang.org/grpc v1.72.1 // indirect
 	google.golang.org/protobuf v1.36.6 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	modernc.org/libc v1.65.7 // indirect
go.sum (102 changed lines, generated)

@@ -88,8 +88,8 @@ github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1
 github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
 github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
 github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
+github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
@@ -245,8 +245,8 @@ github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzq
 github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik=
 github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
 github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
 github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
 github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
 github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
@@ -352,12 +352,12 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4
 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
 github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
 github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
+github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
 github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
 github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
 github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc=
@@ -499,50 +499,52 @@ go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeH
 go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
 go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/bridges/prometheus v0.60.0 h1:x7sPooQCwSg27SjtQee8GyIIRTQcF4s7eSkac6F2+VA=
-go.opentelemetry.io/contrib/bridges/prometheus v0.60.0/go.mod h1:4K5UXgiHxV484efGs42ejD7E2J/sIlepYgdGoPXe7hE=
-go.opentelemetry.io/contrib/exporters/autoexport v0.60.0 h1:GuQXpvSXNjpswpweIem84U9BNauqHHi2w1GtNAalvpM=
-go.opentelemetry.io/contrib/exporters/autoexport v0.60.0/go.mod h1:CkmxekdHco4d7thFJNPQ7Mby4jMBgZUclnrxT4e+ryk=
-go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0 h1:0NgN/3SYkqYJ9NBlDfl/2lzVlwos/YQLvi8sUrzJRBE=
-go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0/go.mod h1:oxpUfhTkhgQaYIjtBt3T3w135dLoxq//qo3WPlPIKkE=
+go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M=
+go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY=
+go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI=
+go.opentelemetry.io/contrib/exporters/autoexport v0.61.0/go.mod h1:N6otC+qXTD5bAnbK2O1f/1SXq3cX+3KYSWrkBUqG0cw=
+go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0 h1:oIZsTHd0YcrvvUCN2AaQqyOcd685NQ+rFmrajveCIhA=
+go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0/go.mod h1:X4KSPIvxnY/G5c9UOGXtFoL91t1gmlHpDQzeK5Zc/Bw=
 go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
 go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 h1:HMUytBT3uGhPKYY/u/G5MR9itrlSO2SMOsSD3Tk3k7A=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0/go.mod h1:hdDXsiNLmdW/9BF2jQpnHHlhFajpWCEYfM6e5m2OAZg=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 h1:C/Wi2F8wEmbxJ9Kuzw/nhP+Z9XaHYMkyDmXy6yR2cjw=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0/go.mod h1:0Lr9vmGKzadCTgsiBydxr6GEZ8SsZ7Ks53LzjWG5Ar4=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NIXxOCFx+SKbhCVxwl3ETG8ClLPAa0KuKV6p3yhxP8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
-go.opentelemetry.io/otel/exporters/prometheus v0.57.0 h1:AHh/lAP1BHrY5gBwk8ncc25FXWm/gmmY3BX258z5nuk=
-go.opentelemetry.io/otel/exporters/prometheus v0.57.0/go.mod h1:QpFWz1QxqevfjwzYdbMb4Y1NnlJvqSGwyuU0B4iuc9c=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 h1:k6KdfZk72tVW/QVZf60xlDziDvYAePj5QHwoQvrB2m8=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0/go.mod h1:5Y3ZJLqzi/x/kYtrSrPSx7TFI/SGsL7q2kME027tH6I=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 h1:T0Ec2E+3YZf5bgTNQVet8iTDW7oIk03tXHq+wkwIDnE=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0/go.mod h1:30v2gqH+vYGJsesLWFov8u47EpYTcIQcBjKpI6pJThg=
-go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y=
-go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2/go.mod h1:QTnxBwT/1rBIgAG1goq6xMydfYOBKU6KTiYF4fp5zL8=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwdo1gS2eH26Rg+CoqVQpEK1h8gvt5qyU5Kk5Bixvow=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
+go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8=
+go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY=
+go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc=
+go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E=
 go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
 go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
 go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
 go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
-go.opentelemetry.io/otel/sdk/log v0.11.0 h1:7bAOpjpGglWhdEzP8z0VXc4jObOiDEwr3IYbhBnjk2c=
-go.opentelemetry.io/otel/sdk/log v0.11.0/go.mod h1:dndLTxZbwBstZoqsJB3kGsRPkpAgaJrWfQg3lhlHFFY=
+go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0=
+go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY=
+go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0=
+go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE=
 go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
 go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
 go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
 go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
-go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
-go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
+go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
+go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
 go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
 go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -650,12 +652,12 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb
 golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
 golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
-google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
-google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
-google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
+google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0=
+google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
+google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
 google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
 google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
vendor/github.com/cenkalti/backoff/v4/context.go (62 changed lines, generated, vendored)

@@ -1,62 +0,0 @@
package backoff

import (
	"context"
	"time"
)

// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface { // nolint: golint
	BackOff
	Context() context.Context
}

type backOffContext struct {
	BackOff
	ctx context.Context
}

// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
	if ctx == nil {
		panic("nil context")
	}

	if b, ok := b.(*backOffContext); ok {
		return &backOffContext{
			BackOff: b.BackOff,
			ctx:     ctx,
		}
	}

	return &backOffContext{
		BackOff: b,
		ctx:     ctx,
	}
}

func getContext(b BackOff) context.Context {
	if cb, ok := b.(BackOffContext); ok {
		return cb.Context()
	}
	if tb, ok := b.(*backOffTries); ok {
		return getContext(tb.delegate)
	}
	return context.Background()
}

func (b *backOffContext) Context() context.Context {
	return b.ctx
}

func (b *backOffContext) NextBackOff() time.Duration {
	select {
	case <-b.ctx.Done():
		return Stop
	default:
		return b.BackOff.NextBackOff()
	}
}
vendor/github.com/cenkalti/backoff/v4/exponential.go (216 changed lines, generated, vendored)

@@ -1,216 +0,0 @@
package backoff

import (
	"math/rand"
	"time"
)

/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.

NextBackOff() is calculated using the following formula:

	randomized interval =
		RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])

In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.

For example, given the following parameters:

	RetryInterval = 2
	RandomizationFactor = 0.5
	Multiplier = 2

the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.

Note: MaxInterval caps the RetryInterval and not the randomized interval.

If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.

The elapsed time can be reset by calling Reset().

Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:

	Request #  RetryInterval (seconds)  Randomized Interval (seconds)

	 1          0.5                     [0.25,   0.75]
	 2          0.75                    [0.375,  1.125]
	 3          1.125                   [0.562,  1.687]
	 4          1.687                   [0.8435, 2.53]
	 5          2.53                    [1.265,  3.795]
	 6          3.795                   [1.897,  5.692]
	 7          5.692                   [2.846,  8.538]
	 8          8.538                   [4.269, 12.807]
	 9         12.807                   [6.403, 19.210]
	10         19.210                   backoff.Stop

Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
	InitialInterval     time.Duration
	RandomizationFactor float64
	Multiplier          float64
	MaxInterval         time.Duration
	// After MaxElapsedTime the ExponentialBackOff returns Stop.
	// It never stops if MaxElapsedTime == 0.
	MaxElapsedTime time.Duration
	Stop           time.Duration
	Clock          Clock

	currentInterval time.Duration
	startTime       time.Time
}

// Clock is an interface that returns current time for BackOff.
type Clock interface {
	Now() time.Time
}

// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
type ExponentialBackOffOpts func(*ExponentialBackOff)

// Default values for ExponentialBackOff.
const (
	DefaultInitialInterval     = 500 * time.Millisecond
	DefaultRandomizationFactor = 0.5
	DefaultMultiplier          = 1.5
	DefaultMaxInterval         = 60 * time.Second
	DefaultMaxElapsedTime      = 15 * time.Minute
)

// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
	b := &ExponentialBackOff{
		InitialInterval:     DefaultInitialInterval,
		RandomizationFactor: DefaultRandomizationFactor,
		Multiplier:          DefaultMultiplier,
		MaxInterval:         DefaultMaxInterval,
		MaxElapsedTime:      DefaultMaxElapsedTime,
		Stop:                Stop,
		Clock:               SystemClock,
	}
	for _, fn := range opts {
		fn(b)
	}
	b.Reset()
	return b
}

// WithInitialInterval sets the initial interval between retries.
func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.InitialInterval = duration
	}
}

// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.RandomizationFactor = randomizationFactor
	}
}

// WithMultiplier sets the multiplier for increasing the interval after each retry.
func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Multiplier = multiplier
	}
}

// WithMaxInterval sets the maximum interval between retries.
func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.MaxInterval = duration
	}
}

// WithMaxElapsedTime sets the maximum total time for retries.
func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.MaxElapsedTime = duration
	}
}

// WithRetryStopDuration sets the duration after which retries should stop.
func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Stop = duration
	}
}

// WithClockProvider sets the clock used to measure time.
func WithClockProvider(clock Clock) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Clock = clock
	}
}

type systemClock struct{}

func (t systemClock) Now() time.Time {
	return time.Now()
}

// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}

// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
	b.currentInterval = b.InitialInterval
	b.startTime = b.Clock.Now()
}

// NextBackOff calculates the next backoff interval using the formula:
//	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
	// Make sure we have not gone over the maximum elapsed time.
	elapsed := b.GetElapsedTime()
	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
	b.incrementCurrentInterval()
	if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
		return b.Stop
	}
	return next
}

// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using time.Now().UnixNano(). It is
// safe to call even while the backoff policy is used by a running
// ticker.
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
	return b.Clock.Now().Sub(b.startTime)
}

// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
	// Check for overflow, if overflow is detected set the current interval to the max interval.
	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
		b.currentInterval = b.MaxInterval
	} else {
		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
	}
}

// Returns a random value from the following interval:
//	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
	if randomizationFactor == 0 {
		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
	}
	var delta = randomizationFactor * float64(currentInterval)
	var minInterval = float64(currentInterval) - delta
	var maxInterval = float64(currentInterval) + delta

	// Get a random value from the range [minInterval, maxInterval].
	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
	// we want a 33% chance for selecting either 1, 2 or 3.
	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
vendor/github.com/cenkalti/backoff/v4/retry.go (146 changed lines, generated, vendored)

@@ -1,146 +0,0 @@
package backoff

import (
	"errors"
	"time"
)

// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
// The operation will be retried using a backoff policy if it returns an error.
type OperationWithData[T any] func() (T, error)

// An Operation is executing by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error

func (o Operation) withEmptyData() OperationWithData[struct{}] {
	return func() (struct{}, error) {
		return struct{}{}, o()
	}
}

// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)

// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
	return RetryNotify(o, b, nil)
}

// RetryWithData is like Retry but returns data in the response too.
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
	return RetryNotifyWithData(o, b, nil)
}

// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
	return RetryNotifyWithTimer(operation, b, notify, nil)
}

// RetryNotifyWithData is like RetryNotify but returns data in the response too.
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
	return doRetryNotify(operation, b, notify, nil)
}

// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
	_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
	return err
}

// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
	return doRetryNotify(operation, b, notify, t)
}

func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
	var (
		err  error
		next time.Duration
		res  T
	)
	if t == nil {
		t = &defaultTimer{}
	}

	defer func() {
		t.Stop()
	}()

	ctx := getContext(b)

	b.Reset()
	for {
		res, err = operation()
		if err == nil {
			return res, nil
		}

		var permanent *PermanentError
		if errors.As(err, &permanent) {
			return res, permanent.Err
		}

		if next = b.NextBackOff(); next == Stop {
			if cerr := ctx.Err(); cerr != nil {
				return res, cerr
			}

			return res, err
		}

		if notify != nil {
			notify(err, next)
		}

		t.Start(next)

		select {
		case <-ctx.Done():
			return res, ctx.Err()
		case <-t.C():
		}
	}
}

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
	Err error
}

func (e *PermanentError) Error() string {
	return e.Err.Error()
}

func (e *PermanentError) Unwrap() error {
	return e.Err
}

func (e *PermanentError) Is(target error) bool {
	_, ok := target.(*PermanentError)
	return ok
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
	if err == nil {
		return nil
	}
	return &PermanentError{
		Err: err,
	}
}
vendor/github.com/cenkalti/backoff/v4/tries.go (38 changed lines, generated, vendored)

@@ -1,38 +0,0 @@
package backoff

import "time"

/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called

Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
	return &backOffTries{delegate: b, maxTries: max}
}

type backOffTries struct {
	delegate BackOff
	maxTries uint64
	numTries uint64
}

func (b *backOffTries) NextBackOff() time.Duration {
	if b.maxTries == 0 {
		return Stop
	}
	if b.maxTries > 0 {
		if b.maxTries <= b.numTries {
			return Stop
		}
		b.numTries++
	}
	return b.delegate.NextBackOff()
}

func (b *backOffTries) Reset() {
	b.numTries = 0
	b.delegate.Reset()
}
vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md (29 changed lines, generated, vendored, new file)

@@ -0,0 +1,29 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [5.0.0] - 2024-12-19

### Added

- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.

### Changed

- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
- Retry function now accepts a context.Context.
- Operation function signature changed to return result (any type) and error.

### Removed

- RetryNotify* and RetryWithData functions. Only single Retry function remains.
- Optional arguments from ExponentialBackoff constructor.
- Clock and Timer interfaces.

### Fixed

- The original error is returned from Retry if there's a PermanentError. (#144)
- The Retry function respects the wrapped PermanentError. (#140)
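The changelog above summarizes the v4 -> v5 API break that this dependency bump pulls in. As a non-authoritative illustration of what migrating a call site looks like, here is a minimal sketch using only the `Retry`, `Operation`, and `WithMaxTries` definitions added by this commit; `fetchToken` and its behaviour are hypothetical:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

// fetchToken is a hypothetical operation; a v5 Operation[T] returns a value and an error.
func fetchToken() (string, error) {
	return "", errors.New("transient failure") // illustrative only
}

func main() {
	// v4 (removed by this commit) looked roughly like:
	//   token, err := backoff.RetryWithData(fetchToken, backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5))
	//
	// v5: a context plus per-call options replace the BackOff wrappers.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	token, err := backoff.Retry(ctx, fetchToken, backoff.WithMaxTries(5))
	if err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("token:", token)
}
```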
@@ -1,4 +1,4 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+# Exponential Backoff [![GoDoc][godoc image]][godoc]
 
 This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
 
@@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold
 
 ## Usage
 
-Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.
 
-Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+For most cases, use `Retry` function. See [example_test.go][example] for an example.
+
+If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.
 
 ## Contributing
 
@@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
 * Please don't send a PR without opening an issue and discussing it first.
 * If proposed change is not a common use case, I will probably not accept it.
 
-[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
-[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
 
 [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
 [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
 
-[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
+[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
+[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go
@@ -15,12 +15,12 @@ import "time"
 // BackOff is a backoff policy for retrying an operation.
 type BackOff interface {
 	// NextBackOff returns the duration to wait before retrying the operation,
-	// or backoff. Stop to indicate that no more retries should be made.
+	// backoff.Stop to indicate that no more retries should be made.
 	//
 	// Example usage:
 	//
-	// 	duration := backoff.NextBackOff();
-	// 	if (duration == backoff.Stop) {
+	// 	duration := backoff.NextBackOff()
+	// 	if duration == backoff.Stop {
 	// 		// Do not retry operation.
 	// 	} else {
 	// 		// Sleep for duration and retry operation.
vendor/github.com/cenkalti/backoff/v5/error.go (46 changed lines, generated, vendored, new file)

@@ -0,0 +1,46 @@
package backoff

import (
	"fmt"
	"time"
)

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
	Err error
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
	if err == nil {
		return nil
	}
	return &PermanentError{
		Err: err,
	}
}

// Error returns a string representation of the Permanent error.
func (e *PermanentError) Error() string {
	return e.Err.Error()
}

// Unwrap returns the wrapped error.
func (e *PermanentError) Unwrap() error {
	return e.Err
}

// RetryAfterError signals that the operation should be retried after the given duration.
type RetryAfterError struct {
	Duration time.Duration
}

// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying.
func RetryAfter(seconds int) error {
	return &RetryAfterError{Duration: time.Duration(seconds) * time.Second}
}

// Error returns a string representation of the RetryAfter error.
func (e *RetryAfterError) Error() string {
	return fmt.Sprintf("retry after %s", e.Duration)
}
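For orientation only (not part of the diff), a small sketch of how these two error types steer the v5 retry loop; the flaky `op` below is made up for illustration:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	attempt := 0
	op := func() (int, error) {
		attempt++
		switch attempt {
		case 1:
			// Ask the retry loop to wait 2 seconds before the next attempt.
			return 0, backoff.RetryAfter(2)
		case 2:
			// Give up immediately; Retry returns this error without further attempts.
			return 0, backoff.Permanent(errors.New("bad request"))
		default:
			return attempt, nil
		}
	}

	_, err := backoff.Retry(context.Background(), op)
	fmt.Println(err) // prints the permanent "bad request" error
}
```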
vendor/github.com/cenkalti/backoff/v5/exponential.go (125 changed lines, generated, vendored, new file)

@@ -0,0 +1,125 @@
package backoff

import (
	"math/rand"
	"time"
)

/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.

NextBackOff() is calculated using the following formula:

	randomized interval =
		RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])

In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.

For example, given the following parameters:

	RetryInterval = 2
	RandomizationFactor = 0.5
	Multiplier = 2

the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.

Note: MaxInterval caps the RetryInterval and not the randomized interval.

If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.

The elapsed time can be reset by calling Reset().

Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:

	Request #  RetryInterval (seconds)  Randomized Interval (seconds)

	 1          0.5                     [0.25,   0.75]
	 2          0.75                    [0.375,  1.125]
	 3          1.125                   [0.562,  1.687]
	 4          1.687                   [0.8435, 2.53]
	 5          2.53                    [1.265,  3.795]
	 6          3.795                   [1.897,  5.692]
	 7          5.692                   [2.846,  8.538]
	 8          8.538                   [4.269, 12.807]
	 9         12.807                   [6.403, 19.210]
	10         19.210                   backoff.Stop

Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
	InitialInterval     time.Duration
	RandomizationFactor float64
	Multiplier          float64
	MaxInterval         time.Duration

	currentInterval time.Duration
}

// Default values for ExponentialBackOff.
const (
	DefaultInitialInterval     = 500 * time.Millisecond
	DefaultRandomizationFactor = 0.5
	DefaultMultiplier          = 1.5
	DefaultMaxInterval         = 60 * time.Second
)

// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
	return &ExponentialBackOff{
		InitialInterval:     DefaultInitialInterval,
		RandomizationFactor: DefaultRandomizationFactor,
		Multiplier:          DefaultMultiplier,
		MaxInterval:         DefaultMaxInterval,
	}
}

// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
	b.currentInterval = b.InitialInterval
}

// NextBackOff calculates the next backoff interval using the formula:
//
//	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
	if b.currentInterval == 0 {
		b.currentInterval = b.InitialInterval
	}

	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
	b.incrementCurrentInterval()
	return next
}

// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
	// Check for overflow, if overflow is detected set the current interval to the max interval.
	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
		b.currentInterval = b.MaxInterval
	} else {
		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
	}
}

// Returns a random value from the following interval:
//
//	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
	if randomizationFactor == 0 {
		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
	}
	var delta = randomizationFactor * float64(currentInterval)
	var minInterval = float64(currentInterval) - delta
	var maxInterval = float64(currentInterval) + delta

	// Get a random value from the range [minInterval, maxInterval].
	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
	// we want a 33% chance for selecting either 1, 2 or 3.
	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
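As a side note (not part of the diff), the growth behaviour of this vendored v5 ExponentialBackOff can be observed with a few lines; the sketch only uses the constructor and NextBackOff shown above:

```go
package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	b := backoff.NewExponentialBackOff() // 500ms start, x1.5 growth, +/-50% jitter, 60s cap

	// Each call returns a jittered interval; the underlying interval grows by Multiplier
	// until it reaches MaxInterval. Unlike v4, v5 never returns Stop from here.
	for attempt := 1; attempt <= 5; attempt++ {
		fmt.Printf("attempt %d: wait %v\n", attempt, b.NextBackOff())
	}
}
```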
vendor/github.com/cenkalti/backoff/v5/retry.go (139 changed lines, generated, vendored, new file)

@@ -0,0 +1,139 @@
package backoff

import (
	"context"
	"errors"
	"time"
)

// DefaultMaxElapsedTime sets a default limit for the total retry duration.
const DefaultMaxElapsedTime = 15 * time.Minute

// Operation is a function that attempts an operation and may be retried.
type Operation[T any] func() (T, error)

// Notify is a function called on operation error with the error and backoff duration.
type Notify func(error, time.Duration)

// retryOptions holds configuration settings for the retry mechanism.
type retryOptions struct {
	BackOff        BackOff       // Strategy for calculating backoff periods.
	Timer          timer         // Timer to manage retry delays.
	Notify         Notify        // Optional function to notify on each retry error.
	MaxTries       uint          // Maximum number of retry attempts.
	MaxElapsedTime time.Duration // Maximum total time for all retries.
}

type RetryOption func(*retryOptions)

// WithBackOff configures a custom backoff strategy.
func WithBackOff(b BackOff) RetryOption {
	return func(args *retryOptions) {
		args.BackOff = b
	}
}

// withTimer sets a custom timer for managing delays between retries.
func withTimer(t timer) RetryOption {
	return func(args *retryOptions) {
		args.Timer = t
	}
}

// WithNotify sets a notification function to handle retry errors.
func WithNotify(n Notify) RetryOption {
	return func(args *retryOptions) {
		args.Notify = n
	}
}

// WithMaxTries limits the number of retry attempts.
func WithMaxTries(n uint) RetryOption {
	return func(args *retryOptions) {
		args.MaxTries = n
	}
}

// WithMaxElapsedTime limits the total duration for retry attempts.
func WithMaxElapsedTime(d time.Duration) RetryOption {
	return func(args *retryOptions) {
		args.MaxElapsedTime = d
	}
}

// Retry attempts the operation until success, a permanent error, or backoff completion.
// It ensures the operation is executed at least once.
//
// Returns the operation result or error if retries are exhausted or context is cancelled.
func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) {
	// Initialize default retry options.
	args := &retryOptions{
		BackOff:        NewExponentialBackOff(),
		Timer:          &defaultTimer{},
		MaxElapsedTime: DefaultMaxElapsedTime,
	}

	// Apply user-provided options to the default settings.
	for _, opt := range opts {
		opt(args)
	}

	defer args.Timer.Stop()

	startedAt := time.Now()
	args.BackOff.Reset()
	for numTries := uint(1); ; numTries++ {
		// Execute the operation.
		res, err := operation()
		if err == nil {
			return res, nil
		}

		// Stop retrying if maximum tries exceeded.
		if args.MaxTries > 0 && numTries >= args.MaxTries {
			return res, err
		}

		// Handle permanent errors without retrying.
		var permanent *PermanentError
		if errors.As(err, &permanent) {
			return res, err
		}

		// Stop retrying if context is cancelled.
		if cerr := context.Cause(ctx); cerr != nil {
			return res, cerr
		}

		// Calculate next backoff duration.
		next := args.BackOff.NextBackOff()
		if next == Stop {
			return res, err
		}

		// Reset backoff if RetryAfterError is encountered.
		var retryAfter *RetryAfterError
		if errors.As(err, &retryAfter) {
			next = retryAfter.Duration
			args.BackOff.Reset()
		}

		// Stop retrying if maximum elapsed time exceeded.
		if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime {
			return res, err
		}

		// Notify on error if a notifier function is provided.
		if args.Notify != nil {
			args.Notify(err, next)
		}

		// Wait for the next backoff period or context cancellation.
		args.Timer.Start(next)
		select {
		case <-args.Timer.C():
		case <-ctx.Done():
			return res, context.Cause(ctx)
		}
	}
}
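For context (not part of the diff), a sketch of how the options and notify hook of this new Retry compose; the `pingDatabase` operation is a made-up placeholder:

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff/v5"
)

// pingDatabase is a hypothetical operation standing in for real work.
func pingDatabase() (bool, error) {
	return false, errors.New("connection refused")
}

func main() {
	bo := backoff.NewExponentialBackOff()
	bo.MaxInterval = 5 * time.Second // cap the growth of individual waits

	ok, err := backoff.Retry(context.Background(), pingDatabase,
		backoff.WithBackOff(bo),
		backoff.WithMaxElapsedTime(20*time.Second),
		backoff.WithNotify(func(err error, wait time.Duration) {
			log.Printf("ping failed (%v), retrying in %s", err, wait)
		}),
	)
	log.Println(ok, err)
}
```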
@@ -1,7 +1,6 @@
 package backoff
 
 import (
-	"context"
 	"sync"
 	"time"
 )
@@ -14,8 +13,7 @@ type Ticker struct {
 	C        <-chan time.Time
 	c        chan time.Time
 	b        BackOff
-	ctx      context.Context
-	timer    Timer
+	timer    timer
 	stop     chan struct{}
 	stopOnce sync.Once
 }
@@ -27,22 +25,12 @@ type Ticker struct {
 // provided backoff policy (notably calling NextBackOff or Reset)
 // while the ticker is running.
 func NewTicker(b BackOff) *Ticker {
-	return NewTickerWithTimer(b, &defaultTimer{})
-}
-
-// NewTickerWithTimer returns a new Ticker with a custom timer.
-// A default timer that uses system timer is used when nil is passed.
-func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
-	if timer == nil {
-		timer = &defaultTimer{}
-	}
 	c := make(chan time.Time)
 	t := &Ticker{
 		C:     c,
 		c:     c,
 		b:     b,
-		ctx:   getContext(b),
-		timer: timer,
+		timer: &defaultTimer{},
 		stop:  make(chan struct{}),
 	}
 	t.b.Reset()
@@ -73,8 +61,6 @@ func (t *Ticker) run() {
 		case <-t.stop:
 			t.c = nil // Prevent future ticks from being sent to the channel.
 			return
-		case <-t.ctx.Done():
-			return
 		}
 	}
 }
@@ -2,7 +2,7 @@ package backoff
 
 import "time"
 
-type Timer interface {
+type timer interface {
 	Start(duration time.Duration)
 	Stop()
 	C() <-chan time.Time
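The headerless hunks above drop the context plumbing and the exported Timer interface from the vendored ticker and timer files. For illustration only, a sketch of driving work from the Ticker API that survives this change; it assumes the Ticker's Stop method from the upstream library, which is not shown in this diff, and `doWork` is a stand-in:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop() // assumed from the upstream Ticker API; not part of this hunk

	attempt := 0
	for range ticker.C { // ticks arrive at exponentially growing, jittered intervals
		attempt++
		if err := doWork(attempt); err != nil {
			fmt.Println("attempt", attempt, "failed:", err)
			continue
		}
		fmt.Println("succeeded on attempt", attempt)
		break
	}
}

// doWork is a stand-in operation that fails twice before succeeding.
func doWork(attempt int) error {
	if attempt < 3 {
		return errors.New("transient failure")
	}
	return nil
}
```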
8
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
8
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
@@ -148,10 +148,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
}

md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Error("Failed to extract ServerMetadata from context")
}

if ok {
handleForwardResponseServerMetadata(w, mux, md)

// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
@@ -165,6 +162,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
handleForwardResponseTrailerHeader(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
}

st := HTTPStatusFromCode(s.Code())
if customStatus != nil {
@@ -176,7 +174,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
grpclog.Errorf("Failed to write response: %v", err)
}

if doForwardTrailers {
if ok && requestAcceptsTrailers(r) {
handleForwardResponseTrailer(w, mux, md)
}
}
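Both grpc-gateway hunks make metadata and trailer forwarding conditional on ServerMetadata actually being present in the context. A hedged sketch of wrapping the default handler with extra logging; WithErrorHandler and NewServeMux are assumed from the grpc-gateway runtime API and are not part of this diff.

```go
// Illustrative only: wrap DefaultHTTPErrorHandler (whose signature appears in
// the hunk above) with logging; WithErrorHandler/NewServeMux are assumed API.
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithErrorHandler(func(ctx context.Context, mux *runtime.ServeMux,
			m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
			log.Printf("gateway error on %s: %v", r.URL.Path, err)
			runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, r, err)
		}),
	)
}

func main() {
	log.Fatal(http.ListenAndServe(":8080", newMux()))
}
```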
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go (generated, vendored): 10 changed lines
@@ -153,11 +153,9 @@ type responseBody interface {
|
||||
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
||||
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Error("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
|
||||
if ok {
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
}
|
||||
|
||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||
// Unless the request includes a TE header field indicating "trailers"
|
||||
@@ -166,7 +164,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
|
||||
// agent to receive.
|
||||
doForwardTrailers := requestAcceptsTrailers(req)
|
||||
|
||||
if doForwardTrailers {
|
||||
if ok && doForwardTrailers {
|
||||
handleForwardResponseTrailerHeader(w, mux, md)
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
}
|
||||
@@ -204,7 +202,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
|
||||
grpclog.Errorf("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
if doForwardTrailers {
|
||||
if ok && doForwardTrailers {
|
||||
handleForwardResponseTrailer(w, mux, md)
|
||||
}
|
||||
}
|
||||
|
vendor/github.com/prometheus/common/expfmt/text_parse.go (generated, vendored): 4 changed lines
@@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn {
}
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
// labels to 'real' labels.
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
(p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
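The rewritten guard is simply the De Morgan form of the old negated conjunctions, so behaviour is unchanged. A quick exhaustive check (illustrative only, with plain booleans standing in for the type and label-name comparisons):

```go
// Exhaustive check that the rewritten guard in startLabelName is equivalent
// to the original: !(a && b) is the same as (!a || !b) for every combination.
package main

import "fmt"

func main() {
	bools := []bool{false, true}
	for _, a1 := range bools {
		for _, a2 := range bools {
			for _, b1 := range bools {
				for _, b2 := range bools {
					oldForm := !(a1 && a2) && !(b1 && b2)
					newForm := (!a1 || !a2) && (!b1 || !b2)
					if oldForm != newForm {
						fmt.Println("mismatch:", a1, a2, b1, b2)
						return
					}
				}
			}
		}
	}
	fmt.Println("old and new guards agree for all 16 combinations")
}
```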
vendor/github.com/prometheus/common/model/alert.go (generated, vendored): 2 changed lines
@@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool {
|
||||
return a.ResolvedAt(time.Now())
|
||||
}
|
||||
|
||||
// ResolvedAt returns true off the activity interval ended before
|
||||
// ResolvedAt returns true iff the activity interval ended before
|
||||
// the given timestamp.
|
||||
func (a *Alert) ResolvedAt(ts time.Time) bool {
|
||||
if a.EndsAt.IsZero() {
|
||||
|
vendor/github.com/prometheus/common/model/labels.go (generated, vendored): 5 changed lines
@@ -22,7 +22,7 @@ import (
)

const (
// AlertNameLabel is the name of the label containing the an alert's name.
// AlertNameLabel is the name of the label containing the alert's name.
AlertNameLabel = "alertname"

// ExportedLabelPrefix is the prefix to prepend to the label names present in
@@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool {
return false
}
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
// TODO: Apply De Morgan's law. Make sure there are tests for this.
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
return false
}
}
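The byte loop kept here (now with a //nolint and a TODO about De Morgan's law) accepts, together with the empty-name check above it, the classic legacy label-name shape. An equivalent regexp form, shown only for illustration:

```go
// Sketch: the IsValidLegacy byte loop accepts exactly the classic Prometheus
// label-name shape (first byte must not be a digit, later bytes may be).
package main

import (
	"fmt"
	"regexp"
)

var legacyLabelName = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)

func main() {
	for _, name := range []string{"job", "_hidden", "http_2xx", "2xx", "läbel", ""} {
		fmt.Printf("%-10q legacy-valid: %v\n", name, legacyLabelName.MatchString(name))
	}
}
```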
vendor/github.com/prometheus/common/model/metric.go (generated, vendored): 28 changed lines
@@ -27,13 +27,25 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// NameValidationScheme determines the method of name validation to be used by
|
||||
// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
|
||||
// mode in isolation from other components that don't support UTF-8 may result
|
||||
// in bugs or other undefined behavior. This value can be set to
|
||||
// LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
|
||||
// avoid need for locking, this value should be set once, ideally in an
|
||||
// init(), before multiple goroutines are started.
|
||||
// NameValidationScheme determines the global default method of the name
|
||||
// validation to be used by all calls to IsValidMetricName() and LabelName
|
||||
// IsValid().
|
||||
//
|
||||
// Deprecated: This variable should not be used and might be removed in the
|
||||
// far future. If you wish to stick to the legacy name validation use
|
||||
// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
|
||||
// instead. This variable is here as an escape hatch for emergency cases,
|
||||
// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
|
||||
// to delay UTF-8 migrations in time or aid in debugging unforeseen results of
|
||||
// the change. In such a case, a temporary assignment to `LegacyValidation`
|
||||
// value in the `init()` function in your main.go or so, could be considered.
|
||||
//
|
||||
// Historically we opted for a global variable for feature gating different
|
||||
// validation schemes in operations that were not otherwise easily adjustable
|
||||
// (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
|
||||
// Labels structure or package might have been a better choice. Given the
|
||||
// change was made and many upgraded the common already, we live this as-is
|
||||
// with this warning and learning for the future.
|
||||
NameValidationScheme = UTF8Validation
|
||||
|
||||
// NameEscapingScheme defines the default way that names will be escaped when
|
||||
@@ -50,7 +62,7 @@ var (
|
||||
type ValidationScheme int
|
||||
|
||||
const (
|
||||
// LegacyValidation is a setting that requirets that metric and label names
|
||||
// LegacyValidation is a setting that requires that all metric and label names
|
||||
// conform to the original Prometheus character requirements described by
|
||||
// MetricNameRE and LabelNameRE.
|
||||
LegacyValidation ValidationScheme = iota
|
||||
|
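The new comment describes NameValidationScheme as a deprecated escape hatch and suggests a one-off assignment in init(). A minimal sketch of that escape hatch for a binary that must stay on legacy-only names:

```go
// Escape hatch described in the deprecation note above: pin the global back
// to legacy validation during startup, before any goroutines use it.
package main

import "github.com/prometheus/common/model"

func init() {
	// Temporary, per the comment: prefer IsValidLegacyMetricName() and
	// LabelName.IsValidLegacy() over flipping this deprecated global.
	model.NameValidationScheme = model.LegacyValidation
}

func main() {
	_ = model.IsValidMetricName("my_metric_total")
}
```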
vendor/github.com/prometheus/procfs/.golangci.yml (generated, vendored): 45 changed lines
@@ -1,22 +1,45 @@
|
||||
---
|
||||
version: "2"
|
||||
linters:
|
||||
enable:
|
||||
- errcheck
|
||||
- forbidigo
|
||||
- godot
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- testifylint
|
||||
- unused
|
||||
|
||||
linter-settings:
|
||||
settings:
|
||||
forbidigo:
|
||||
forbid:
|
||||
- pattern: ^fmt\.Print.*$
|
||||
msg: Do not commit print statements.
|
||||
godot:
|
||||
capital: true
|
||||
exclude:
|
||||
# Ignore "See: URL"
|
||||
# Ignore "See: URL".
|
||||
- 'See:'
|
||||
capital: true
|
||||
misspell:
|
||||
locale: US
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
settings:
|
||||
goimports:
|
||||
local-prefixes:
|
||||
- github.com/prometheus/procfs
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
vendor/github.com/prometheus/procfs/Makefile.common (generated, vendored): 10 changed lines
@@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
|
||||
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
|
||||
|
||||
GO_VERSION ?= $(shell $(GO) version)
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
|
||||
|
||||
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
||||
SKIP_GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.59.0
|
||||
GOLANGCI_LINT_VERSION ?= v2.0.2
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
@@ -275,3 +275,9 @@ $(1)_precheck:
|
||||
exit 1; \
|
||||
fi
|
||||
endef
|
||||
|
||||
govulncheck: install-govulncheck
|
||||
govulncheck ./...
|
||||
|
||||
install-govulncheck:
|
||||
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
|
vendor/github.com/prometheus/procfs/README.md (generated, vendored): 6 changed lines
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
|
||||
The procfs library includes a set of test fixtures which include many example files from
|
||||
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
|
||||
which is extracted automatically during testing. To add/update the test fixtures, first
|
||||
ensure the `fixtures` directory is up to date by removing the existing directory and then
|
||||
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
|
||||
ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
|
||||
extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
|
||||
|
||||
```bash
|
||||
rm -rf testdata/fixtures
|
||||
make test
|
||||
```
|
||||
|
||||
Next, make the required changes to the extracted files in the `fixtures` directory. When
|
||||
Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
|
||||
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
|
||||
based on the updated `fixtures` directory. And finally, verify the changes using
|
||||
`git diff testdata/fixtures.ttar`.
|
||||
|
vendor/github.com/prometheus/procfs/arp.go (generated, vendored): 4 changed lines
@@ -23,9 +23,9 @@ import (
|
||||
|
||||
// Learned from include/uapi/linux/if_arp.h.
|
||||
const (
|
||||
// completed entry (ha valid).
|
||||
// Completed entry (ha valid).
|
||||
ATFComplete = 0x02
|
||||
// permanent entry.
|
||||
// Permanent entry.
|
||||
ATFPermanent = 0x04
|
||||
// Publish entry.
|
||||
ATFPublish = 0x08
|
||||
|
vendor/github.com/prometheus/procfs/fs.go (generated, vendored): 10 changed lines
@@ -24,8 +24,14 @@ type FS struct {
isReal bool
}

// DefaultMountPoint is the common mount point of the proc filesystem.
const DefaultMountPoint = fs.DefaultProcMountPoint
const (
// DefaultMountPoint is the common mount point of the proc filesystem.
DefaultMountPoint = fs.DefaultProcMountPoint

// SectorSize represents the size of a sector in bytes.
// It is specific to Linux block I/O operations.
SectorSize = 512
)

// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// It will error if the mount point directory can't be read or is a file.
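SectorSize is new in this constant block. A tiny sketch of the obvious use, converting a sector count to bytes; the sector figure is made up for illustration:

```go
// Sketch: converting a sector count to bytes with the new procfs.SectorSize.
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	const sectorsWritten = 123456 // hypothetical value, e.g. from /proc/diskstats
	fmt.Printf("%d sectors = %d bytes\n", sectorsWritten, sectorsWritten*procfs.SectorSize)
}
```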
vendor/github.com/prometheus/procfs/fs_statfs_notype.go (generated, vendored): 4 changed lines
@@ -17,7 +17,7 @@
|
||||
package procfs
|
||||
|
||||
// isRealProc returns true on architectures that don't have a Type argument
|
||||
// in their Statfs_t struct
|
||||
func isRealProc(mountPoint string) (bool, error) {
|
||||
// in their Statfs_t struct.
|
||||
func isRealProc(_ string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
vendor/github.com/prometheus/procfs/fscache.go (generated, vendored): 6 changed lines
@@ -162,7 +162,7 @@ type Fscacheinfo struct {
|
||||
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
|
||||
// Number of release reqs ignored due to in-progress store
|
||||
ReleaseRequestsIgnoredDueToInProgressStore uint64
|
||||
// Number of page stores cancelled due to release req
|
||||
// Number of page stores canceled due to release req
|
||||
PageStoresCancelledByReleaseRequests uint64
|
||||
VmscanWaiting uint64
|
||||
// Number of times async ops added to pending queues
|
||||
@@ -171,11 +171,11 @@ type Fscacheinfo struct {
|
||||
OpsRunning uint64
|
||||
// Number of times async ops queued for processing
|
||||
OpsEnqueued uint64
|
||||
// Number of async ops cancelled
|
||||
// Number of async ops canceled
|
||||
OpsCancelled uint64
|
||||
// Number of async ops rejected due to object lookup/create failure
|
||||
OpsRejected uint64
|
||||
// Number of async ops initialised
|
||||
// Number of async ops initialized
|
||||
OpsInitialised uint64
|
||||
// Number of async ops queued for deferred release
|
||||
OpsDeferred uint64
|
||||
|
vendor/github.com/prometheus/procfs/internal/fs/fs.go (generated, vendored): 3 changed lines
@@ -28,6 +28,9 @@ const (
|
||||
|
||||
// DefaultConfigfsMountPoint is the common mount point of the configfs.
|
||||
DefaultConfigfsMountPoint = "/sys/kernel/config"
|
||||
|
||||
// DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
|
||||
DefaultSelinuxMountPoint = "/sys/fs/selinux"
|
||||
)
|
||||
|
||||
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
|
||||
|
vendor/github.com/prometheus/procfs/internal/util/parse.go (generated, vendored): 14 changed lines
@@ -14,6 +14,7 @@
package util

import (
"errors"
"os"
"strconv"
"strings"
@@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
}
return &truth
}

// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
func ReadHexFromFile(path string) (uint64, error) {
data, err := os.ReadFile(path)
if err != nil {
return 0, err
}
hexString := strings.TrimSpace(string(data))
if !strings.HasPrefix(hexString, "0x") {
return 0, errors.New("invalid format: hex string does not start with '0x'")
}
return strconv.ParseUint(hexString[2:], 16, 64)
}
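ReadHexFromFile lives in procfs's internal util package, so it cannot be imported from outside the module. A standalone sketch of the same 0x-prefixed parse it performs:

```go
// Standalone sketch of the parse ReadHexFromFile does (the helper itself is
// internal to procfs and not importable here).
package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

func parseHex(s string) (uint64, error) {
	s = strings.TrimSpace(s)
	if !strings.HasPrefix(s, "0x") {
		return 0, errors.New("invalid format: hex string does not start with '0x'")
	}
	return strconv.ParseUint(s[2:], 16, 64)
}

func main() {
	v, err := parseHex("0x1f\n") // e.g. the contents of a sysfs attribute
	fmt.Println(v, err)          // 31 <nil>
}
```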
vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go (generated, vendored): 20 changed lines
@@ -20,6 +20,8 @@ package util
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
@@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) {
|
||||
|
||||
return string(bytes.TrimSpace(b[:n])), nil
|
||||
}
|
||||
|
||||
// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
|
||||
func SysReadUintFromFile(path string) (uint64, error) {
|
||||
data, err := SysReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
|
||||
|
||||
// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it.
|
||||
func SysReadIntFromFile(path string) (int64, error) {
|
||||
data, err := SysReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
|
||||
|
vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored): 27 changed lines
@@ -45,11 +45,11 @@ const (
|
||||
fieldTransport11TCPLen = 13
|
||||
fieldTransport11UDPLen = 10
|
||||
|
||||
// kernel version >= 4.14 MaxLen
|
||||
// Kernel version >= 4.14 MaxLen
|
||||
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
|
||||
fieldTransport11RDMAMaxLen = 28
|
||||
|
||||
// kernel version <= 4.2 MinLen
|
||||
// Kernel version <= 4.2 MinLen
|
||||
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
|
||||
fieldTransport11RDMAMinLen = 20
|
||||
)
|
||||
@@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
||||
switch statVersion {
|
||||
case statVersion10:
|
||||
var expectedLength int
|
||||
if protocol == "tcp" {
|
||||
switch protocol {
|
||||
case "tcp":
|
||||
expectedLength = fieldTransport10TCPLen
|
||||
} else if protocol == "udp" {
|
||||
case "udp":
|
||||
expectedLength = fieldTransport10UDPLen
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
|
||||
}
|
||||
if len(ss) != expectedLength {
|
||||
@@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
||||
}
|
||||
case statVersion11:
|
||||
var expectedLength int
|
||||
if protocol == "tcp" {
|
||||
switch protocol {
|
||||
case "tcp":
|
||||
expectedLength = fieldTransport11TCPLen
|
||||
} else if protocol == "udp" {
|
||||
case "udp":
|
||||
expectedLength = fieldTransport11UDPLen
|
||||
} else if protocol == "rdma" {
|
||||
case "rdma":
|
||||
expectedLength = fieldTransport11RDMAMinLen
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
|
||||
}
|
||||
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
|
||||
@@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
||||
// For the udp RPC transport there is no connection count, connect idle time,
|
||||
// or idle time (fields #3, #4, and #5); all other fields are the same. So
|
||||
// we set them to 0 here.
|
||||
if protocol == "udp" {
|
||||
switch protocol {
|
||||
case "udp":
|
||||
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
|
||||
} else if protocol == "tcp" {
|
||||
case "tcp":
|
||||
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
|
||||
} else if protocol == "rdma" {
|
||||
case "rdma":
|
||||
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
|
||||
}
|
||||
|
||||
|
vendor/github.com/prometheus/procfs/net_dev_snmp6.go (generated, vendored, new file): 96 added lines
@@ -0,0 +1,96 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
|
||||
// The outer map's keys are interface names and the inner map's keys are stat names.
|
||||
//
|
||||
// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
|
||||
type NetDevSNMP6 map[string]map[string]uint64
|
||||
|
||||
// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
|
||||
// directory.
|
||||
func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
|
||||
return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
|
||||
}
|
||||
|
||||
// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
|
||||
// directory.
|
||||
func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
|
||||
return newNetDevSNMP6(p.path("net/dev_snmp6"))
|
||||
}
|
||||
|
||||
// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
|
||||
func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
|
||||
netDevSNMP6 := make(NetDevSNMP6)
|
||||
|
||||
// The net/dev_snmp6 folders contain one file per interface
|
||||
ifaceFiles, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
// On systems with IPv6 disabled, this directory won't exist.
|
||||
// Do nothing.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
|
||||
for _, iFaceFile := range ifaceFiles {
|
||||
f, err := os.Open(dir + "/" + iFaceFile.Name())
|
||||
if err != nil {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
|
||||
if err != nil {
|
||||
return netDevSNMP6, err
|
||||
}
|
||||
}
|
||||
|
||||
return netDevSNMP6, nil
|
||||
}
|
||||
|
||||
func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
|
||||
m := make(map[string]uint64)
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
stat := strings.Fields(scanner.Text())
|
||||
if len(stat) < 2 {
|
||||
continue
|
||||
}
|
||||
key, val := stat[0], stat[1]
|
||||
|
||||
// Expect stat name to contain "6" or be "ifIndex"
|
||||
if strings.Contains(key, "6") || key == "ifIndex" {
|
||||
v, err := strconv.ParseUint(val, 10, 64)
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
|
||||
m[key] = v
|
||||
}
|
||||
}
|
||||
return m, scanner.Err()
|
||||
}
|
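net_dev_snmp6.go adds per-interface IPv6 counters. A usage sketch via NewDefaultFS; "Ip6InReceives" is an example key from /proc/net/dev_snmp6 and kernels may expose a different set.

```go
// Sketch: reading the new per-interface IPv6 counters added above.
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		log.Fatal(err) // per the code above, this also errors when IPv6 is disabled
	}
	for iface, counters := range stats {
		fmt.Printf("%s: Ip6InReceives=%d\n", iface, counters["Ip6InReceives"])
	}
}
```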
vendor/github.com/prometheus/procfs/net_ip_socket.go (generated, vendored): 8 changed lines
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// readLimit is used by io.LimitReader while reading the content of the
|
||||
// Maximum size limit used by io.LimitReader while reading the content of the
|
||||
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
|
||||
// as each line represents a single used socket.
|
||||
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
|
||||
@@ -50,12 +50,12 @@ type (
|
||||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
// Drops shows the total number of dropped packets of all UPD sockets.
|
||||
// Drops shows the total number of dropped packets of all UDP sockets.
|
||||
Drops *uint64
|
||||
}
|
||||
|
||||
// netIPSocketLine represents the fields parsed from a single line
|
||||
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
||||
// A single line parser for fields from /proc/net/{t,u}dp{,6}.
|
||||
// Fields which are not used by IPSocket are skipped.
|
||||
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netIPSocketLine struct {
|
||||
|
vendor/github.com/prometheus/procfs/net_protocols.go (generated, vendored): 21 changed lines
@@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fields[4] == enabled {
|
||||
switch fields[4] {
|
||||
case enabled:
|
||||
line.Pressure = 1
|
||||
} else if fields[4] == disabled {
|
||||
case disabled:
|
||||
line.Pressure = 0
|
||||
} else {
|
||||
default:
|
||||
line.Pressure = -1
|
||||
}
|
||||
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fields[6] == enabled {
|
||||
switch fields[6] {
|
||||
case enabled:
|
||||
line.Slab = true
|
||||
} else if fields[6] == disabled {
|
||||
case disabled:
|
||||
line.Slab = false
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
|
||||
}
|
||||
line.ModuleName = fields[7]
|
||||
@@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
|
||||
}
|
||||
|
||||
for i := 0; i < len(capabilities); i++ {
|
||||
if capabilities[i] == "y" {
|
||||
switch capabilities[i] {
|
||||
case "y":
|
||||
*capabilityFields[i] = true
|
||||
} else if capabilities[i] == "n" {
|
||||
case "n":
|
||||
*capabilityFields[i] = false
|
||||
} else {
|
||||
default:
|
||||
return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
|
||||
}
|
||||
}
|
||||
|
vendor/github.com/prometheus/procfs/net_tcp.go (generated, vendored): 4 changed lines
@@ -25,24 +25,28 @@ type (
|
||||
|
||||
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
|
||||
// read from /proc/net/tcp.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
|
||||
func (fs FS) NetTCP() (NetTCP, error) {
|
||||
return newNetTCP(fs.proc.Path("net/tcp"))
|
||||
}
|
||||
|
||||
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
|
||||
// read from /proc/net/tcp6.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
|
||||
func (fs FS) NetTCP6() (NetTCP, error) {
|
||||
return newNetTCP(fs.proc.Path("net/tcp6"))
|
||||
}
|
||||
|
||||
// NetTCPSummary returns already computed statistics like the total queue lengths
|
||||
// for TCP datagrams read from /proc/net/tcp.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
|
||||
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
|
||||
return newNetTCPSummary(fs.proc.Path("net/tcp"))
|
||||
}
|
||||
|
||||
// NetTCP6Summary returns already computed statistics like the total queue lengths
|
||||
// for TCP datagrams read from /proc/net/tcp6.
|
||||
// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
|
||||
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
|
||||
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
|
||||
}
|
||||
|
vendor/github.com/prometheus/procfs/net_unix.go (generated, vendored): 8 changed lines
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
|
||||
return &nu, nil
|
||||
}
|
||||
|
||||
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
|
||||
func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
|
||||
fields := strings.Fields(line)
|
||||
|
||||
l := len(fields)
|
||||
if l < min {
|
||||
return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
|
||||
if l < minFields {
|
||||
return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
|
||||
}
|
||||
|
||||
// Field offsets are as follows:
|
||||
@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
|
||||
}
|
||||
|
||||
// Path field is optional.
|
||||
if l > min {
|
||||
if l > minFields {
|
||||
// Path occurs at either index 6 or 7 depending on whether inode is
|
||||
// already present.
|
||||
pathIdx := 7
|
||||
|
vendor/github.com/prometheus/procfs/proc.go (generated, vendored): 8 changed lines
@@ -37,9 +37,9 @@ type Proc struct {
type Procs []Proc

var (
ErrFileParse = errors.New("Error Parsing File")
ErrFileRead = errors.New("Error Reading File")
ErrMountPoint = errors.New("Error Accessing Mount point")
ErrFileParse = errors.New("error parsing file")
ErrFileRead = errors.New("error reading file")
ErrMountPoint = errors.New("error accessing mount point")
)

func (p Procs) Len() int { return len(p) }
@@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) {
if err != nil {
return Proc{}, err
}
pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
if err != nil {
return Proc{}, err
}
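Only the sentinel messages are lowercased; the exported ErrFileParse, ErrFileRead and ErrMountPoint variables keep their names, so errors.Is-based handling is unaffected. A sketch follows; whether a particular constructor such as NewDefaultFS wraps ErrMountPoint for a given failure is an assumption here, the point is only that matching is on the variable, not on the message text.

```go
// Sketch: callers match on the exported sentinels, so the message change
// above is not breaking for errors.Is-style handling.
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		// Assumption: a missing/unreadable mount point surfaces ErrMountPoint.
		if errors.Is(err, procfs.ErrMountPoint) {
			fmt.Println("proc is not mounted where expected:", err)
			return
		}
		fmt.Println(err)
		return
	}
	_ = fs
}
```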
vendor/github.com/prometheus/procfs/proc_cgroup.go (generated, vendored): 2 changed lines
@@ -24,7 +24,7 @@ import (
|
||||
)
|
||||
|
||||
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
|
||||
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
|
||||
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource
|
||||
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
|
||||
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
|
||||
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
|
||||
|
vendor/github.com/prometheus/procfs/proc_io.go (generated, vendored): 2 changed lines
@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {
|
||||
|
||||
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
|
||||
"read_bytes: %d\nwrite_bytes: %d\n" +
|
||||
"cancelled_write_bytes: %d\n"
|
||||
"cancelled_write_bytes: %d\n" //nolint:misspell
|
||||
|
||||
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
|
||||
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
|
||||
|
vendor/github.com/prometheus/procfs/proc_netstat.go (generated, vendored): 224 changed lines
@@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
|
||||
case "TcpExt":
|
||||
switch key {
|
||||
case "SyncookiesSent":
|
||||
procNetstat.TcpExt.SyncookiesSent = &value
|
||||
procNetstat.SyncookiesSent = &value
|
||||
case "SyncookiesRecv":
|
||||
procNetstat.TcpExt.SyncookiesRecv = &value
|
||||
procNetstat.SyncookiesRecv = &value
|
||||
case "SyncookiesFailed":
|
||||
procNetstat.TcpExt.SyncookiesFailed = &value
|
||||
procNetstat.SyncookiesFailed = &value
|
||||
case "EmbryonicRsts":
|
||||
procNetstat.TcpExt.EmbryonicRsts = &value
|
||||
procNetstat.EmbryonicRsts = &value
|
||||
case "PruneCalled":
|
||||
procNetstat.TcpExt.PruneCalled = &value
|
||||
procNetstat.PruneCalled = &value
|
||||
case "RcvPruned":
|
||||
procNetstat.TcpExt.RcvPruned = &value
|
||||
procNetstat.RcvPruned = &value
|
||||
case "OfoPruned":
|
||||
procNetstat.TcpExt.OfoPruned = &value
|
||||
procNetstat.OfoPruned = &value
|
||||
case "OutOfWindowIcmps":
|
||||
procNetstat.TcpExt.OutOfWindowIcmps = &value
|
||||
procNetstat.OutOfWindowIcmps = &value
|
||||
case "LockDroppedIcmps":
|
||||
procNetstat.TcpExt.LockDroppedIcmps = &value
|
||||
procNetstat.LockDroppedIcmps = &value
|
||||
case "ArpFilter":
|
||||
procNetstat.TcpExt.ArpFilter = &value
|
||||
procNetstat.ArpFilter = &value
|
||||
case "TW":
|
||||
procNetstat.TcpExt.TW = &value
|
||||
procNetstat.TW = &value
|
||||
case "TWRecycled":
|
||||
procNetstat.TcpExt.TWRecycled = &value
|
||||
procNetstat.TWRecycled = &value
|
||||
case "TWKilled":
|
||||
procNetstat.TcpExt.TWKilled = &value
|
||||
procNetstat.TWKilled = &value
|
||||
case "PAWSActive":
|
||||
procNetstat.TcpExt.PAWSActive = &value
|
||||
procNetstat.PAWSActive = &value
|
||||
case "PAWSEstab":
|
||||
procNetstat.TcpExt.PAWSEstab = &value
|
||||
procNetstat.PAWSEstab = &value
|
||||
case "DelayedACKs":
|
||||
procNetstat.TcpExt.DelayedACKs = &value
|
||||
procNetstat.DelayedACKs = &value
|
||||
case "DelayedACKLocked":
|
||||
procNetstat.TcpExt.DelayedACKLocked = &value
|
||||
procNetstat.DelayedACKLocked = &value
|
||||
case "DelayedACKLost":
|
||||
procNetstat.TcpExt.DelayedACKLost = &value
|
||||
procNetstat.DelayedACKLost = &value
|
||||
case "ListenOverflows":
|
||||
procNetstat.TcpExt.ListenOverflows = &value
|
||||
procNetstat.ListenOverflows = &value
|
||||
case "ListenDrops":
|
||||
procNetstat.TcpExt.ListenDrops = &value
|
||||
procNetstat.ListenDrops = &value
|
||||
case "TCPHPHits":
|
||||
procNetstat.TcpExt.TCPHPHits = &value
|
||||
procNetstat.TCPHPHits = &value
|
||||
case "TCPPureAcks":
|
||||
procNetstat.TcpExt.TCPPureAcks = &value
|
||||
procNetstat.TCPPureAcks = &value
|
||||
case "TCPHPAcks":
|
||||
procNetstat.TcpExt.TCPHPAcks = &value
|
||||
procNetstat.TCPHPAcks = &value
|
||||
case "TCPRenoRecovery":
|
||||
procNetstat.TcpExt.TCPRenoRecovery = &value
|
||||
procNetstat.TCPRenoRecovery = &value
|
||||
case "TCPSackRecovery":
|
||||
procNetstat.TcpExt.TCPSackRecovery = &value
|
||||
procNetstat.TCPSackRecovery = &value
|
||||
case "TCPSACKReneging":
|
||||
procNetstat.TcpExt.TCPSACKReneging = &value
|
||||
procNetstat.TCPSACKReneging = &value
|
||||
case "TCPSACKReorder":
|
||||
procNetstat.TcpExt.TCPSACKReorder = &value
|
||||
procNetstat.TCPSACKReorder = &value
|
||||
case "TCPRenoReorder":
|
||||
procNetstat.TcpExt.TCPRenoReorder = &value
|
||||
procNetstat.TCPRenoReorder = &value
|
||||
case "TCPTSReorder":
|
||||
procNetstat.TcpExt.TCPTSReorder = &value
|
||||
procNetstat.TCPTSReorder = &value
|
||||
case "TCPFullUndo":
|
||||
procNetstat.TcpExt.TCPFullUndo = &value
|
||||
procNetstat.TCPFullUndo = &value
|
||||
case "TCPPartialUndo":
|
||||
procNetstat.TcpExt.TCPPartialUndo = &value
|
||||
procNetstat.TCPPartialUndo = &value
|
||||
case "TCPDSACKUndo":
|
||||
procNetstat.TcpExt.TCPDSACKUndo = &value
|
||||
procNetstat.TCPDSACKUndo = &value
|
||||
case "TCPLossUndo":
|
||||
procNetstat.TcpExt.TCPLossUndo = &value
|
||||
procNetstat.TCPLossUndo = &value
|
||||
case "TCPLostRetransmit":
|
||||
procNetstat.TcpExt.TCPLostRetransmit = &value
|
||||
procNetstat.TCPLostRetransmit = &value
|
||||
case "TCPRenoFailures":
|
||||
procNetstat.TcpExt.TCPRenoFailures = &value
|
||||
procNetstat.TCPRenoFailures = &value
|
||||
case "TCPSackFailures":
|
||||
procNetstat.TcpExt.TCPSackFailures = &value
|
||||
procNetstat.TCPSackFailures = &value
|
||||
case "TCPLossFailures":
|
||||
procNetstat.TcpExt.TCPLossFailures = &value
|
||||
procNetstat.TCPLossFailures = &value
|
||||
case "TCPFastRetrans":
|
||||
procNetstat.TcpExt.TCPFastRetrans = &value
|
||||
procNetstat.TCPFastRetrans = &value
|
||||
case "TCPSlowStartRetrans":
|
||||
procNetstat.TcpExt.TCPSlowStartRetrans = &value
|
||||
procNetstat.TCPSlowStartRetrans = &value
|
||||
case "TCPTimeouts":
|
||||
procNetstat.TcpExt.TCPTimeouts = &value
|
||||
procNetstat.TCPTimeouts = &value
|
||||
case "TCPLossProbes":
|
||||
procNetstat.TcpExt.TCPLossProbes = &value
|
||||
procNetstat.TCPLossProbes = &value
|
||||
case "TCPLossProbeRecovery":
|
||||
procNetstat.TcpExt.TCPLossProbeRecovery = &value
|
||||
procNetstat.TCPLossProbeRecovery = &value
|
||||
case "TCPRenoRecoveryFail":
|
||||
procNetstat.TcpExt.TCPRenoRecoveryFail = &value
|
||||
procNetstat.TCPRenoRecoveryFail = &value
|
||||
case "TCPSackRecoveryFail":
|
||||
procNetstat.TcpExt.TCPSackRecoveryFail = &value
|
||||
procNetstat.TCPSackRecoveryFail = &value
|
||||
case "TCPRcvCollapsed":
|
||||
procNetstat.TcpExt.TCPRcvCollapsed = &value
|
||||
procNetstat.TCPRcvCollapsed = &value
|
||||
case "TCPDSACKOldSent":
|
||||
procNetstat.TcpExt.TCPDSACKOldSent = &value
|
||||
procNetstat.TCPDSACKOldSent = &value
|
||||
case "TCPDSACKOfoSent":
|
||||
procNetstat.TcpExt.TCPDSACKOfoSent = &value
|
||||
procNetstat.TCPDSACKOfoSent = &value
|
||||
case "TCPDSACKRecv":
|
||||
procNetstat.TcpExt.TCPDSACKRecv = &value
|
||||
procNetstat.TCPDSACKRecv = &value
|
||||
case "TCPDSACKOfoRecv":
|
||||
procNetstat.TcpExt.TCPDSACKOfoRecv = &value
|
||||
procNetstat.TCPDSACKOfoRecv = &value
|
||||
case "TCPAbortOnData":
|
||||
procNetstat.TcpExt.TCPAbortOnData = &value
|
||||
procNetstat.TCPAbortOnData = &value
|
||||
case "TCPAbortOnClose":
|
||||
procNetstat.TcpExt.TCPAbortOnClose = &value
|
||||
procNetstat.TCPAbortOnClose = &value
|
||||
case "TCPDeferAcceptDrop":
|
||||
procNetstat.TcpExt.TCPDeferAcceptDrop = &value
|
||||
procNetstat.TCPDeferAcceptDrop = &value
|
||||
case "IPReversePathFilter":
|
||||
procNetstat.TcpExt.IPReversePathFilter = &value
|
||||
procNetstat.IPReversePathFilter = &value
|
||||
case "TCPTimeWaitOverflow":
|
||||
procNetstat.TcpExt.TCPTimeWaitOverflow = &value
|
||||
procNetstat.TCPTimeWaitOverflow = &value
|
||||
case "TCPReqQFullDoCookies":
|
||||
procNetstat.TcpExt.TCPReqQFullDoCookies = &value
|
||||
procNetstat.TCPReqQFullDoCookies = &value
|
||||
case "TCPReqQFullDrop":
|
||||
procNetstat.TcpExt.TCPReqQFullDrop = &value
|
||||
procNetstat.TCPReqQFullDrop = &value
|
||||
case "TCPRetransFail":
|
||||
procNetstat.TcpExt.TCPRetransFail = &value
|
||||
procNetstat.TCPRetransFail = &value
|
||||
case "TCPRcvCoalesce":
|
||||
procNetstat.TcpExt.TCPRcvCoalesce = &value
|
||||
procNetstat.TCPRcvCoalesce = &value
|
||||
case "TCPRcvQDrop":
|
||||
procNetstat.TcpExt.TCPRcvQDrop = &value
|
||||
procNetstat.TCPRcvQDrop = &value
|
||||
case "TCPOFOQueue":
|
||||
procNetstat.TcpExt.TCPOFOQueue = &value
|
||||
procNetstat.TCPOFOQueue = &value
|
||||
case "TCPOFODrop":
|
||||
procNetstat.TcpExt.TCPOFODrop = &value
|
||||
procNetstat.TCPOFODrop = &value
|
||||
case "TCPOFOMerge":
|
||||
procNetstat.TcpExt.TCPOFOMerge = &value
|
||||
procNetstat.TCPOFOMerge = &value
|
||||
case "TCPChallengeACK":
|
||||
procNetstat.TcpExt.TCPChallengeACK = &value
|
||||
procNetstat.TCPChallengeACK = &value
|
||||
case "TCPSYNChallenge":
|
||||
procNetstat.TcpExt.TCPSYNChallenge = &value
|
||||
procNetstat.TCPSYNChallenge = &value
|
||||
case "TCPFastOpenActive":
|
||||
procNetstat.TcpExt.TCPFastOpenActive = &value
|
||||
procNetstat.TCPFastOpenActive = &value
|
||||
case "TCPFastOpenActiveFail":
|
||||
procNetstat.TcpExt.TCPFastOpenActiveFail = &value
|
||||
procNetstat.TCPFastOpenActiveFail = &value
|
||||
case "TCPFastOpenPassive":
|
||||
procNetstat.TcpExt.TCPFastOpenPassive = &value
|
||||
procNetstat.TCPFastOpenPassive = &value
|
||||
case "TCPFastOpenPassiveFail":
|
||||
procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
|
||||
procNetstat.TCPFastOpenPassiveFail = &value
|
||||
case "TCPFastOpenListenOverflow":
|
||||
procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
|
||||
procNetstat.TCPFastOpenListenOverflow = &value
|
||||
case "TCPFastOpenCookieReqd":
|
||||
procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
|
||||
procNetstat.TCPFastOpenCookieReqd = &value
|
||||
case "TCPFastOpenBlackhole":
|
||||
procNetstat.TcpExt.TCPFastOpenBlackhole = &value
|
||||
procNetstat.TCPFastOpenBlackhole = &value
|
||||
case "TCPSpuriousRtxHostQueues":
|
||||
procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
|
||||
procNetstat.TCPSpuriousRtxHostQueues = &value
|
||||
case "BusyPollRxPackets":
|
||||
procNetstat.TcpExt.BusyPollRxPackets = &value
|
||||
procNetstat.BusyPollRxPackets = &value
|
||||
case "TCPAutoCorking":
|
||||
procNetstat.TcpExt.TCPAutoCorking = &value
|
||||
procNetstat.TCPAutoCorking = &value
|
||||
case "TCPFromZeroWindowAdv":
|
||||
procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
|
||||
procNetstat.TCPFromZeroWindowAdv = &value
|
||||
case "TCPToZeroWindowAdv":
|
||||
procNetstat.TcpExt.TCPToZeroWindowAdv = &value
|
||||
procNetstat.TCPToZeroWindowAdv = &value
|
||||
case "TCPWantZeroWindowAdv":
|
||||
procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
|
||||
procNetstat.TCPWantZeroWindowAdv = &value
|
||||
case "TCPSynRetrans":
|
||||
procNetstat.TcpExt.TCPSynRetrans = &value
|
||||
procNetstat.TCPSynRetrans = &value
|
||||
case "TCPOrigDataSent":
|
||||
procNetstat.TcpExt.TCPOrigDataSent = &value
|
||||
procNetstat.TCPOrigDataSent = &value
|
||||
case "TCPHystartTrainDetect":
|
||||
procNetstat.TcpExt.TCPHystartTrainDetect = &value
|
||||
procNetstat.TCPHystartTrainDetect = &value
|
||||
case "TCPHystartTrainCwnd":
|
||||
procNetstat.TcpExt.TCPHystartTrainCwnd = &value
|
||||
procNetstat.TCPHystartTrainCwnd = &value
|
||||
case "TCPHystartDelayDetect":
|
||||
procNetstat.TcpExt.TCPHystartDelayDetect = &value
|
||||
procNetstat.TCPHystartDelayDetect = &value
|
||||
case "TCPHystartDelayCwnd":
|
||||
procNetstat.TcpExt.TCPHystartDelayCwnd = &value
|
||||
procNetstat.TCPHystartDelayCwnd = &value
|
||||
case "TCPACKSkippedSynRecv":
|
||||
procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
|
||||
procNetstat.TCPACKSkippedSynRecv = &value
|
||||
case "TCPACKSkippedPAWS":
|
||||
procNetstat.TcpExt.TCPACKSkippedPAWS = &value
|
||||
procNetstat.TCPACKSkippedPAWS = &value
|
||||
case "TCPACKSkippedSeq":
|
||||
procNetstat.TcpExt.TCPACKSkippedSeq = &value
|
||||
procNetstat.TCPACKSkippedSeq = &value
|
||||
case "TCPACKSkippedFinWait2":
|
||||
procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
|
||||
procNetstat.TCPACKSkippedFinWait2 = &value
|
||||
case "TCPACKSkippedTimeWait":
|
||||
procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
|
||||
procNetstat.TCPACKSkippedTimeWait = &value
|
||||
case "TCPACKSkippedChallenge":
|
||||
procNetstat.TcpExt.TCPACKSkippedChallenge = &value
|
||||
procNetstat.TCPACKSkippedChallenge = &value
|
||||
case "TCPWinProbe":
|
||||
procNetstat.TcpExt.TCPWinProbe = &value
|
||||
procNetstat.TCPWinProbe = &value
|
||||
case "TCPKeepAlive":
|
||||
procNetstat.TcpExt.TCPKeepAlive = &value
|
||||
procNetstat.TCPKeepAlive = &value
|
||||
case "TCPMTUPFail":
|
||||
procNetstat.TcpExt.TCPMTUPFail = &value
|
||||
procNetstat.TCPMTUPFail = &value
|
||||
case "TCPMTUPSuccess":
|
||||
procNetstat.TcpExt.TCPMTUPSuccess = &value
|
||||
procNetstat.TCPMTUPSuccess = &value
|
||||
case "TCPWqueueTooBig":
|
||||
procNetstat.TcpExt.TCPWqueueTooBig = &value
|
||||
procNetstat.TCPWqueueTooBig = &value
|
||||
}
|
||||
case "IpExt":
|
||||
switch key {
|
||||
case "InNoRoutes":
|
||||
procNetstat.IpExt.InNoRoutes = &value
|
||||
procNetstat.InNoRoutes = &value
|
||||
case "InTruncatedPkts":
|
||||
procNetstat.IpExt.InTruncatedPkts = &value
|
||||
procNetstat.InTruncatedPkts = &value
|
||||
case "InMcastPkts":
|
||||
procNetstat.IpExt.InMcastPkts = &value
|
||||
procNetstat.InMcastPkts = &value
|
||||
case "OutMcastPkts":
|
||||
procNetstat.IpExt.OutMcastPkts = &value
|
||||
procNetstat.OutMcastPkts = &value
|
||||
case "InBcastPkts":
|
||||
procNetstat.IpExt.InBcastPkts = &value
|
||||
procNetstat.InBcastPkts = &value
|
||||
case "OutBcastPkts":
|
||||
procNetstat.IpExt.OutBcastPkts = &value
|
||||
procNetstat.OutBcastPkts = &value
|
||||
case "InOctets":
|
||||
procNetstat.IpExt.InOctets = &value
|
||||
procNetstat.InOctets = &value
|
||||
case "OutOctets":
|
||||
procNetstat.IpExt.OutOctets = &value
|
||||
procNetstat.OutOctets = &value
|
||||
case "InMcastOctets":
|
||||
procNetstat.IpExt.InMcastOctets = &value
|
||||
procNetstat.InMcastOctets = &value
|
||||
case "OutMcastOctets":
|
||||
procNetstat.IpExt.OutMcastOctets = &value
|
||||
procNetstat.OutMcastOctets = &value
|
||||
case "InBcastOctets":
|
||||
procNetstat.IpExt.InBcastOctets = &value
|
||||
procNetstat.InBcastOctets = &value
|
||||
case "OutBcastOctets":
|
||||
procNetstat.IpExt.OutBcastOctets = &value
|
||||
procNetstat.OutBcastOctets = &value
|
||||
case "InCsumErrors":
|
||||
procNetstat.IpExt.InCsumErrors = &value
|
||||
procNetstat.InCsumErrors = &value
|
||||
case "InNoECTPkts":
|
||||
procNetstat.IpExt.InNoECTPkts = &value
|
||||
procNetstat.InNoECTPkts = &value
|
||||
case "InECT1Pkts":
|
||||
procNetstat.IpExt.InECT1Pkts = &value
|
||||
procNetstat.InECT1Pkts = &value
|
||||
case "InECT0Pkts":
|
||||
procNetstat.IpExt.InECT0Pkts = &value
|
||||
procNetstat.InECT0Pkts = &value
|
||||
case "InCEPkts":
|
||||
procNetstat.IpExt.InCEPkts = &value
|
||||
procNetstat.InCEPkts = &value
|
||||
case "ReasmOverlaps":
|
||||
procNetstat.IpExt.ReasmOverlaps = &value
|
||||
procNetstat.ReasmOverlaps = &value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
vendor/github.com/prometheus/procfs/proc_smaps.go (generated, vendored): 4 changed lines
@@ -19,7 +19,6 @@ package procfs
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
@@ -29,7 +28,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// match the header line before each mapped zone in `/proc/pid/smaps`.
|
||||
// Match the header line before each mapped zone in `/proc/pid/smaps`.
|
||||
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
|
||||
)
|
||||
|
||||
@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
|
||||
func (s *ProcSMapsRollup) parseLine(line string) error {
|
||||
kv := strings.SplitN(line, ":", 2)
|
||||
if len(kv) != 2 {
|
||||
fmt.Println(line)
|
||||
return errors.New("invalid net/dev line, missing colon")
|
||||
}
|
||||
|
||||
|
vendor/github.com/prometheus/procfs/proc_snmp.go (generated, vendored): 120 changed lines
@@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
|
||||
case "Ip":
|
||||
switch key {
|
||||
case "Forwarding":
|
||||
procSnmp.Ip.Forwarding = &value
|
||||
procSnmp.Forwarding = &value
|
||||
case "DefaultTTL":
|
||||
procSnmp.Ip.DefaultTTL = &value
|
||||
procSnmp.DefaultTTL = &value
|
||||
case "InReceives":
|
||||
procSnmp.Ip.InReceives = &value
|
||||
procSnmp.InReceives = &value
|
||||
case "InHdrErrors":
|
||||
procSnmp.Ip.InHdrErrors = &value
|
||||
procSnmp.InHdrErrors = &value
|
||||
case "InAddrErrors":
|
||||
procSnmp.Ip.InAddrErrors = &value
|
||||
procSnmp.InAddrErrors = &value
|
||||
case "ForwDatagrams":
|
||||
procSnmp.Ip.ForwDatagrams = &value
|
||||
procSnmp.ForwDatagrams = &value
|
||||
case "InUnknownProtos":
|
||||
procSnmp.Ip.InUnknownProtos = &value
|
||||
procSnmp.InUnknownProtos = &value
|
||||
case "InDiscards":
|
||||
procSnmp.Ip.InDiscards = &value
|
||||
procSnmp.InDiscards = &value
|
||||
case "InDelivers":
|
||||
procSnmp.Ip.InDelivers = &value
|
||||
procSnmp.InDelivers = &value
|
||||
case "OutRequests":
|
||||
procSnmp.Ip.OutRequests = &value
|
||||
procSnmp.OutRequests = &value
|
||||
case "OutDiscards":
|
||||
procSnmp.Ip.OutDiscards = &value
|
||||
procSnmp.OutDiscards = &value
|
||||
case "OutNoRoutes":
|
||||
procSnmp.Ip.OutNoRoutes = &value
|
||||
procSnmp.OutNoRoutes = &value
|
||||
case "ReasmTimeout":
|
||||
procSnmp.Ip.ReasmTimeout = &value
|
||||
procSnmp.ReasmTimeout = &value
|
||||
case "ReasmReqds":
|
||||
procSnmp.Ip.ReasmReqds = &value
|
||||
procSnmp.ReasmReqds = &value
|
||||
case "ReasmOKs":
|
||||
procSnmp.Ip.ReasmOKs = &value
|
||||
procSnmp.ReasmOKs = &value
|
||||
case "ReasmFails":
|
||||
procSnmp.Ip.ReasmFails = &value
|
||||
procSnmp.ReasmFails = &value
|
||||
case "FragOKs":
|
||||
procSnmp.Ip.FragOKs = &value
|
||||
procSnmp.FragOKs = &value
|
||||
case "FragFails":
|
||||
procSnmp.Ip.FragFails = &value
|
||||
procSnmp.FragFails = &value
|
||||
case "FragCreates":
|
||||
procSnmp.Ip.FragCreates = &value
|
||||
procSnmp.FragCreates = &value
|
||||
}
|
||||
case "Icmp":
|
||||
switch key {
|
||||
case "InMsgs":
|
||||
procSnmp.Icmp.InMsgs = &value
|
||||
procSnmp.InMsgs = &value
|
||||
case "InErrors":
|
||||
procSnmp.Icmp.InErrors = &value
|
||||
case "InCsumErrors":
|
||||
procSnmp.Icmp.InCsumErrors = &value
|
||||
case "InDestUnreachs":
|
||||
procSnmp.Icmp.InDestUnreachs = &value
|
||||
procSnmp.InDestUnreachs = &value
|
||||
case "InTimeExcds":
|
||||
procSnmp.Icmp.InTimeExcds = &value
|
||||
procSnmp.InTimeExcds = &value
|
||||
case "InParmProbs":
|
||||
procSnmp.Icmp.InParmProbs = &value
|
||||
procSnmp.InParmProbs = &value
|
||||
case "InSrcQuenchs":
|
||||
procSnmp.Icmp.InSrcQuenchs = &value
|
||||
procSnmp.InSrcQuenchs = &value
|
||||
case "InRedirects":
|
||||
procSnmp.Icmp.InRedirects = &value
|
||||
procSnmp.InRedirects = &value
|
||||
case "InEchos":
|
||||
procSnmp.Icmp.InEchos = &value
|
||||
procSnmp.InEchos = &value
|
||||
case "InEchoReps":
|
||||
procSnmp.Icmp.InEchoReps = &value
|
||||
procSnmp.InEchoReps = &value
|
||||
case "InTimestamps":
|
||||
procSnmp.Icmp.InTimestamps = &value
|
||||
procSnmp.InTimestamps = &value
|
||||
case "InTimestampReps":
|
||||
procSnmp.Icmp.InTimestampReps = &value
|
||||
procSnmp.InTimestampReps = &value
|
||||
case "InAddrMasks":
|
||||
procSnmp.Icmp.InAddrMasks = &value
|
||||
procSnmp.InAddrMasks = &value
|
||||
case "InAddrMaskReps":
|
||||
procSnmp.Icmp.InAddrMaskReps = &value
|
||||
procSnmp.InAddrMaskReps = &value
|
||||
case "OutMsgs":
|
||||
procSnmp.Icmp.OutMsgs = &value
|
||||
procSnmp.OutMsgs = &value
|
||||
case "OutErrors":
|
||||
procSnmp.Icmp.OutErrors = &value
|
||||
procSnmp.OutErrors = &value
|
||||
case "OutDestUnreachs":
|
||||
procSnmp.Icmp.OutDestUnreachs = &value
|
||||
procSnmp.OutDestUnreachs = &value
|
||||
case "OutTimeExcds":
|
||||
procSnmp.Icmp.OutTimeExcds = &value
|
||||
procSnmp.OutTimeExcds = &value
|
||||
case "OutParmProbs":
|
||||
procSnmp.Icmp.OutParmProbs = &value
|
||||
procSnmp.OutParmProbs = &value
|
||||
case "OutSrcQuenchs":
|
||||
procSnmp.Icmp.OutSrcQuenchs = &value
|
||||
procSnmp.OutSrcQuenchs = &value
|
||||
case "OutRedirects":
|
||||
procSnmp.Icmp.OutRedirects = &value
|
||||
procSnmp.OutRedirects = &value
|
||||
case "OutEchos":
|
||||
procSnmp.Icmp.OutEchos = &value
|
||||
procSnmp.OutEchos = &value
|
||||
case "OutEchoReps":
|
||||
procSnmp.Icmp.OutEchoReps = &value
|
||||
procSnmp.OutEchoReps = &value
|
||||
case "OutTimestamps":
|
||||
procSnmp.Icmp.OutTimestamps = &value
|
||||
procSnmp.OutTimestamps = &value
|
||||
case "OutTimestampReps":
|
||||
procSnmp.Icmp.OutTimestampReps = &value
|
||||
procSnmp.OutTimestampReps = &value
|
||||
case "OutAddrMasks":
|
||||
procSnmp.Icmp.OutAddrMasks = &value
|
||||
procSnmp.OutAddrMasks = &value
|
||||
case "OutAddrMaskReps":
|
||||
procSnmp.Icmp.OutAddrMaskReps = &value
|
||||
procSnmp.OutAddrMaskReps = &value
|
||||
}
|
||||
case "IcmpMsg":
|
||||
switch key {
|
||||
case "InType3":
|
||||
procSnmp.IcmpMsg.InType3 = &value
|
||||
procSnmp.InType3 = &value
|
||||
case "OutType3":
|
||||
procSnmp.IcmpMsg.OutType3 = &value
|
||||
procSnmp.OutType3 = &value
|
||||
}
|
||||
case "Tcp":
|
||||
switch key {
|
||||
case "RtoAlgorithm":
|
||||
procSnmp.Tcp.RtoAlgorithm = &value
|
||||
procSnmp.RtoAlgorithm = &value
|
||||
case "RtoMin":
|
||||
procSnmp.Tcp.RtoMin = &value
|
||||
procSnmp.RtoMin = &value
|
||||
case "RtoMax":
|
||||
procSnmp.Tcp.RtoMax = &value
|
||||
procSnmp.RtoMax = &value
|
||||
case "MaxConn":
|
||||
procSnmp.Tcp.MaxConn = &value
|
||||
procSnmp.MaxConn = &value
|
||||
case "ActiveOpens":
|
||||
procSnmp.Tcp.ActiveOpens = &value
|
||||
procSnmp.ActiveOpens = &value
|
||||
case "PassiveOpens":
|
||||
procSnmp.Tcp.PassiveOpens = &value
|
||||
procSnmp.PassiveOpens = &value
|
||||
case "AttemptFails":
|
||||
procSnmp.Tcp.AttemptFails = &value
|
||||
procSnmp.AttemptFails = &value
|
||||
case "EstabResets":
|
||||
procSnmp.Tcp.EstabResets = &value
|
||||
procSnmp.EstabResets = &value
|
||||
case "CurrEstab":
|
||||
procSnmp.Tcp.CurrEstab = &value
|
||||
procSnmp.CurrEstab = &value
|
||||
case "InSegs":
|
||||
procSnmp.Tcp.InSegs = &value
|
||||
procSnmp.InSegs = &value
|
||||
case "OutSegs":
|
||||
procSnmp.Tcp.OutSegs = &value
|
||||
procSnmp.OutSegs = &value
|
||||
case "RetransSegs":
|
||||
procSnmp.Tcp.RetransSegs = &value
|
||||
procSnmp.RetransSegs = &value
|
||||
case "InErrs":
|
||||
procSnmp.Tcp.InErrs = &value
|
||||
procSnmp.InErrs = &value
|
||||
case "OutRsts":
|
||||
procSnmp.Tcp.OutRsts = &value
|
||||
procSnmp.OutRsts = &value
|
||||
case "InCsumErrors":
|
||||
procSnmp.Tcp.InCsumErrors = &value
|
||||
}
|
||||
|
vendor/github.com/prometheus/procfs/proc_snmp6.go (generated, vendored): 150 changed lines
@@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
procSnmp6.Ip6.InReceives = &value
procSnmp6.InReceives = &value
case "InHdrErrors":
procSnmp6.Ip6.InHdrErrors = &value
procSnmp6.InHdrErrors = &value
case "InTooBigErrors":
procSnmp6.Ip6.InTooBigErrors = &value
procSnmp6.InTooBigErrors = &value
case "InNoRoutes":
procSnmp6.Ip6.InNoRoutes = &value
procSnmp6.InNoRoutes = &value
case "InAddrErrors":
procSnmp6.Ip6.InAddrErrors = &value
procSnmp6.InAddrErrors = &value
case "InUnknownProtos":
procSnmp6.Ip6.InUnknownProtos = &value
procSnmp6.InUnknownProtos = &value
case "InTruncatedPkts":
procSnmp6.Ip6.InTruncatedPkts = &value
procSnmp6.InTruncatedPkts = &value
case "InDiscards":
procSnmp6.Ip6.InDiscards = &value
procSnmp6.InDiscards = &value
case "InDelivers":
procSnmp6.Ip6.InDelivers = &value
procSnmp6.InDelivers = &value
case "OutForwDatagrams":
procSnmp6.Ip6.OutForwDatagrams = &value
procSnmp6.OutForwDatagrams = &value
case "OutRequests":
procSnmp6.Ip6.OutRequests = &value
procSnmp6.OutRequests = &value
case "OutDiscards":
procSnmp6.Ip6.OutDiscards = &value
procSnmp6.OutDiscards = &value
case "OutNoRoutes":
procSnmp6.Ip6.OutNoRoutes = &value
procSnmp6.OutNoRoutes = &value
case "ReasmTimeout":
procSnmp6.Ip6.ReasmTimeout = &value
procSnmp6.ReasmTimeout = &value
case "ReasmReqds":
procSnmp6.Ip6.ReasmReqds = &value
procSnmp6.ReasmReqds = &value
case "ReasmOKs":
procSnmp6.Ip6.ReasmOKs = &value
procSnmp6.ReasmOKs = &value
case "ReasmFails":
procSnmp6.Ip6.ReasmFails = &value
procSnmp6.ReasmFails = &value
case "FragOKs":
procSnmp6.Ip6.FragOKs = &value
procSnmp6.FragOKs = &value
case "FragFails":
procSnmp6.Ip6.FragFails = &value
procSnmp6.FragFails = &value
case "FragCreates":
procSnmp6.Ip6.FragCreates = &value
procSnmp6.FragCreates = &value
case "InMcastPkts":
procSnmp6.Ip6.InMcastPkts = &value
procSnmp6.InMcastPkts = &value
case "OutMcastPkts":
procSnmp6.Ip6.OutMcastPkts = &value
procSnmp6.OutMcastPkts = &value
case "InOctets":
procSnmp6.Ip6.InOctets = &value
procSnmp6.InOctets = &value
case "OutOctets":
procSnmp6.Ip6.OutOctets = &value
procSnmp6.OutOctets = &value
case "InMcastOctets":
procSnmp6.Ip6.InMcastOctets = &value
procSnmp6.InMcastOctets = &value
case "OutMcastOctets":
procSnmp6.Ip6.OutMcastOctets = &value
procSnmp6.OutMcastOctets = &value
case "InBcastOctets":
procSnmp6.Ip6.InBcastOctets = &value
procSnmp6.InBcastOctets = &value
case "OutBcastOctets":
procSnmp6.Ip6.OutBcastOctets = &value
procSnmp6.OutBcastOctets = &value
case "InNoECTPkts":
procSnmp6.Ip6.InNoECTPkts = &value
procSnmp6.InNoECTPkts = &value
case "InECT1Pkts":
procSnmp6.Ip6.InECT1Pkts = &value
procSnmp6.InECT1Pkts = &value
case "InECT0Pkts":
procSnmp6.Ip6.InECT0Pkts = &value
procSnmp6.InECT0Pkts = &value
case "InCEPkts":
procSnmp6.Ip6.InCEPkts = &value
procSnmp6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
procSnmp6.Icmp6.InMsgs = &value
procSnmp6.InMsgs = &value
case "InErrors":
procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
procSnmp6.Icmp6.OutMsgs = &value
procSnmp6.OutMsgs = &value
case "OutErrors":
procSnmp6.Icmp6.OutErrors = &value
procSnmp6.OutErrors = &value
case "InCsumErrors":
procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
procSnmp6.Icmp6.InDestUnreachs = &value
procSnmp6.InDestUnreachs = &value
case "InPktTooBigs":
procSnmp6.Icmp6.InPktTooBigs = &value
procSnmp6.InPktTooBigs = &value
case "InTimeExcds":
procSnmp6.Icmp6.InTimeExcds = &value
procSnmp6.InTimeExcds = &value
case "InParmProblems":
procSnmp6.Icmp6.InParmProblems = &value
procSnmp6.InParmProblems = &value
case "InEchos":
procSnmp6.Icmp6.InEchos = &value
procSnmp6.InEchos = &value
case "InEchoReplies":
procSnmp6.Icmp6.InEchoReplies = &value
procSnmp6.InEchoReplies = &value
case "InGroupMembQueries":
procSnmp6.Icmp6.InGroupMembQueries = &value
procSnmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
procSnmp6.Icmp6.InGroupMembResponses = &value
procSnmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
procSnmp6.Icmp6.InGroupMembReductions = &value
procSnmp6.InGroupMembReductions = &value
case "InRouterSolicits":
procSnmp6.Icmp6.InRouterSolicits = &value
procSnmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
procSnmp6.Icmp6.InRouterAdvertisements = &value
procSnmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
procSnmp6.Icmp6.InNeighborSolicits = &value
procSnmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
procSnmp6.Icmp6.InNeighborAdvertisements = &value
procSnmp6.InNeighborAdvertisements = &value
case "InRedirects":
procSnmp6.Icmp6.InRedirects = &value
procSnmp6.InRedirects = &value
case "InMLDv2Reports":
procSnmp6.Icmp6.InMLDv2Reports = &value
procSnmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
procSnmp6.Icmp6.OutDestUnreachs = &value
procSnmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
procSnmp6.Icmp6.OutPktTooBigs = &value
procSnmp6.OutPktTooBigs = &value
case "OutTimeExcds":
procSnmp6.Icmp6.OutTimeExcds = &value
procSnmp6.OutTimeExcds = &value
case "OutParmProblems":
procSnmp6.Icmp6.OutParmProblems = &value
procSnmp6.OutParmProblems = &value
case "OutEchos":
procSnmp6.Icmp6.OutEchos = &value
procSnmp6.OutEchos = &value
case "OutEchoReplies":
procSnmp6.Icmp6.OutEchoReplies = &value
procSnmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
procSnmp6.Icmp6.OutGroupMembQueries = &value
procSnmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
procSnmp6.Icmp6.OutGroupMembResponses = &value
procSnmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
procSnmp6.Icmp6.OutGroupMembReductions = &value
procSnmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
procSnmp6.Icmp6.OutRouterSolicits = &value
procSnmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
procSnmp6.Icmp6.OutRouterAdvertisements = &value
procSnmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
procSnmp6.Icmp6.OutNeighborSolicits = &value
procSnmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
procSnmp6.Icmp6.OutNeighborAdvertisements = &value
procSnmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
procSnmp6.Icmp6.OutRedirects = &value
procSnmp6.OutRedirects = &value
case "OutMLDv2Reports":
procSnmp6.Icmp6.OutMLDv2Reports = &value
procSnmp6.OutMLDv2Reports = &value
case "InType1":
procSnmp6.Icmp6.InType1 = &value
procSnmp6.InType1 = &value
case "InType134":
procSnmp6.Icmp6.InType134 = &value
procSnmp6.InType134 = &value
case "InType135":
procSnmp6.Icmp6.InType135 = &value
procSnmp6.InType135 = &value
case "InType136":
procSnmp6.Icmp6.InType136 = &value
procSnmp6.InType136 = &value
case "InType143":
procSnmp6.Icmp6.InType143 = &value
procSnmp6.InType143 = &value
case "OutType133":
procSnmp6.Icmp6.OutType133 = &value
procSnmp6.OutType133 = &value
case "OutType135":
procSnmp6.Icmp6.OutType135 = &value
procSnmp6.OutType135 = &value
case "OutType136":
procSnmp6.Icmp6.OutType136 = &value
procSnmp6.OutType136 = &value
case "OutType143":
procSnmp6.Icmp6.OutType143 = &value
procSnmp6.OutType143 = &value
}
case "Udp6":
switch key {
@@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "InCsumErrors":
procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
procSnmp6.Udp6.IgnoredMulti = &value
procSnmp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
18  vendor/github.com/prometheus/procfs/proc_status.go  generated  vendored
@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
}
}
case "NSpid":
s.NSpids = calcNSPidsList(vString)
nspids, err := calcNSPidsList(vString)
if err != nil {
return err
}
s.NSpids = nspids
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
return g
}

func calcNSPidsList(nspidsString string) []uint64 {
s := strings.Split(nspidsString, " ")
func calcNSPidsList(nspidsString string) ([]uint64, error) {
s := strings.Split(nspidsString, "\t")
var nspids []uint64

for _, nspid := range s {
nspid, _ := strconv.ParseUint(nspid, 10, 64)
if nspid == 0 {
continue
nspid, err := strconv.ParseUint(nspid, 10, 64)
if err != nil {
return nil, err
}
nspids = append(nspids, nspid)
}

return nspids
return nspids, nil
}
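For context, the updated calcNSPidsList splits the NSpid value on tabs and propagates parse errors instead of silently skipping bad or zero entries. A minimal standalone sketch of that pattern (the function name and sample input here are illustrative, not procfs's exported API):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNSPids follows the new procfs behaviour: split on tabs and
// return an error for any value that fails to parse.
func parseNSPids(s string) ([]uint64, error) {
	var pids []uint64
	for _, field := range strings.Split(s, "\t") {
		pid, err := strconv.ParseUint(field, 10, 64)
		if err != nil {
			return nil, err
		}
		pids = append(pids, pid)
	}
	return pids, nil
}

func main() {
	pids, err := parseNSPids("1729\t1") // hypothetical NSpid line value
	fmt.Println(pids, err)              // [1729 1] <nil>
}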
2  vendor/github.com/prometheus/procfs/proc_sys.go  generated  vendored
@@ -21,7 +21,7 @@ import (
)

func sysctlToPath(sysctl string) string {
return strings.Replace(sysctl, ".", "/", -1)
return strings.ReplaceAll(sysctl, ".", "/")
}

func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
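The sysctlToPath change is behaviour-preserving: strings.ReplaceAll is simply the clearer spelling of strings.Replace with n = -1. A tiny illustration with a made-up sysctl name:

package main

import (
	"fmt"
	"strings"
)

func main() {
	sysctl := "net.ipv4.ip_forward"
	// strings.ReplaceAll(s, old, new) is equivalent to strings.Replace(s, old, new, -1).
	fmt.Println(strings.ReplaceAll(sysctl, ".", "/")) // net/ipv4/ip_forward
}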
22  vendor/github.com/prometheus/procfs/softirqs.go  generated  vendored
@@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
if len(parts) < 2 {
continue
}
switch {
case parts[0] == "HI:":
switch parts[0] {
case "HI:":
perCPU := parts[1:]
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TIMER:":
case "TIMER:":
perCPU := parts[1:]
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_TX:":
case "NET_TX:":
perCPU := parts[1:]
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_RX:":
case "NET_RX:":
perCPU := parts[1:]
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "BLOCK:":
case "BLOCK:":
perCPU := parts[1:]
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "IRQ_POLL:":
case "IRQ_POLL:":
perCPU := parts[1:]
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TASKLET:":
case "TASKLET:":
perCPU := parts[1:]
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "SCHED:":
case "SCHED:":
perCPU := parts[1:]
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "HRTIMER:":
case "HRTIMER:":
perCPU := parts[1:]
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "RCU:":
case "RCU:":
perCPU := parts[1:]
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
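The softirqs change above is purely cosmetic: a tagless switch with repeated `parts[0] ==` comparisons becomes a tagged `switch parts[0]`. A minimal illustration of the same refactor, with made-up labels:

package main

import "fmt"

func classify(label string) string {
	// Tagged switch: equivalent to `switch { case label == "HI:": ... }`,
	// but the compared expression is written only once.
	switch label {
	case "HI:":
		return "high-priority softirq"
	case "TIMER:":
		return "timer softirq"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify("HI:")) // high-priority softirq
}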
2  vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md  generated  vendored
@@ -1,4 +1,4 @@
## Summary
# Prometheus Benchmarks

Using the Prometheus bridge and the OTLP exporter adds roughly ~50% to the CPU and memory overhead of an application compared to serving a Prometheus HTTP endpoint for metrics.
9  vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go  generated  vendored
@@ -5,13 +5,6 @@ package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime"

// Version is the current release version of the runtime instrumentation.
func Version() string {
return "0.60.0"
return "0.61.0"
// This string is updated by the pre_release.sh script during release
}

// SemVersion is the semantic version to be supplied to tracer/meter creation.
//
// Deprecated: Use [Version] instead.
func SemVersion() string {
return Version()
}
33  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go  generated  vendored
@@ -13,6 +13,7 @@ import (
"strconv"
"strings"
"time"
"unicode"

"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@@ -359,8 +360,9 @@ func WithTimeout(duration time.Duration) Option {
// explicitly returns a backoff time in the response, that time will take
// precedence over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
// These settings define the retry strategy implemented by the exporter.
// These settings do not define any network retry strategy.
// That is handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
@@ -442,13 +444,15 @@ func convHeaders(s string) (map[string]string, error) {
continue
}

escKey, e := url.PathUnescape(rawKey)
if e != nil {
key := strings.TrimSpace(rawKey)

// Validate the key.
if !isValidHeaderKey(key) {
err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey))
continue
}
key := strings.TrimSpace(escKey)

// Only decode the value.
escVal, e := url.PathUnescape(rawVal)
if e != nil {
err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal))
@@ -651,3 +655,22 @@ func fallback[T any](val T) resolver[T] {
return s
}
}

func isValidHeaderKey(key string) bool {
if key == "" {
return false
}
for _, c := range key {
if !isTokenChar(c) {
return false
}
}
return true
}

func isTokenChar(c rune) bool {
return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
unicode.IsDigit(c) ||
c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
}
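The convHeaders change above stops URL-unescaping header keys and instead validates them as HTTP token characters; only the value is still percent-decoded. A standalone sketch of that validation logic, equivalent in effect to the added helpers (names here are illustrative, not the exporter's exported API):

package main

import (
	"fmt"
	"net/url"
	"strings"
	"unicode"
)

// isTokenChar reports whether c is a valid HTTP header token character,
// matching the set accepted by the new vendored check.
func isTokenChar(c rune) bool {
	return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
		unicode.IsDigit(c) ||
		strings.ContainsRune("!#$%&'*+-.^_`|~", c))
}

// validKey reports whether key is a non-empty run of token characters.
func validKey(key string) bool {
	if key == "" {
		return false
	}
	for _, c := range key {
		if !isTokenChar(c) {
			return false
		}
	}
	return true
}

func main() {
	key := strings.TrimSpace("api-key")              // keys are trimmed, never decoded
	val, _ := url.PathUnescape("secret%20token")     // only the value is decoded
	fmt.Println(validKey(key), val)                  // true secret token
}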
@@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl

// Copyright The OpenTelemetry Authors
@@ -14,7 +14,7 @@ import (
"fmt"
"time"

"github.com/cenkalti/backoff/v4"
"github.com/cenkalti/backoff/v5"
)

// DefaultConfig are the recommended defaults to use.
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
MaxElapsedTime: c.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
b.Reset()

maxElapsedTime := c.MaxElapsedTime
startTime := time.Now()

for {
err := fn(ctx)
if err == nil {
@@ -94,22 +94,18 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}

bOff := b.NextBackOff()
if bOff == backoff.Stop {
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}

// Wait for the greater of the backoff or throttle delay.
var delay time.Duration
if bOff > throttle {
delay = bOff
} else {
elapsed := b.GetElapsedTime()
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
bOff := b.NextBackOff()
delay := max(throttle, bOff)

elapsed := time.Since(startTime)
if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}
delay = throttle
}

if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
return fmt.Errorf("%w: %w", ctxErr, err)
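As the hunk above shows, backoff/v5 no longer carries MaxElapsedTime, Stop or Clock on ExponentialBackOff, so the shared retry template now tracks elapsed time itself and waits for the larger of the server throttle hint and the computed backoff. A simplified, self-contained sketch of that loop shape (fn, the fixed growth factor and the durations are stand-ins, not the template's actual API):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry calls fn until it succeeds, the context is cancelled, or maxElapsed
// would be exceeded; the delay grows exponentially and never drops below throttle.
func retry(ctx context.Context, fn func() error, throttle, maxElapsed time.Duration) error {
	start := time.Now()
	backoff := 50 * time.Millisecond // stand-in for backoff/v5's NextBackOff()
	for {
		err := fn()
		if err == nil {
			return nil
		}
		delay := max(throttle, backoff)
		if maxElapsed != 0 && time.Since(start)+delay > maxElapsed {
			return fmt.Errorf("max retry time would elapse: %w", err)
		}
		select {
		case <-ctx.Done():
			return errors.Join(ctx.Err(), err)
		case <-time.After(delay):
		}
		backoff *= 2
	}
}

func main() {
	attempts := 0
	err := retry(context.Background(), func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient")
		}
		return nil
	}, 10*time.Millisecond, time.Second)
	fmt.Println(attempts, err) // 3 <nil>
}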
@@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlplog/transform/log.go.tmpl

// Copyright The OpenTelemetry Authors
@@ -257,7 +257,7 @@ func stringSliceValues(vals []string) []*cpb.AnyValue {
return converted
}

// Attrs transforms a slice of [api.KeyValue] into OTLP key-values.
// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values.
func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue {
if len(attrs) == 0 {
return nil
2  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go  generated  vendored
@@ -5,5 +5,5 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o

// Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use.
func Version() string {
return "0.11.0"
return "0.12.2"
}
5  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go  generated  vendored
@@ -44,7 +44,9 @@ func newNoopClient() *client {

// newHTTPClient creates a new HTTP log client.
func newHTTPClient(cfg config) (*client, error) {
hc := &http.Client{
hc := cfg.httpClient
if hc == nil {
hc = &http.Client{
Transport: ourTransport,
Timeout: cfg.timeout.Value,
}
@@ -60,6 +62,7 @@ func newHTTPClient(cfg config) (*client, error) {
clonedTransport.Proxy = cfg.proxy.Value
}
}
}

u := &url.URL{
Scheme: "https",
48  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go  generated  vendored
@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"time"
"unicode"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry"
@@ -94,6 +95,7 @@ type config struct {
timeout setting[time.Duration]
proxy setting[HTTPTransportProxyFunc]
retryCfg setting[retry.Config]
httpClient *http.Client
}

func newConfig(options []Option) config {
@@ -343,6 +345,25 @@ func WithProxy(pf HTTPTransportProxyFunc) Option {
})
}

// WithHTTPClient sets the HTTP client to used by the exporter.
//
// This option will take precedence over [WithProxy], [WithTimeout],
// [WithTLSClientConfig] options as well as OTEL_EXPORTER_OTLP_CERTIFICATE,
// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT,
// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT environment variables.
//
// Timeout and all other fields of the passed [http.Client] are left intact.
//
// Be aware that passing an HTTP client with transport like
// [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.NewTransport] can
// cause the client to be instrumented twice and cause infinite recursion.
func WithHTTPClient(c *http.Client) Option {
return fnOpt(func(cfg config) config {
cfg.httpClient = c
return cfg
})
}

// setting is a configuration setting value.
type setting[T any] struct {
Value T
@@ -544,13 +565,15 @@ func convHeaders(s string) (map[string]string, error) {
continue
}

escKey, e := url.PathUnescape(rawKey)
if e != nil {
key := strings.TrimSpace(rawKey)

// Validate the key.
if !isValidHeaderKey(key) {
err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey))
continue
}
key := strings.TrimSpace(escKey)

// Only decode the value.
escVal, e := url.PathUnescape(rawVal)
if e != nil {
err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal))
@@ -600,3 +623,22 @@ func fallback[T any](val T) resolver[T] {
return s
}
}

func isValidHeaderKey(key string) bool {
if key == "" {
return false
}
for _, c := range key {
if !isTokenChar(c) {
return false
}
}
return true
}

func isTokenChar(c rune) bool {
return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
unicode.IsDigit(c) ||
c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
}
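The WithHTTPClient option added above lets callers inject a pre-configured *http.Client into the logs exporter. A hedged usage sketch (not taken from GoToSocial's own wiring; the timeout value is illustrative):

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
)

func main() {
	ctx := context.Background()

	// A custom client; per the option's docs it takes precedence over
	// WithProxy/WithTimeout/WithTLSClientConfig and the related env vars.
	hc := &http.Client{Timeout: 10 * time.Second}

	exp, err := otlploghttp.New(ctx, otlploghttp.WithHTTPClient(hc))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}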
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/retry/retry.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/cenkalti/backoff/v5"
|
||||
)
|
||||
|
||||
// DefaultConfig are the recommended defaults to use.
|
||||
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
|
||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||
Multiplier: backoff.DefaultMultiplier,
|
||||
MaxInterval: c.MaxInterval,
|
||||
MaxElapsedTime: c.MaxElapsedTime,
|
||||
Stop: backoff.Stop,
|
||||
Clock: backoff.SystemClock,
|
||||
}
|
||||
b.Reset()
|
||||
|
||||
maxElapsedTime := c.MaxElapsedTime
|
||||
startTime := time.Now()
|
||||
|
||||
for {
|
||||
err := fn(ctx)
|
||||
if err == nil {
|
||||
@@ -94,22 +94,18 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
|
||||
return err
|
||||
}
|
||||
|
||||
bOff := b.NextBackOff()
|
||||
if bOff == backoff.Stop {
|
||||
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
|
||||
return fmt.Errorf("max retry time elapsed: %w", err)
|
||||
}
|
||||
|
||||
// Wait for the greater of the backoff or throttle delay.
|
||||
var delay time.Duration
|
||||
if bOff > throttle {
|
||||
delay = bOff
|
||||
} else {
|
||||
elapsed := b.GetElapsedTime()
|
||||
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
|
||||
bOff := b.NextBackOff()
|
||||
delay := max(throttle, bOff)
|
||||
|
||||
elapsed := time.Since(startTime)
|
||||
if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
|
||||
return fmt.Errorf("max retry time would elapse: %w", err)
|
||||
}
|
||||
delay = throttle
|
||||
}
|
||||
|
||||
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
|
||||
return fmt.Errorf("%w: %w", ctxErr, err)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlplog/transform/log.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -257,7 +257,7 @@ func stringSliceValues(vals []string) []*cpb.AnyValue {
|
||||
return converted
|
||||
}
|
||||
|
||||
// Attrs transforms a slice of [api.KeyValue] into OTLP key-values.
|
||||
// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values.
|
||||
func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue {
|
||||
if len(attrs) == 0 {
|
||||
return nil
|
||||
|
2  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go  generated  vendored
@@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o

// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use.
func Version() string {
return "0.11.0"
return "0.12.2"
}
5  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go  generated  vendored
@@ -238,8 +238,9 @@ func WithTimeout(duration time.Duration) Option {
// explicitly returns a backoff time in the response, that time will take
// precedence over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
// These settings define the retry strategy implemented by the exporter.
// These settings do not define any network retry strategy.
// That is handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
@@ -1,9 +1,11 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package envconfig provides functionality to parse configuration from
|
||||
// environment variables.
|
||||
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
|
||||
|
||||
import (
|
||||
|
@@ -1,6 +1,7 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package internal provides internal functionally for the otlpmetricgrpc package.
|
||||
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption {
|
||||
}),
|
||||
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithClientCert(
|
||||
"CLIENT_CERTIFICATE",
|
||||
"CLIENT_KEY",
|
||||
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
|
||||
),
|
||||
envconfig.WithClientCert(
|
||||
"METRICS_CLIENT_CERTIFICATE",
|
||||
"METRICS_CLIENT_KEY",
|
||||
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
|
||||
),
|
||||
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
|
||||
@@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption {
|
||||
WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
|
||||
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||
envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||
withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
|
||||
withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
|
||||
withEnvTemporalityPreference(
|
||||
"METRICS_TEMPORALITY_PREFERENCE",
|
||||
func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) },
|
||||
),
|
||||
withEnvAggPreference(
|
||||
"METRICS_DEFAULT_HISTOGRAM_AGGREGATION",
|
||||
func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) },
|
||||
),
|
||||
)
|
||||
|
||||
return opts
|
||||
@@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector))
|
||||
case "lowmemory":
|
||||
fn(lowMemory)
|
||||
default:
|
||||
global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
|
||||
global.Warn(
|
||||
"OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.",
|
||||
"value",
|
||||
s,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e
|
||||
return metric.DefaultAggregationSelector(kind)
|
||||
})
|
||||
default:
|
||||
global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
|
||||
global.Warn(
|
||||
"OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.",
|
||||
"value",
|
||||
s,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,9 +1,10 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package oconf provides configuration for the otlpmetric exporters.
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||
|
||||
import (
|
||||
@@ -56,13 +57,15 @@ type (
|
||||
Timeout time.Duration
|
||||
URLPath string
|
||||
|
||||
// gRPC configurations
|
||||
GRPCCredentials credentials.TransportCredentials
|
||||
|
||||
TemporalitySelector metric.TemporalitySelector
|
||||
AggregationSelector metric.AggregationSelector
|
||||
|
||||
// gRPC configurations
|
||||
GRPCCredentials credentials.TransportCredentials
|
||||
|
||||
// HTTP configurations
|
||||
Proxy HTTPTransportProxyFunc
|
||||
HTTPClient *http.Client
|
||||
}
|
||||
|
||||
Config struct {
|
||||
@@ -372,3 +375,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithHTTPClient(c *http.Client) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.HTTPClient = c
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/partialsuccess.go
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/retry/retry.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/cenkalti/backoff/v5"
|
||||
)
|
||||
|
||||
// DefaultConfig are the recommended defaults to use.
|
||||
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
|
||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||
Multiplier: backoff.DefaultMultiplier,
|
||||
MaxInterval: c.MaxInterval,
|
||||
MaxElapsedTime: c.MaxElapsedTime,
|
||||
Stop: backoff.Stop,
|
||||
Clock: backoff.SystemClock,
|
||||
}
|
||||
b.Reset()
|
||||
|
||||
maxElapsedTime := c.MaxElapsedTime
|
||||
startTime := time.Now()
|
||||
|
||||
for {
|
||||
err := fn(ctx)
|
||||
if err == nil {
|
||||
@@ -94,22 +94,18 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
|
||||
return err
|
||||
}
|
||||
|
||||
bOff := b.NextBackOff()
|
||||
if bOff == backoff.Stop {
|
||||
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
|
||||
return fmt.Errorf("max retry time elapsed: %w", err)
|
||||
}
|
||||
|
||||
// Wait for the greater of the backoff or throttle delay.
|
||||
var delay time.Duration
|
||||
if bOff > throttle {
|
||||
delay = bOff
|
||||
} else {
|
||||
elapsed := b.GetElapsedTime()
|
||||
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
|
||||
bOff := b.NextBackOff()
|
||||
delay := max(throttle, bOff)
|
||||
|
||||
elapsed := time.Since(startTime)
|
||||
if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
|
||||
return fmt.Errorf("max retry time would elapse: %w", err)
|
||||
}
|
||||
delay = throttle
|
||||
}
|
||||
|
||||
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
|
||||
return fmt.Errorf("%w: %w", ctxErr, err)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint
|
||||
|
||||
// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
|
||||
// returned if the temporality of h is unknown.
|
||||
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
|
||||
func ExponentialHistogram[N int64 | float64](
|
||||
h metricdata.ExponentialHistogram[N],
|
||||
) (*mpb.Metric_ExponentialHistogram, error) {
|
||||
t, err := Temporality(h.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N
|
||||
|
||||
// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
|
||||
// from dPts.
|
||||
func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
|
||||
func ExponentialHistogramDataPoints[N int64 | float64](
|
||||
dPts []metricdata.ExponentialHistogramDataPoint[N],
|
||||
) []*mpb.ExponentialHistogramDataPoint {
|
||||
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
sum := float64(dPt.Sum)
|
||||
@@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen
|
||||
|
||||
// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
|
||||
// from bucket.
|
||||
func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
|
||||
func ExponentialHistogramDataPointBuckets(
|
||||
bucket metricdata.ExponentialBucket,
|
||||
) *mpb.ExponentialHistogramDataPoint_Buckets {
|
||||
return &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||
Offset: bucket.Offset,
|
||||
BucketCounts: bucket.Counts,
|
||||
|
2  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go  generated  vendored
@@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme

// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
func Version() string {
return "1.35.0"
return "1.36.0"
}
7  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go  generated  vendored
@@ -55,7 +55,9 @@ var ourTransport = &http.Transport{

// newClient creates a new HTTP metric client.
func newClient(cfg oconf.Config) (*client, error) {
httpClient := &http.Client{
httpClient := cfg.Metrics.HTTPClient
if httpClient == nil {
httpClient = &http.Client{
Transport: ourTransport,
Timeout: cfg.Metrics.Timeout,
}
@@ -71,6 +73,7 @@ func newClient(cfg oconf.Config) (*client, error) {
clonedTransport.Proxy = cfg.Metrics.Proxy
}
}
}

u := &url.URL{
Scheme: "https",
@@ -277,7 +280,7 @@ type request struct {
// reset reinitializes the request Body and uses ctx for the request.
func (r *request) reset(ctx context.Context) {
r.Body = r.bodyReader()
r.Request = r.Request.WithContext(ctx)
r.Request = r.WithContext(ctx)
}

// retryableError represents a request failure that can be retried.
16  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go  generated  vendored
@@ -222,3 +222,19 @@ func WithAggregationSelector(selector metric.AggregationSelector) Option {
func WithProxy(pf HTTPTransportProxyFunc) Option {
return wrappedOption{oconf.WithProxy(oconf.HTTPTransportProxyFunc(pf))}
}

// WithHTTPClient sets the HTTP client to used by the exporter.
//
// This option will take precedence over [WithProxy], [WithTimeout],
// [WithTLSClientConfig] options as well as OTEL_EXPORTER_OTLP_CERTIFICATE,
// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT,
// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT environment variables.
//
// Timeout and all other fields of the passed [http.Client] are left intact.
//
// Be aware that passing an HTTP client with transport like
// [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.NewTransport] can
// cause the client to be instrumented twice and cause infinite recursion.
func WithHTTPClient(c *http.Client) Option {
return wrappedOption{oconf.WithHTTPClient(c)}
}
@@ -1,9 +1,11 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package envconfig provides functionality to parse configuration from
|
||||
// environment variables.
|
||||
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
|
||||
|
||||
import (
|
||||
|
@@ -1,6 +1,7 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package internal provides internal functionally for the otlpmetrichttp package.
|
||||
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption {
|
||||
}),
|
||||
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithClientCert(
|
||||
"CLIENT_CERTIFICATE",
|
||||
"CLIENT_KEY",
|
||||
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
|
||||
),
|
||||
envconfig.WithClientCert(
|
||||
"METRICS_CLIENT_CERTIFICATE",
|
||||
"METRICS_CLIENT_KEY",
|
||||
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
|
||||
),
|
||||
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
|
||||
@@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption {
|
||||
WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
|
||||
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||
envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||
withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
|
||||
withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
|
||||
withEnvTemporalityPreference(
|
||||
"METRICS_TEMPORALITY_PREFERENCE",
|
||||
func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) },
|
||||
),
|
||||
withEnvAggPreference(
|
||||
"METRICS_DEFAULT_HISTOGRAM_AGGREGATION",
|
||||
func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) },
|
||||
),
|
||||
)
|
||||
|
||||
return opts
|
||||
@@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector))
|
||||
case "lowmemory":
|
||||
fn(lowMemory)
|
||||
default:
|
||||
global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
|
||||
global.Warn(
|
||||
"OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.",
|
||||
"value",
|
||||
s,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e
|
||||
return metric.DefaultAggregationSelector(kind)
|
||||
})
|
||||
default:
|
||||
global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
|
||||
global.Warn(
|
||||
"OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.",
|
||||
"value",
|
||||
s,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,9 +1,10 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package oconf provides configuration for the otlpmetric exporters.
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
|
||||
|
||||
import (
|
||||
@@ -56,13 +57,15 @@ type (
|
||||
Timeout time.Duration
|
||||
URLPath string
|
||||
|
||||
// gRPC configurations
|
||||
GRPCCredentials credentials.TransportCredentials
|
||||
|
||||
TemporalitySelector metric.TemporalitySelector
|
||||
AggregationSelector metric.AggregationSelector
|
||||
|
||||
// gRPC configurations
|
||||
GRPCCredentials credentials.TransportCredentials
|
||||
|
||||
// HTTP configurations
|
||||
Proxy HTTPTransportProxyFunc
|
||||
HTTPClient *http.Client
|
||||
}
|
||||
|
||||
Config struct {
|
||||
@@ -372,3 +375,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithHTTPClient(c *http.Client) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.HTTPClient = c
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/partialsuccess.go
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/retry/retry.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/cenkalti/backoff/v5"
|
||||
)
|
||||
|
||||
// DefaultConfig are the recommended defaults to use.
|
||||
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
|
||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||
Multiplier: backoff.DefaultMultiplier,
|
||||
MaxInterval: c.MaxInterval,
|
||||
MaxElapsedTime: c.MaxElapsedTime,
|
||||
Stop: backoff.Stop,
|
||||
Clock: backoff.SystemClock,
|
||||
}
|
||||
b.Reset()
|
||||
|
||||
maxElapsedTime := c.MaxElapsedTime
|
||||
startTime := time.Now()
|
||||
|
||||
for {
|
||||
err := fn(ctx)
|
||||
if err == nil {
|
||||
@@ -94,22 +94,18 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
|
||||
return err
|
||||
}
|
||||
|
||||
bOff := b.NextBackOff()
|
||||
if bOff == backoff.Stop {
|
||||
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
|
||||
return fmt.Errorf("max retry time elapsed: %w", err)
|
||||
}
|
||||
|
||||
// Wait for the greater of the backoff or throttle delay.
|
||||
var delay time.Duration
|
||||
if bOff > throttle {
|
||||
delay = bOff
|
||||
} else {
|
||||
elapsed := b.GetElapsedTime()
|
||||
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
|
||||
bOff := b.NextBackOff()
|
||||
delay := max(throttle, bOff)
|
||||
|
||||
elapsed := time.Since(startTime)
|
||||
if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
|
||||
return fmt.Errorf("max retry time would elapse: %w", err)
|
||||
}
|
||||
delay = throttle
|
||||
}
|
||||
|
||||
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
|
||||
return fmt.Errorf("%w: %w", ctxErr, err)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint
|
||||
|
||||
// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
|
||||
// returned if the temporality of h is unknown.
|
||||
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
|
||||
func ExponentialHistogram[N int64 | float64](
|
||||
h metricdata.ExponentialHistogram[N],
|
||||
) (*mpb.Metric_ExponentialHistogram, error) {
|
||||
t, err := Temporality(h.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N
|
||||
|
||||
// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
|
||||
// from dPts.
|
||||
func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
|
||||
func ExponentialHistogramDataPoints[N int64 | float64](
|
||||
dPts []metricdata.ExponentialHistogramDataPoint[N],
|
||||
) []*mpb.ExponentialHistogramDataPoint {
|
||||
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
sum := float64(dPt.Sum)
|
||||
@@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen
|
||||
|
||||
// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
|
||||
// from bucket.
|
||||
func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
|
||||
func ExponentialHistogramDataPointBuckets(
|
||||
bucket metricdata.ExponentialBucket,
|
||||
) *mpb.ExponentialHistogramDataPoint_Buckets {
|
||||
return &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||
Offset: bucket.Offset,
|
||||
BucketCounts: bucket.Counts,
|
||||
|
2  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go  generated  vendored
@@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme

// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
func Version() string {
return "1.35.0"
return "1.36.0"
}
@@ -1,6 +1,8 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package tracetransform provides conversion functionality for the otlptrace
|
||||
// exporters.
|
||||
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
|
||||
|
||||
import (
|
||||
|
@@ -1,9 +1,11 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package envconfig provides functionality to parse configuration from
|
||||
// environment variables.
|
||||
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
|
||||
|
||||
import (
|
||||
|
@@ -1,6 +1,7 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package internal provides internal functionally for the otlptracegrpc package.
|
||||
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
|
||||
|
@@ -1,4 +1,4 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// Code generated by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
@@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption {
|
||||
}),
|
||||
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithClientCert(
|
||||
"CLIENT_CERTIFICATE",
|
||||
"CLIENT_KEY",
|
||||
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
|
||||
),
|
||||
envconfig.WithClientCert(
|
||||
"TRACES_CLIENT_CERTIFICATE",
|
||||
"TRACES_CLIENT_KEY",
|
||||
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
|
||||
),
|
||||
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
|
||||
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
|
@@ -1,9 +1,10 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package otlpconfig provides configuration for the otlptrace exporters.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"

import (

@@ -52,7 +53,9 @@ type (
// gRPC configurations
GRPCCredentials credentials.TransportCredentials

// HTTP configurations
Proxy HTTPTransportProxyFunc
HTTPClient *http.Client
}

Config struct {

@@ -349,3 +352,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
return cfg
})
}

func WithHTTPClient(c *http.Client) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Traces.HTTPClient = c
return cfg
})
}

@@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl

// Copyright The OpenTelemetry Authors

@@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl

// Copyright The OpenTelemetry Authors

@@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go

// Copyright The OpenTelemetry Authors

@@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl

// Copyright The OpenTelemetry Authors

@@ -14,7 +14,7 @@ import (
"fmt"
"time"

"github.com/cenkalti/backoff/v4"
"github.com/cenkalti/backoff/v5"
)

// DefaultConfig are the recommended defaults to use.

@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
MaxElapsedTime: c.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
b.Reset()

maxElapsedTime := c.MaxElapsedTime
startTime := time.Now()

for {
err := fn(ctx)
if err == nil {

@@ -94,22 +94,18 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}

bOff := b.NextBackOff()
if bOff == backoff.Stop {
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}

// Wait for the greater of the backoff or throttle delay.
var delay time.Duration
if bOff > throttle {
delay = bOff
} else {
elapsed := b.GetElapsedTime()
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
bOff := b.NextBackOff()
delay := max(throttle, bOff)

elapsed := time.Since(startTime)
if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}
delay = throttle
}

if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
return fmt.Errorf("%w: %w", ctxErr, err)
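backoff/v5 drops MaxElapsedTime, Stop, and Clock from ExponentialBackOff, which is why the rewritten loop above tracks elapsed time itself and waits for the larger of the exponential backoff and the server-advised throttle. A minimal standalone sketch of that shape, not the vendored implementation; the function name, the simplified fn signature, and the concrete intervals are illustrative:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

// retryWithThrottle mirrors the updated retry shape: elapsed time is tracked
// manually and each wait is the larger of the backoff and throttle delay.
func retryWithThrottle(ctx context.Context, maxElapsed time.Duration, fn func(context.Context) (time.Duration, error)) error {
	b := &backoff.ExponentialBackOff{
		InitialInterval:     5 * time.Second,
		RandomizationFactor: backoff.DefaultRandomizationFactor,
		Multiplier:          backoff.DefaultMultiplier,
		MaxInterval:         30 * time.Second,
	}
	b.Reset()
	start := time.Now()

	for {
		// fn reports a server-advised throttle delay (zero if none) and an error.
		throttle, err := fn(ctx)
		if err == nil {
			return nil
		}
		if maxElapsed != 0 && time.Since(start) > maxElapsed {
			return fmt.Errorf("max retry time elapsed: %w", err)
		}

		// Wait for the greater of the backoff or throttle delay, unless that
		// would already exceed the overall retry budget.
		delay := max(b.NextBackOff(), throttle)
		if maxElapsed != 0 && time.Since(start)+throttle > maxElapsed {
			return fmt.Errorf("max retry time would elapse: %w", err)
		}

		select {
		case <-ctx.Done():
			return errors.Join(ctx.Err(), err)
		case <-time.After(delay):
		}
	}
}

func main() {
	attempt := 0
	err := retryWithThrottle(context.Background(), time.Minute, func(context.Context) (time.Duration, error) {
		attempt++
		if attempt < 3 {
			return 0, errors.New("transient export failure")
		}
		return 0, nil
	})
	fmt.Println("done after", attempt, "attempts:", err)
}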
5 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go (generated, vendored)

@@ -199,8 +199,9 @@ func WithTimeout(duration time.Duration) Option {
// explicitly returns a backoff time in the response. That time will take
// precedence over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
// These settings define the retry strategy implemented by the exporter.
// These settings do not define any network retry strategy.
// That is handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
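For callers, the retry strategy described by this comment is still tuned through the exporter option rather than through gRPC dial options. A minimal sketch, assuming the existing otlptracegrpc.WithRetry / RetryConfig surface; the concrete values are illustrative:

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// Override the exporter-level retry strategy documented above; the gRPC
	// ClientConn's own network-level handling is unaffected by this.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{
			Enabled:         true,
			InitialInterval: 5 * time.Second,
			MaxInterval:     30 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}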
8 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go (generated, vendored)

@@ -71,7 +71,10 @@ var _ otlptrace.Client = (*client)(nil)
func NewClient(opts ...Option) otlptrace.Client {
cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...)

httpClient := &http.Client{
httpClient := cfg.Traces.HTTPClient

if httpClient == nil {
httpClient = &http.Client{
Transport: ourTransport,
Timeout: cfg.Traces.Timeout,
}

@@ -87,6 +90,7 @@ func NewClient(opts ...Option) otlptrace.Client {
clonedTransport.Proxy = cfg.Traces.Proxy
}
}
}

stopCh := make(chan struct{})
return &client{
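With this change, NewClient only builds its own transport, timeout, and proxy handling when no client was supplied, so a caller-provided http.Client owns all of that itself. A minimal sketch, assuming the public otlptracehttp.WithHTTPClient option that feeds the internal HTTPClient field added in this update; the client settings are illustrative:

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()

	// Per the hunk above, when a custom client is passed the exporter skips its
	// own transport, timeout, and proxy setup, so configure those here.
	httpClient := &http.Client{
		Timeout:   10 * time.Second,
		Transport: http.DefaultTransport,
	}

	exp, err := otlptracehttp.New(ctx, otlptracehttp.WithHTTPClient(httpClient))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}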
@@ -300,7 +304,7 @@ type request struct {
// reset reinitializes the request Body and uses ctx for the request.
func (r *request) reset(ctx context.Context) {
r.Body = r.bodyReader()
r.Request = r.Request.WithContext(ctx)
r.Request = r.WithContext(ctx)
}

// retryableError represents a request failure that can be retried.

@@ -1,9 +1,11 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package envconfig provides functionality to parse configuration from
// environment variables.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"

import (

@@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package internal provides internal functionally for the otlptracehttp package.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"

//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go

@@ -1,4 +1,4 @@
// Code created by gotmpl. DO NOT MODIFY.
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl

// Copyright The OpenTelemetry Authors

@@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption {
}),
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
envconfig.WithClientCert(
"CLIENT_CERTIFICATE",
"CLIENT_KEY",
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
),
envconfig.WithClientCert(
"TRACES_CLIENT_CERTIFICATE",
"TRACES_CLIENT_KEY",
func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
),
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
Some files were not shown because too many files have changed in this diff.