- Update to version 1.1.0
  Release notes https://github.com/kubevirt/kubevirt/releases/tag/v1.1.0
- Drop upstreamed patches
  0001-Fix-qemu-system-lookup.patch
  0003-Virtiofs-Remove-duplicated-functional-tests.patch
  0005-Support-multiple-watchdogs-in-the-domain-schema.patch
- Add patches
  0001-Update-google.golang.org-grpc-to-1.56.3.patch (CVE-2023-44487)
  0002-virt-launcher-fix-qemu-non-root-path.patch
  0003-cgroupsv2-reconstruct-device-allowlist.patch

OBS-URL: https://build.opensuse.org/request/show/1125817
OBS-URL: https://build.opensuse.org/package/show/Virtualization/kubevirt?expand=0&rev=139
From ad6d2dc3f73aaf37b892ef58209902ec519b6edc Mon Sep 17 00:00:00 2001
From: Michael Henriksen <mhenriks@redhat.com>
Date: Thu, 2 Nov 2023 21:13:48 -0400
Subject: [PATCH] Update google.golang.org/grpc to 1.56.3

Address CVE-2023-44487

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>
---
go.mod | 10 +-
go.sum | 13 +-
.../github.com/cespare/xxhash/v2/BUILD.bazel | 3 +-
vendor/github.com/cespare/xxhash/v2/README.md | 31 +-
.../github.com/cespare/xxhash/v2/testall.sh | 10 +
vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +-
.../cespare/xxhash/v2/xxhash_amd64.s | 336 ++++----
.../cespare/xxhash/v2/xxhash_arm64.s | 183 ++++
.../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 +
.../cespare/xxhash/v2/xxhash_other.go | 22 +-
.../cespare/xxhash/v2/xxhash_safe.go | 1 +
.../cespare/xxhash/v2/xxhash_unsafe.go | 3 +-
.../x/net/context/ctxhttp/BUILD.bazel | 9 -
.../x/net/context/ctxhttp/ctxhttp.go | 71 --
vendor/golang.org/x/oauth2/AUTHORS | 3 -
vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 -
vendor/golang.org/x/oauth2/README.md | 12 +-
.../golang.org/x/oauth2/internal/BUILD.bazel | 1 -
vendor/golang.org/x/oauth2/internal/token.go | 4 +-
vendor/golang.org/x/oauth2/oauth2.go | 33 +-
vendor/golang.org/x/oauth2/token.go | 14 +-
.../googleapis/rpc/status/status.pb.go | 10 +-
vendor/google.golang.org/grpc/CONTRIBUTING.md | 25 +-
.../grpc/attributes/attributes.go | 31 +-
vendor/google.golang.org/grpc/backoff.go | 2 +-
.../grpc/balancer/balancer.go | 33 +-
.../grpc/balancer/base/balancer.go | 4 +-
.../grpc/balancer/conn_state_evaluator.go | 12 +-
.../grpc/balancer/roundrobin/roundrobin.go | 16 +-
.../grpc/balancer_conn_wrappers.go | 529 ++++++------
.../grpc_binarylog_v1/binarylog.pb.go | 22 +-
vendor/google.golang.org/grpc/call.go | 5 +
.../grpc/channelz/channelz.go | 2 +-
vendor/google.golang.org/grpc/clientconn.go | 798 +++++++++++-------
.../grpc/codes/code_string.go | 51 +-
.../grpc/credentials/credentials.go | 20 +-
.../google.golang.org/grpc/credentials/tls.go | 6 +-
vendor/google.golang.org/grpc/dialoptions.go | 87 +-
.../grpc/encoding/encoding.go | 7 +-
.../grpc/grpclog/loggerv2.go | 9 +-
vendor/google.golang.org/grpc/idle.go | 287 +++++++
.../grpc/internal/binarylog/binarylog.go | 31 +-
.../grpc/internal/binarylog/env_config.go | 20 +-
.../grpc/internal/binarylog/method_logger.go | 153 ++--
.../grpc/internal/binarylog/sink.go | 12 +-
.../grpc/internal/buffer/unbounded.go | 26 +-
.../grpc/internal/channelz/types.go | 16 +-
.../grpc/internal/envconfig/envconfig.go | 43 +-
.../grpc/internal/envconfig/observability.go | 42 +
.../grpc/internal/envconfig/xds.go | 51 +-
.../grpc/internal/grpclog/grpclog.go | 2 +-
.../grpc/internal/grpclog/prefixLogger.go | 12 +
.../grpc/internal/grpcrand/grpcrand.go | 21 +
.../internal/grpcsync/callback_serializer.go | 119 +++
.../grpc/internal/grpcsync/oncefunc.go | 32 +
.../grpc/internal/grpcutil/compressor.go | 47 ++
.../grpc/internal/grpcutil/method.go | 1 -
.../grpc/internal/internal.go | 77 +-
.../grpc/internal/metadata/metadata.go | 62 +-
.../internal/resolver/dns/dns_resolver.go | 6 +-
.../resolver/passthrough/passthrough.go | 11 +-
.../grpc/internal/resolver/unix/unix.go | 9 +-
.../grpc/internal/serviceconfig/duration.go | 130 +++
.../internal/serviceconfig/serviceconfig.go | 8 +-
.../grpc/internal/status/status.go | 10 +
.../grpc/internal/transport/controlbuf.go | 131 +--
.../grpc/internal/transport/defaults.go | 6 +
.../grpc/internal/transport/handler_server.go | 60 +-
.../grpc/internal/transport/http2_client.go | 314 ++++---
.../grpc/internal/transport/http2_server.go | 248 +++---
.../grpc/internal/transport/http_util.go | 49 +-
.../grpc/internal/transport/logging.go | 40 +
.../grpc/internal/transport/transport.go | 41 +-
.../grpc/metadata/metadata.go | 86 +-
.../google.golang.org/grpc/picker_wrapper.go | 69 +-
vendor/google.golang.org/grpc/pickfirst.go | 58 +-
vendor/google.golang.org/grpc/preloader.go | 2 +-
vendor/google.golang.org/grpc/regenerate.sh | 7 +-
.../grpc/resolver/resolver.go | 72 +-
.../grpc/resolver_conn_wrapper.go | 229 +++--
vendor/google.golang.org/grpc/rpc_util.go | 109 ++-
vendor/google.golang.org/grpc/server.go | 450 +++++++---
.../google.golang.org/grpc/service_config.go | 83 +-
.../grpc/serviceconfig/serviceconfig.go | 2 +-
vendor/google.golang.org/grpc/stats/stats.go | 22 +-
.../google.golang.org/grpc/status/status.go | 61 +-
vendor/google.golang.org/grpc/stream.go | 237 ++++--
vendor/google.golang.org/grpc/tap/tap.go | 2 +-
vendor/google.golang.org/grpc/version.go | 2 +-
vendor/google.golang.org/grpc/vet.sh | 40 +-
vendor/modules.txt | 15 +-
91 files changed, 4169 insertions(+), 1974 deletions(-)
create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%)
delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel
delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS
delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS
create mode 100644 vendor/google.golang.org/grpc/idle.go
create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go
create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go

diff --git a/go.mod b/go.mod
index c4adee251..1ae36f932 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,7 @@ require (
github.com/go-openapi/strfmt v0.20.0
github.com/go-openapi/validate v0.20.2
github.com/gogo/protobuf v1.3.2
- github.com/golang/glog v1.0.0
+ github.com/golang/glog v1.1.0
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.3
github.com/google/go-github/v32 v32.0.0
@@ -64,7 +64,7 @@ require (
golang.org/x/term v0.12.0
golang.org/x/time v0.3.0
golang.org/x/tools v0.13.0
- google.golang.org/grpc v1.49.0
+ google.golang.org/grpc v1.56.3
gopkg.in/cheggaaa/pb.v1 v1.0.28
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.27.1
@@ -90,7 +90,7 @@ require (
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cilium/ebpf v0.7.0 // indirect
github.com/coreos/go-systemd/v22 v22.4.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
@@ -145,10 +145,10 @@ require (
go.mongodb.org/mongo-driver v1.8.4 // indirect
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9 // indirect
golang.org/x/mod v0.12.0 // indirect
- golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
+ golang.org/x/oauth2 v0.7.0 // indirect
golang.org/x/text v0.13.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20220720214146-176da50484ac // indirect
+ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
diff --git a/go.sum b/go.sum
index f822c8405..0c551e9d7 100644
--- a/go.sum
+++ b/go.sum
@@ -167,8 +167,9 @@ github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1Vqq
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04=
github.com/cheggaaa/pb/v3 v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -1429,8 +1430,9 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1793,8 +1795,8 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220720214146-176da50484ac h1:EOa+Yrhx1C0O+4pHeXeWrCwdI0tWI6IfUU56Vebs9wQ=
-google.golang.org/genproto v0.0.0-20220720214146-176da50484ac/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1833,8 +1835,9 @@ google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzI
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
diff --git a/vendor/github.com/cespare/xxhash/v2/BUILD.bazel b/vendor/github.com/cespare/xxhash/v2/BUILD.bazel
index 9021f3c5d..2bfb7768d 100644
--- a/vendor/github.com/cespare/xxhash/v2/BUILD.bazel
+++ b/vendor/github.com/cespare/xxhash/v2/BUILD.bazel
@@ -4,8 +4,9 @@ go_library(
name = "go_default_library",
srcs = [
"xxhash.go",
- "xxhash_amd64.go",
"xxhash_amd64.s",
+ "xxhash_arm64.s",
+ "xxhash_asm.go",
"xxhash_other.go",
"xxhash_unsafe.go",
],
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 792b4a60b..8bf0e5b78 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -3,8 +3,7 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)

-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/

## Compatibility

@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |

-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:

```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```

## Projects using this package
diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh
new file mode 100644
index 000000000..94b9c4439
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index 15c835d54..a9e0d45c9 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -16,19 +16,11 @@ const (
prime5 uint64 = 2870177450012600261
)

-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}

// Digest implements hash.Hash64.
type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
+ d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
- d.v4 = -prime1v
+ d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)

+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
if d.n+n < 32 {
// This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
+ copy(memleft, b)
d.n += n
return
}

if d.n > 0 {
// Finish off the partial block.
- copy(d.mem[d.n:], b)
+ c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
+ b = b[c:]
d.n = 0
}

@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {

h += d.total

- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
- i++
}

h ^= h >> 33
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
|
|
index be8db5bf7..3e8b13257 100644
|
|
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
|
|
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
|
|
@@ -1,215 +1,209 @@
|
|
+//go:build !appengine && gc && !purego
|
|
// +build !appengine
|
|
// +build gc
|
|
// +build !purego
|
|
|
|
#include "textflag.h"
|
|
|
|
-// Register allocation:
|
|
-// AX h
|
|
-// SI pointer to advance through b
|
|
-// DX n
|
|
-// BX loop end
|
|
-// R8 v1, k1
|
|
-// R9 v2
|
|
-// R10 v3
|
|
-// R11 v4
|
|
-// R12 tmp
|
|
-// R13 prime1v
|
|
-// R14 prime2v
|
|
-// DI prime4v
|
|
-
|
|
-// round reads from and advances the buffer pointer in SI.
|
|
-// It assumes that R13 has prime1v and R14 has prime2v.
|
|
-#define round(r) \
|
|
- MOVQ (SI), R12 \
|
|
- ADDQ $8, SI \
|
|
- IMULQ R14, R12 \
|
|
- ADDQ R12, r \
|
|
- ROLQ $31, r \
|
|
- IMULQ R13, r
|
|
-
|
|
-// mergeRound applies a merge round on the two registers acc and val.
|
|
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
|
-#define mergeRound(acc, val) \
|
|
- IMULQ R14, val \
|
|
- ROLQ $31, val \
|
|
- IMULQ R13, val \
|
|
- XORQ val, acc \
|
|
- IMULQ R13, acc \
|
|
- ADDQ DI, acc
|
|
+// Registers:
|
|
+#define h AX
|
|
+#define d AX
|
|
+#define p SI // pointer to advance through b
|
|
+#define n DX
|
|
+#define end BX // loop end
|
|
+#define v1 R8
|
|
+#define v2 R9
|
|
+#define v3 R10
|
|
+#define v4 R11
|
|
+#define x R12
|
|
+#define prime1 R13
|
|
+#define prime2 R14
|
|
+#define prime4 DI
|
|
+
|
|
+#define round(acc, x) \
|
|
+ IMULQ prime2, x \
|
|
+ ADDQ x, acc \
|
|
+ ROLQ $31, acc \
|
|
+ IMULQ prime1, acc
|
|
+
|
|
+// round0 performs the operation x = round(0, x).
|
|
+#define round0(x) \
|
|
+ IMULQ prime2, x \
|
|
+ ROLQ $31, x \
|
|
+ IMULQ prime1, x
|
|
+
|
|
+// mergeRound applies a merge round on the two registers acc and x.
|
|
+// It assumes that prime1, prime2, and prime4 have been loaded.
|
|
+#define mergeRound(acc, x) \
|
|
+ round0(x) \
|
|
+ XORQ x, acc \
|
|
+ IMULQ prime1, acc \
|
|
+ ADDQ prime4, acc
|
|
+
|
|
+// blockLoop processes as many 32-byte blocks as possible,
|
|
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
|
+// to process.
|
|
+#define blockLoop() \
|
|
+loop: \
|
|
+ MOVQ +0(p), x \
|
|
+ round(v1, x) \
|
|
+ MOVQ +8(p), x \
|
|
+ round(v2, x) \
|
|
+ MOVQ +16(p), x \
|
|
+ round(v3, x) \
|
|
+ MOVQ +24(p), x \
|
|
+ round(v4, x) \
|
|
+ ADDQ $32, p \
|
|
+ CMPQ p, end \
|
|
+ JLE loop
|
|
|
|
// func Sum64(b []byte) uint64
|
|
-TEXT ·Sum64(SB), NOSPLIT, $0-32
|
|
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
|
// Load fixed primes.
|
|
- MOVQ ·prime1v(SB), R13
|
|
- MOVQ ·prime2v(SB), R14
|
|
- MOVQ ·prime4v(SB), DI
|
|
+ MOVQ ·primes+0(SB), prime1
|
|
+ MOVQ ·primes+8(SB), prime2
|
|
+ MOVQ ·primes+24(SB), prime4
|
|
|
|
// Load slice.
|
|
- MOVQ b_base+0(FP), SI
|
|
- MOVQ b_len+8(FP), DX
|
|
- LEAQ (SI)(DX*1), BX
|
|
+ MOVQ b_base+0(FP), p
|
|
+ MOVQ b_len+8(FP), n
|
|
+ LEAQ (p)(n*1), end
|
|
|
|
// The first loop limit will be len(b)-32.
|
|
- SUBQ $32, BX
|
|
+ SUBQ $32, end
|
|
|
|
// Check whether we have at least one block.
|
|
- CMPQ DX, $32
|
|
+ CMPQ n, $32
|
|
JLT noBlocks
|
|
|
|
// Set up initial state (v1, v2, v3, v4).
|
|
- MOVQ R13, R8
|
|
- ADDQ R14, R8
|
|
- MOVQ R14, R9
|
|
- XORQ R10, R10
|
|
- XORQ R11, R11
|
|
- SUBQ R13, R11
|
|
-
|
|
- // Loop until SI > BX.
|
|
-blockLoop:
|
|
- round(R8)
|
|
- round(R9)
|
|
- round(R10)
|
|
- round(R11)
|
|
-
|
|
- CMPQ SI, BX
|
|
- JLE blockLoop
|
|
-
|
|
- MOVQ R8, AX
|
|
- ROLQ $1, AX
|
|
- MOVQ R9, R12
|
|
- ROLQ $7, R12
|
|
- ADDQ R12, AX
|
|
- MOVQ R10, R12
|
|
- ROLQ $12, R12
|
|
- ADDQ R12, AX
|
|
- MOVQ R11, R12
|
|
- ROLQ $18, R12
|
|
- ADDQ R12, AX
|
|
-
|
|
- mergeRound(AX, R8)
|
|
- mergeRound(AX, R9)
|
|
- mergeRound(AX, R10)
|
|
- mergeRound(AX, R11)
|
|
+ MOVQ prime1, v1
|
|
+ ADDQ prime2, v1
|
|
+ MOVQ prime2, v2
|
|
+ XORQ v3, v3
|
|
+ XORQ v4, v4
|
|
+ SUBQ prime1, v4
|
|
+
|
|
+ blockLoop()
|
|
+
|
|
+ MOVQ v1, h
|
|
+ ROLQ $1, h
|
|
+ MOVQ v2, x
|
|
+ ROLQ $7, x
|
|
+ ADDQ x, h
|
|
+ MOVQ v3, x
|
|
+ ROLQ $12, x
|
|
+ ADDQ x, h
|
|
+ MOVQ v4, x
|
|
+ ROLQ $18, x
|
|
+ ADDQ x, h
|
|
+
|
|
+ mergeRound(h, v1)
|
|
+ mergeRound(h, v2)
|
|
+ mergeRound(h, v3)
|
|
+ mergeRound(h, v4)
|
|
|
|
JMP afterBlocks
|
|
|
|
noBlocks:
|
|
- MOVQ ·prime5v(SB), AX
|
|
+ MOVQ ·primes+32(SB), h
|
|
|
|
afterBlocks:
|
|
- ADDQ DX, AX
|
|
-
|
|
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
|
- ADDQ $24, BX
|
|
-
|
|
- CMPQ SI, BX
|
|
- JG fourByte
|
|
-
|
|
-wordLoop:
|
|
- // Calculate k1.
|
|
- MOVQ (SI), R8
|
|
- ADDQ $8, SI
|
|
- IMULQ R14, R8
|
|
- ROLQ $31, R8
|
|
- IMULQ R13, R8
|
|
-
|
|
- XORQ R8, AX
|
|
- ROLQ $27, AX
|
|
- IMULQ R13, AX
|
|
- ADDQ DI, AX
|
|
-
|
|
- CMPQ SI, BX
|
|
- JLE wordLoop
|
|
-
|
|
-fourByte:
|
|
- ADDQ $4, BX
|
|
- CMPQ SI, BX
|
|
- JG singles
|
|
-
|
|
- MOVL (SI), R8
|
|
- ADDQ $4, SI
|
|
- IMULQ R13, R8
|
|
- XORQ R8, AX
|
|
-
|
|
- ROLQ $23, AX
|
|
- IMULQ R14, AX
|
|
- ADDQ ·prime3v(SB), AX
|
|
-
|
|
-singles:
|
|
- ADDQ $4, BX
|
|
- CMPQ SI, BX
|
|
+ ADDQ n, h
|
|
+
|
|
+ ADDQ $24, end
|
|
+ CMPQ p, end
|
|
+ JG try4
|
|
+
|
|
+loop8:
|
|
+ MOVQ (p), x
|
|
+ ADDQ $8, p
|
|
+ round0(x)
|
|
+ XORQ x, h
|
|
+ ROLQ $27, h
|
|
+ IMULQ prime1, h
|
|
+ ADDQ prime4, h
|
|
+
|
|
+ CMPQ p, end
|
|
+ JLE loop8
|
|
+
|
|
+try4:
|
|
+ ADDQ $4, end
|
|
+ CMPQ p, end
|
|
+ JG try1
|
|
+
|
|
+ MOVL (p), x
|
|
+ ADDQ $4, p
|
|
+ IMULQ prime1, x
|
|
+ XORQ x, h
|
|
+
|
|
+ ROLQ $23, h
|
|
+ IMULQ prime2, h
|
|
+ ADDQ ·primes+16(SB), h
|
|
+
|
|
+try1:
|
|
+ ADDQ $4, end
|
|
+ CMPQ p, end
|
|
JGE finalize
|
|
|
|
-singlesLoop:
|
|
- MOVBQZX (SI), R12
|
|
- ADDQ $1, SI
|
|
- IMULQ ·prime5v(SB), R12
|
|
- XORQ R12, AX
|
|
+loop1:
|
|
+ MOVBQZX (p), x
|
|
+ ADDQ $1, p
|
|
+ IMULQ ·primes+32(SB), x
|
|
+ XORQ x, h
|
|
+ ROLQ $11, h
|
|
+ IMULQ prime1, h
|
|
|
|
- ROLQ $11, AX
|
|
- IMULQ R13, AX
|
|
-
|
|
- CMPQ SI, BX
|
|
- JL singlesLoop
|
|
+ CMPQ p, end
|
|
+ JL loop1
|
|
|
|
finalize:
|
|
- MOVQ AX, R12
|
|
- SHRQ $33, R12
|
|
- XORQ R12, AX
|
|
- IMULQ R14, AX
|
|
- MOVQ AX, R12
|
|
- SHRQ $29, R12
|
|
- XORQ R12, AX
|
|
- IMULQ ·prime3v(SB), AX
|
|
- MOVQ AX, R12
|
|
- SHRQ $32, R12
|
|
- XORQ R12, AX
|
|
-
|
|
- MOVQ AX, ret+24(FP)
|
|
+ MOVQ h, x
|
|
+ SHRQ $33, x
|
|
+ XORQ x, h
|
|
+ IMULQ prime2, h
|
|
+ MOVQ h, x
|
|
+ SHRQ $29, x
|
|
+ XORQ x, h
|
|
+ IMULQ ·primes+16(SB), h
|
|
+ MOVQ h, x
|
|
+ SHRQ $32, x
|
|
+ XORQ x, h
|
|
+
|
|
+ MOVQ h, ret+24(FP)
|
|
RET
|
|
|
|
-// writeBlocks uses the same registers as above except that it uses AX to store
|
|
-// the d pointer.
|
|
-
|
|
// func writeBlocks(d *Digest, b []byte) int
|
|
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
|
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
|
// Load fixed primes needed for round.
|
|
- MOVQ ·prime1v(SB), R13
|
|
- MOVQ ·prime2v(SB), R14
|
|
+ MOVQ ·primes+0(SB), prime1
|
|
+ MOVQ ·primes+8(SB), prime2
|
|
|
|
// Load slice.
|
|
- MOVQ b_base+8(FP), SI
|
|
- MOVQ b_len+16(FP), DX
|
|
- LEAQ (SI)(DX*1), BX
|
|
- SUBQ $32, BX
|
|
+ MOVQ b_base+8(FP), p
|
|
+ MOVQ b_len+16(FP), n
|
|
+ LEAQ (p)(n*1), end
|
|
+ SUBQ $32, end
|
|
|
|
// Load vN from d.
|
|
- MOVQ d+0(FP), AX
|
|
- MOVQ 0(AX), R8 // v1
|
|
- MOVQ 8(AX), R9 // v2
|
|
- MOVQ 16(AX), R10 // v3
|
|
- MOVQ 24(AX), R11 // v4
|
|
+ MOVQ s+0(FP), d
|
|
+ MOVQ 0(d), v1
|
|
+ MOVQ 8(d), v2
|
|
+ MOVQ 16(d), v3
|
|
+ MOVQ 24(d), v4
|
|
|
|
// We don't need to check the loop condition here; this function is
|
|
// always called with at least one block of data to process.
|
|
-blockLoop:
|
|
- round(R8)
|
|
- round(R9)
|
|
- round(R10)
|
|
- round(R11)
|
|
-
|
|
- CMPQ SI, BX
|
|
- JLE blockLoop
|
|
+ blockLoop()
|
|
|
|
// Copy vN back to d.
|
|
- MOVQ R8, 0(AX)
|
|
- MOVQ R9, 8(AX)
|
|
- MOVQ R10, 16(AX)
|
|
- MOVQ R11, 24(AX)
|
|
-
|
|
- // The number of bytes written is SI minus the old base pointer.
|
|
- SUBQ b_base+8(FP), SI
|
|
- MOVQ SI, ret+32(FP)
|
|
+ MOVQ v1, 0(d)
|
|
+ MOVQ v2, 8(d)
|
|
+ MOVQ v3, 16(d)
|
|
+ MOVQ v4, 24(d)
|
|
+
|
|
+ // The number of bytes written is p minus the old base pointer.
|
|
+ SUBQ b_base+8(FP), p
|
|
+ MOVQ p, ret+32(FP)
|
|
|
|
RET
|
|
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
|
|
new file mode 100644
|
|
index 000000000..7e3145a22
|
|
--- /dev/null
|
|
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
|
|
@@ -0,0 +1,183 @@
|
|
+//go:build !appengine && gc && !purego
|
|
+// +build !appengine
|
|
+// +build gc
|
|
+// +build !purego
|
|
+
|
|
+#include "textflag.h"
|
|
+
|
|
+// Registers:
|
|
+#define digest R1
|
|
+#define h R2 // return value
|
|
+#define p R3 // input pointer
|
|
+#define n R4 // input length
|
|
+#define nblocks R5 // n / 32
|
|
+#define prime1 R7
|
|
+#define prime2 R8
|
|
+#define prime3 R9
|
|
+#define prime4 R10
|
|
+#define prime5 R11
|
|
+#define v1 R12
|
|
+#define v2 R13
|
|
+#define v3 R14
|
|
+#define v4 R15
|
|
+#define x1 R20
|
|
+#define x2 R21
|
|
+#define x3 R22
|
|
+#define x4 R23
|
|
+
|
|
+#define round(acc, x) \
|
|
+ MADD prime2, acc, x, acc \
|
|
+ ROR $64-31, acc \
|
|
+ MUL prime1, acc
|
|
+
|
|
+// round0 performs the operation x = round(0, x).
|
|
+#define round0(x) \
|
|
+ MUL prime2, x \
|
|
+ ROR $64-31, x \
|
|
+ MUL prime1, x
|
|
+
|
|
+#define mergeRound(acc, x) \
|
|
+ round0(x) \
|
|
+ EOR x, acc \
|
|
+ MADD acc, prime4, prime1, acc
|
|
+
|
|
+// blockLoop processes as many 32-byte blocks as possible,
|
|
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
|
+#define blockLoop() \
|
|
+ LSR $5, n, nblocks \
|
|
+ PCALIGN $16 \
|
|
+ loop: \
|
|
+ LDP.P 16(p), (x1, x2) \
|
|
+ LDP.P 16(p), (x3, x4) \
|
|
+ round(v1, x1) \
|
|
+ round(v2, x2) \
|
|
+ round(v3, x3) \
|
|
+ round(v4, x4) \
|
|
+ SUB $1, nblocks \
|
|
+ CBNZ nblocks, loop
|
|
+
|
|
+// func Sum64(b []byte) uint64
|
|
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
|
+ LDP b_base+0(FP), (p, n)
|
|
+
|
|
+ LDP ·primes+0(SB), (prime1, prime2)
|
|
+ LDP ·primes+16(SB), (prime3, prime4)
|
|
+ MOVD ·primes+32(SB), prime5
|
|
+
|
|
+ CMP $32, n
|
|
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
|
+ BLT afterLoop
|
|
+
|
|
+ ADD prime1, prime2, v1
|
|
+ MOVD prime2, v2
|
|
+ MOVD $0, v3
|
|
+ NEG prime1, v4
|
|
+
|
|
+ blockLoop()
|
|
+
|
|
+ ROR $64-1, v1, x1
|
|
+ ROR $64-7, v2, x2
|
|
+ ADD x1, x2
|
|
+ ROR $64-12, v3, x3
|
|
+ ROR $64-18, v4, x4
|
|
+ ADD x3, x4
|
|
+ ADD x2, x4, h
|
|
+
|
|
+ mergeRound(h, v1)
|
|
+ mergeRound(h, v2)
|
|
+ mergeRound(h, v3)
|
|
+ mergeRound(h, v4)
|
|
+
|
|
+afterLoop:
|
|
+ ADD n, h
|
|
+
|
|
+ TBZ $4, n, try8
|
|
+ LDP.P 16(p), (x1, x2)
|
|
+
|
|
+ round0(x1)
|
|
+
|
|
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
|
|
+ // rotated register) is worth a small but measurable speedup for small
|
|
+ // inputs.
|
|
+ ROR $64-27, h
|
|
+ EOR x1 @> 64-27, h, h
|
|
+ MADD h, prime4, prime1, h
|
|
+
|
|
+ round0(x2)
|
|
+ ROR $64-27, h
|
|
+ EOR x2 @> 64-27, h, h
|
|
+ MADD h, prime4, prime1, h
|
|
+
|
|
+try8:
|
|
+ TBZ $3, n, try4
|
|
+ MOVD.P 8(p), x1
|
|
+
|
|
+ round0(x1)
|
|
+ ROR $64-27, h
|
|
+ EOR x1 @> 64-27, h, h
|
|
+ MADD h, prime4, prime1, h
|
|
+
|
|
+try4:
|
|
+ TBZ $2, n, try2
|
|
+ MOVWU.P 4(p), x2
|
|
+
|
|
+ MUL prime1, x2
|
|
+ ROR $64-23, h
|
|
+ EOR x2 @> 64-23, h, h
|
|
+ MADD h, prime3, prime2, h
|
|
+
|
|
+try2:
|
|
+ TBZ $1, n, try1
|
|
+ MOVHU.P 2(p), x3
|
|
+ AND $255, x3, x1
|
|
+ LSR $8, x3, x2
|
|
+
|
|
+ MUL prime5, x1
|
|
+ ROR $64-11, h
|
|
+ EOR x1 @> 64-11, h, h
|
|
+ MUL prime1, h
|
|
+
|
|
+ MUL prime5, x2
|
|
+ ROR $64-11, h
|
|
+ EOR x2 @> 64-11, h, h
|
|
+ MUL prime1, h
|
|
+
|
|
+try1:
|
|
+ TBZ $0, n, finalize
|
|
+ MOVBU (p), x4
|
|
+
|
|
+ MUL prime5, x4
|
|
+ ROR $64-11, h
|
|
+ EOR x4 @> 64-11, h, h
|
|
+ MUL prime1, h
|
|
+
|
|
+finalize:
|
|
+ EOR h >> 33, h
|
|
+ MUL prime2, h
|
|
+ EOR h >> 29, h
|
|
+ MUL prime3, h
|
|
+ EOR h >> 32, h
|
|
+
|
|
+ MOVD h, ret+24(FP)
|
|
+ RET
|
|
+
|
|
+// func writeBlocks(d *Digest, b []byte) int
|
|
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
|
+ LDP ·primes+0(SB), (prime1, prime2)
|
|
+
|
|
+ // Load state. Assume v[1-4] are stored contiguously.
|
|
+ MOVD d+0(FP), digest
|
|
+ LDP 0(digest), (v1, v2)
|
|
+ LDP 16(digest), (v3, v4)
|
|
+
|
|
+ LDP b_base+8(FP), (p, n)
|
|
+
|
|
+ blockLoop()
|
|
+
|
|
+ // Store updated state.
|
|
+ STP (v1, v2), 0(digest)
|
|
+ STP (v3, v4), 16(digest)
|
|
+
|
|
+ BIC $31, n
|
|
+ MOVD n, ret+32(FP)
|
|
+ RET
|
|
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
|
|
similarity index 73%
|
|
rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
|
|
rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
|
|
index ad14b807f..9216e0a40 100644
|
|
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
|
|
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
|
|
@@ -1,3 +1,5 @@
|
|
+//go:build (amd64 || arm64) && !appengine && gc && !purego
|
|
+// +build amd64 arm64
|
|
// +build !appengine
|
|
// +build gc
|
|
// +build !purego
|
|
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
|
|
index 4a5a82160..26df13bba 100644
|
|
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
|
|
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
|
|
@@ -1,4 +1,5 @@
|
|
-// +build !amd64 appengine !gc purego
|
|
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
|
+// +build !amd64,!arm64 appengine !gc purego
|
|
|
|
package xxhash
|
|
|
|
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
|
|
var h uint64
|
|
|
|
if n >= 32 {
|
|
- v1 := prime1v + prime2
|
|
+ v1 := primes[0] + prime2
|
|
v2 := prime2
|
|
v3 := uint64(0)
|
|
- v4 := -prime1v
|
|
+ v4 := -primes[0]
|
|
for len(b) >= 32 {
|
|
v1 = round(v1, u64(b[0:8:len(b)]))
|
|
v2 = round(v2, u64(b[8:16:len(b)]))
|
|
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
|
|
|
|
h += uint64(n)
|
|
|
|
- i, end := 0, len(b)
|
|
- for ; i+8 <= end; i += 8 {
|
|
- k1 := round(0, u64(b[i:i+8:len(b)]))
|
|
+ for ; len(b) >= 8; b = b[8:] {
|
|
+ k1 := round(0, u64(b[:8]))
|
|
h ^= k1
|
|
h = rol27(h)*prime1 + prime4
|
|
}
|
|
- if i+4 <= end {
|
|
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
|
+ if len(b) >= 4 {
|
|
+ h ^= uint64(u32(b[:4])) * prime1
|
|
h = rol23(h)*prime2 + prime3
|
|
- i += 4
|
|
+ b = b[4:]
|
|
}
|
|
- for ; i < end; i++ {
|
|
- h ^= uint64(b[i]) * prime5
|
|
+ for ; len(b) > 0; b = b[1:] {
|
|
+ h ^= uint64(b[0]) * prime5
|
|
h = rol11(h) * prime1
|
|
}
|
|
|
|
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
|
|
index fc9bea7a3..e86f1b5fd 100644
|
|
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
|
|
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
|
|
@@ -1,3 +1,4 @@
|
|
+//go:build appengine
|
|
// +build appengine
|
|
|
|
// This file contains the safe implementations of otherwise unsafe-using code.
|
|
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
|
|
index 376e0ca2e..1c1638fd8 100644
|
|
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
|
|
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
|
|
@@ -1,3 +1,4 @@
|
|
+//go:build !appengine
|
|
// +build !appengine
|
|
|
|
// This file encapsulates usage of unsafe.
|
|
@@ -11,7 +12,7 @@ import (
|
|
|
|
// In the future it's possible that compiler optimizations will make these
|
|
// XxxString functions unnecessary by realizing that calls such as
|
|
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
|
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
|
|
// If that happens, even if we keep these functions they can be replaced with
|
|
// the trivial safe code.
|
|
|
|
diff --git a/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel b/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel
|
|
deleted file mode 100644
|
|
index 0ac02dea2..000000000
|
|
--- a/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel
|
|
+++ /dev/null
|
|
@@ -1,9 +0,0 @@
|
|
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
|
-
|
|
-go_library(
|
|
- name = "go_default_library",
|
|
- srcs = ["ctxhttp.go"],
|
|
- importmap = "kubevirt.io/kubevirt/vendor/golang.org/x/net/context/ctxhttp",
|
|
- importpath = "golang.org/x/net/context/ctxhttp",
|
|
- visibility = ["//visibility:public"],
|
|
-)
|
|
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
|
|
deleted file mode 100644
|
|
index 37dc0cfdb..000000000
|
|
--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
|
|
+++ /dev/null
|
|
@@ -1,71 +0,0 @@
|
|
-// Copyright 2016 The Go Authors. All rights reserved.
|
|
-// Use of this source code is governed by a BSD-style
|
|
-// license that can be found in the LICENSE file.
|
|
-
|
|
-// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
|
|
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
|
|
-
|
|
-import (
|
|
- "context"
|
|
- "io"
|
|
- "net/http"
|
|
- "net/url"
|
|
- "strings"
|
|
-)
|
|
-
|
|
-// Do sends an HTTP request with the provided http.Client and returns
|
|
-// an HTTP response.
|
|
-//
|
|
-// If the client is nil, http.DefaultClient is used.
|
|
-//
|
|
-// The provided ctx must be non-nil. If it is canceled or times out,
|
|
-// ctx.Err() will be returned.
|
|
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
|
|
- if client == nil {
|
|
- client = http.DefaultClient
|
|
- }
|
|
- resp, err := client.Do(req.WithContext(ctx))
|
|
- // If we got an error, and the context has been canceled,
|
|
- // the context's error is probably more useful.
|
|
- if err != nil {
|
|
- select {
|
|
- case <-ctx.Done():
|
|
- err = ctx.Err()
|
|
- default:
|
|
- }
|
|
- }
|
|
- return resp, err
|
|
-}
|
|
-
|
|
-// Get issues a GET request via the Do function.
|
|
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
|
- req, err := http.NewRequest("GET", url, nil)
|
|
- if err != nil {
|
|
- return nil, err
|
|
- }
|
|
- return Do(ctx, client, req)
|
|
-}
|
|
-
|
|
-// Head issues a HEAD request via the Do function.
|
|
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
|
- req, err := http.NewRequest("HEAD", url, nil)
|
|
- if err != nil {
|
|
- return nil, err
|
|
- }
|
|
- return Do(ctx, client, req)
|
|
-}
|
|
-
|
|
-// Post issues a POST request via the Do function.
|
|
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
|
|
- req, err := http.NewRequest("POST", url, body)
|
|
- if err != nil {
|
|
- return nil, err
|
|
- }
|
|
- req.Header.Set("Content-Type", bodyType)
|
|
- return Do(ctx, client, req)
|
|
-}
|
|
-
|
|
-// PostForm issues a POST request via the Do function.
|
|
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
|
|
- return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
|
|
-}
|
|
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS
|
|
deleted file mode 100644
|
|
index 15167cd74..000000000
|
|
--- a/vendor/golang.org/x/oauth2/AUTHORS
|
|
+++ /dev/null
|
|
@@ -1,3 +0,0 @@
|
|
-# This source code refers to The Go Authors for copyright purposes.
|
|
-# The master list of authors is in the main Go distribution,
|
|
-# visible at http://tip.golang.org/AUTHORS.
|
|
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
|
|
deleted file mode 100644
|
|
index 1c4577e96..000000000
|
|
--- a/vendor/golang.org/x/oauth2/CONTRIBUTORS
|
|
+++ /dev/null
|
|
@@ -1,3 +0,0 @@
|
|
-# This source code was written by the Go contributors.
|
|
-# The master list of contributors is in the main Go distribution,
|
|
-# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
|
|
index 1473e1296..781770c20 100644
|
|
--- a/vendor/golang.org/x/oauth2/README.md
|
|
+++ b/vendor/golang.org/x/oauth2/README.md
|
|
@@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples.
|
|
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
|
|
* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google)
|
|
|
|
-## Policy for new packages
|
|
+## Policy for new endpoints
|
|
|
|
We no longer accept new provider-specific packages in this repo if all
|
|
they do is add a single endpoint variable. If you just want to add a
|
|
@@ -29,8 +29,12 @@ package.
|
|
|
|
## Report Issues / Send Patches
|
|
|
|
-This repository uses Gerrit for code changes. To learn how to submit changes to
|
|
-this repository, see https://golang.org/doc/contribute.html.
|
|
-
|
|
The main issue tracker for the oauth2 repository is located at
|
|
https://github.com/golang/oauth2/issues.
|
|
+
|
|
+This repository uses Gerrit for code changes. To learn how to submit changes to
|
|
+this repository, see https://golang.org/doc/contribute.html. In particular:
|
|
+
|
|
+* Excluding trivial changes, all contributions should be connected to an existing issue.
|
|
+* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
|
|
+* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2).
|
|
diff --git a/vendor/golang.org/x/oauth2/internal/BUILD.bazel b/vendor/golang.org/x/oauth2/internal/BUILD.bazel
|
|
index 6c625078f..1de6d207c 100644
|
|
--- a/vendor/golang.org/x/oauth2/internal/BUILD.bazel
|
|
+++ b/vendor/golang.org/x/oauth2/internal/BUILD.bazel
|
|
@@ -11,5 +11,4 @@ go_library(
|
|
importmap = "kubevirt.io/kubevirt/vendor/golang.org/x/oauth2/internal",
|
|
importpath = "golang.org/x/oauth2/internal",
|
|
visibility = ["//vendor/golang.org/x/oauth2:__subpackages__"],
|
|
- deps = ["//vendor/golang.org/x/net/context/ctxhttp:go_default_library"],
|
|
)
|
|
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
|
|
index 355c38696..b4723fcac 100644
|
|
--- a/vendor/golang.org/x/oauth2/internal/token.go
|
|
+++ b/vendor/golang.org/x/oauth2/internal/token.go
|
|
@@ -19,8 +19,6 @@ import (
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
-
|
|
- "golang.org/x/net/context/ctxhttp"
|
|
)
|
|
|
|
// Token represents the credentials used to authorize
|
|
@@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
|
|
}
|
|
|
|
func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
|
|
- r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
|
|
+ r, err := ContextClient(ctx).Do(req.WithContext(ctx))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
|
|
index 291df5c83..9085fabe3 100644
|
|
--- a/vendor/golang.org/x/oauth2/oauth2.go
|
|
+++ b/vendor/golang.org/x/oauth2/oauth2.go
|
|
@@ -16,6 +16,7 @@ import (
|
|
"net/url"
|
|
"strings"
|
|
"sync"
|
|
+ "time"
|
|
|
|
"golang.org/x/oauth2/internal"
|
|
)
|
|
@@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
|
|
//
|
|
// State is a token to protect the user from CSRF attacks. You must
|
|
// always provide a non-empty string and validate that it matches the
|
|
-// the state query parameter on your redirect callback.
|
|
+// state query parameter on your redirect callback.
|
|
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
|
|
//
|
|
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
|
|
@@ -290,6 +291,8 @@ type reuseTokenSource struct {
|
|
|
|
mu sync.Mutex // guards t
|
|
t *Token
|
|
+
|
|
+ expiryDelta time.Duration
|
|
}
|
|
|
|
// Token returns the current token if it's still valid, else will
|
|
@@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
+ t.expiryDelta = s.expiryDelta
|
|
s.t = t
|
|
return t, nil
|
|
}
|
|
@@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
|
|
new: src,
|
|
}
|
|
}
|
|
+
|
|
+// ReuseTokenSource returns a TokenSource that acts in the same manner as the
|
|
+// TokenSource returned by ReuseTokenSource, except the expiry buffer is
|
|
+// configurable. The expiration time of a token is calculated as
|
|
+// t.Expiry.Add(-earlyExpiry).
|
|
+func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
|
|
+ // Don't wrap a reuseTokenSource in itself. That would work,
|
|
+ // but cause an unnecessary number of mutex operations.
|
|
+ // Just build the equivalent one.
|
|
+ if rt, ok := src.(*reuseTokenSource); ok {
|
|
+ if t == nil {
|
|
+ // Just use it directly, but set the expiryDelta to earlyExpiry,
|
|
+ // so the behavior matches what the user expects.
|
|
+ rt.expiryDelta = earlyExpiry
|
|
+ return rt
|
|
+ }
|
|
+ src = rt.new
|
|
+ }
|
|
+ if t != nil {
|
|
+ t.expiryDelta = earlyExpiry
|
|
+ }
|
|
+ return &reuseTokenSource{
|
|
+ t: t,
|
|
+ new: src,
|
|
+ expiryDelta: earlyExpiry,
|
|
+ }
|
|
+}
|
|
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
|
|
index 822720341..7c64006de 100644
|
|
--- a/vendor/golang.org/x/oauth2/token.go
|
|
+++ b/vendor/golang.org/x/oauth2/token.go
|
|
@@ -16,10 +16,10 @@ import (
|
|
"golang.org/x/oauth2/internal"
|
|
)
|
|
|
|
-// expiryDelta determines how earlier a token should be considered
|
|
+// defaultExpiryDelta determines how earlier a token should be considered
|
|
// expired than its actual expiration time. It is used to avoid late
|
|
// expirations due to client-server time mismatches.
|
|
-const expiryDelta = 10 * time.Second
|
|
+const defaultExpiryDelta = 10 * time.Second
|
|
|
|
// Token represents the credentials used to authorize
|
|
// the requests to access protected resources on the OAuth 2.0
|
|
@@ -52,6 +52,11 @@ type Token struct {
|
|
// raw optionally contains extra metadata from the server
|
|
// when updating a token.
|
|
raw interface{}
|
|
+
|
|
+ // expiryDelta is used to calculate when a token is considered
|
|
+ // expired, by subtracting from Expiry. If zero, defaultExpiryDelta
|
|
+ // is used.
|
|
+ expiryDelta time.Duration
|
|
}
|
|
|
|
// Type returns t.TokenType if non-empty, else "Bearer".
|
|
@@ -127,6 +132,11 @@ func (t *Token) expired() bool {
|
|
if t.Expiry.IsZero() {
|
|
return false
|
|
}
|
|
+
|
|
+ expiryDelta := defaultExpiryDelta
|
|
+ if t.expiryDelta != 0 {
|
|
+ expiryDelta = t.expiryDelta
|
|
+ }
|
|
return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
|
|
}
|
|
|
|
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
|
|
index f34a38e4e..a6b508188 100644
|
|
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
|
|
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
|
|
@@ -1,4 +1,4 @@
|
|
-// Copyright 2020 Google LLC
|
|
+// Copyright 2022 Google LLC
|
|
//
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
@@ -15,7 +15,7 @@
|
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
// versions:
|
|
// protoc-gen-go v1.26.0
|
|
-// protoc v3.12.2
|
|
+// protoc v3.21.9
|
|
// source: google/rpc/status.proto
|
|
|
|
package status
|
|
@@ -48,11 +48,13 @@ type Status struct {
|
|
sizeCache protoimpl.SizeCache
|
|
unknownFields protoimpl.UnknownFields
|
|
|
|
- // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
|
|
+ // The status code, which should be an enum value of
|
|
+ // [google.rpc.Code][google.rpc.Code].
|
|
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
|
// A developer-facing error message, which should be in English. Any
|
|
// user-facing error message should be localized and sent in the
|
|
- // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
|
|
+ // [google.rpc.Status.details][google.rpc.Status.details] field, or localized
|
|
+ // by the client.
|
|
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
|
// A list of messages that carry the error details. There is a common set of
|
|
// message types for APIs to use.
|
|
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
|
|
index 52338d004..608aa6e1a 100644
|
|
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
|
|
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
|
|
@@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly.
|
|
both author's & review's time is wasted. Create more PRs to address different
|
|
concerns and everyone will be happy.
|
|
|
|
+- If you are searching for features to work on, issues labeled [Status: Help
|
|
+ Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
|
|
+ is a great place to start. These issues are well-documented and usually can be
|
|
+ resolved with a single pull request.
|
|
+
|
|
+- If you are adding a new file, make sure it has the copyright message template
|
|
+ at the top as a comment. You can copy over the message from an existing file
|
|
+ and update the year.
|
|
+
|
|
- The grpc package should only depend on standard Go packages and a small number
|
|
of exceptions. If your contribution introduces new dependencies which are NOT
|
|
in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
|
|
@@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly.
|
|
- Provide a good **PR description** as a record of **what** change is being made
|
|
and **why** it was made. Link to a github issue if it exists.
|
|
|
|
-- Don't fix code style and formatting unless you are already changing that line
|
|
- to address an issue. PRs with irrelevant changes won't be merged. If you do
|
|
- want to fix formatting or style, do that in a separate PR.
|
|
+- If you want to fix formatting or style, consider whether your changes are an
|
|
+ obvious improvement or might be considered a personal preference. If a style
|
|
+ change is based on preference, it likely will not be accepted. If it corrects
|
|
+ widely agreed-upon anti-patterns, then please do create a PR and explain the
|
|
+ benefits of the change.
|
|
|
|
- Unless your PR is trivial, you should expect there will be reviewer comments
|
|
- that you'll need to address before merging. We expect you to be reasonably
|
|
- responsive to those comments, otherwise the PR will be closed after 2-3 weeks
|
|
- of inactivity.
|
|
+ that you'll need to address before merging. We'll mark it as `Status: Requires
|
|
+ Reporter Clarification` if we expect you to respond to these comments in a
|
|
+ timely manner. If the PR remains inactive for 6 days, it will be marked as
|
|
+ `stale` and automatically close 7 days after that if we don't hear back from
|
|
+ you.
|
|
|
|
- Maintain **clean commit history** and use **meaningful commit messages**. PRs
|
|
with messy commit history are difficult to review and won't be merged. Use
|
|
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
|
|
index ae13ddac1..3efca4591 100644
|
|
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
|
|
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
|
|
@@ -19,12 +19,17 @@
|
|
// Package attributes defines a generic key/value store used in various gRPC
|
|
// components.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
package attributes
|
|
|
|
+import (
|
|
+ "fmt"
|
|
+ "strings"
|
|
+)
|
|
+
|
|
// Attributes is an immutable struct for storing and retrieving generic
|
|
// key/value pairs. Keys must be hashable, and users should define their own
|
|
// types for keys. Values should not be modified after they are added to an
|
|
@@ -99,3 +104,27 @@ func (a *Attributes) Equal(o *Attributes) bool {
|
|
}
|
|
return true
|
|
}
|
|
+
|
|
+// String prints the attribute map. If any key or values throughout the map
|
|
+// implement fmt.Stringer, it calls that method and appends.
|
|
+func (a *Attributes) String() string {
|
|
+ var sb strings.Builder
|
|
+ sb.WriteString("{")
|
|
+ first := true
|
|
+ for k, v := range a.m {
|
|
+ var key, val string
|
|
+ if str, ok := k.(interface{ String() string }); ok {
|
|
+ key = str.String()
|
|
+ }
|
|
+ if str, ok := v.(interface{ String() string }); ok {
|
|
+ val = str.String()
|
|
+ }
|
|
+ if !first {
|
|
+ sb.WriteString(", ")
|
|
+ }
|
|
+ sb.WriteString(fmt.Sprintf("%q: %q, ", key, val))
|
|
+ first = false
|
|
+ }
|
|
+ sb.WriteString("}")
|
|
+ return sb.String()
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
|
|
index 542594f5c..29475e31c 100644
|
|
--- a/vendor/google.golang.org/grpc/backoff.go
|
|
+++ b/vendor/google.golang.org/grpc/backoff.go
|
|
@@ -48,7 +48,7 @@ type BackoffConfig struct {
|
|
// here for more details:
|
|
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 257139080..8f00523c0 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -110,6 +110,11 @@ type SubConn interface {
UpdateAddresses([]resolver.Address)
// Connect starts the connecting for this SubConn.
Connect()
+ // GetOrBuildProducer returns a reference to the existing Producer for this
+ // ProducerBuilder in this SubConn, or, if one does not currently exist,
+ // creates a new one and returns it. Returns a close function which must
+ // be called when the Producer is no longer needed.
+ GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
}

// NewSubConnOptions contains options to create new SubConn.
@@ -244,7 +249,7 @@ type DoneInfo struct {
// ServerLoad is the load received from server. It's usually sent as part of
// trailing metadata.
//
- // The only supported type now is *orca_v1.LoadReport.
+ // The only supported type now is *orca_v3.LoadReport.
ServerLoad interface{}
}

@@ -274,6 +279,14 @@ type PickResult struct {
// type, Done may not be called. May be nil if the balancer does not wish
// to be notified when the RPC completes.
Done func(DoneInfo)
+
+ // Metadata provides a way for LB policies to inject arbitrary per-call
+ // metadata. Any metadata returned here will be merged with existing
+ // metadata added by the client application.
+ //
+ // LB policies with child policies are responsible for propagating metadata
+ // injected by their children to the ClientConn, as part of Pick().
+ Metadata metadata.MD
}

// TransientFailureError returns e. It exists for backward compatibility and
@@ -371,3 +384,21 @@ type ClientConnState struct {
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
// problem with the provided name resolver data.
var ErrBadResolverState = errors.New("bad resolver state")
+
+// A ProducerBuilder is a simple constructor for a Producer. It is used by the
+// SubConn to create producers when needed.
+type ProducerBuilder interface {
+ // Build creates a Producer. The first parameter is always a
+ // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
+ // associated SubConn), but is declared as interface{} to avoid a
+ // dependency cycle. Should also return a close function that will be
+ // called when all references to the Producer have been given up.
+ Build(grpcClientConnInterface interface{}) (p Producer, close func())
+}
+
+// A Producer is a type shared among potentially many consumers. It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer interface {
+}
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index e8dfc828a..3929c26d3 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error {

// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker is
-// - errPicker if the balancer is in TransientFailure,
-// - built by the pickerBuilder with all READY SubConns otherwise.
+// - errPicker if the balancer is in TransientFailure,
+// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
b.picker = NewErrPicker(b.mergeErrors())
diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
index a87b6809a..c33413581 100644
--- a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
+++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
@@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct {
// RecordTransition records state change happening in subConn and based on that
// it evaluates what aggregated state should be.
//
-// - If at least one SubConn in Ready, the aggregated state is Ready;
-// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-// - Else if at least one SubConn is Idle, the aggregated state is Idle;
-// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
+// - If at least one SubConn in Ready, the aggregated state is Ready;
+// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
+// - Else if at least one SubConn is Idle, the aggregated state is Idle;
+// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
//
// Shutdown is not considered.
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
@@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne
cse.numIdle += updateVal
}
}
+ return cse.CurrentState()
+}

+// CurrentState returns the current aggregate conn state by evaluating the counters
+func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
// Evaluate.
if cse.numReady > 0 {
return connectivity.Ready
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 274eb2f85..f7031ad22 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,7 +22,7 @@
package roundrobin

import (
- "sync"
+ "sync/atomic"

"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
@@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
// Start at a random index, as the same RR balancer rebuilds a new
// picker when SubConn states change, and we don't want to apply excess
// load to the first server in the list.
- next: grpcrand.Intn(len(scs)),
+ next: uint32(grpcrand.Intn(len(scs))),
}
}

@@ -69,15 +69,13 @@ type rrPicker struct {
// created. The slice is immutable. Each Get() will do a round robin
// selection from it and return the selected SubConn.
subConns []balancer.SubConn
-
- mu sync.Mutex
- next int
+ next uint32
}

func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- p.mu.Lock()
- sc := p.subConns[p.next]
- p.next = (p.next + 1) % len(p.subConns)
- p.mu.Unlock()
+ subConnsLen := uint32(len(p.subConns))
+ nextIndex := atomic.AddUint32(&p.next, 1)
+
+ sc := p.subConns[nextIndex%subConnsLen]
return balancer.PickResult{SubConn: sc}, nil
}
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index b1c23eaae..04b9ad411 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -19,6 +19,7 @@
package grpc

import (
+ "context"
"fmt"
"strings"
"sync"
@@ -26,12 +27,20 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
- "google.golang.org/grpc/internal/buffer"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
)

+type ccbMode int
+
+const (
+ ccbModeActive = iota
+ ccbModeIdle
+ ccbModeClosed
+ ccbModeExitingIdle
+)
+
// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
@@ -46,192 +55,101 @@ import (
|
|
// It uses the gracefulswitch.Balancer internally to ensure that balancer
|
|
// switches happen in a graceful manner.
|
|
type ccBalancerWrapper struct {
|
|
- cc *ClientConn
|
|
-
|
|
- // Since these fields are accessed only from handleXxx() methods which are
|
|
- // synchronized by the watcher goroutine, we do not need a mutex to protect
|
|
- // these fields.
|
|
+ // The following fields are initialized when the wrapper is created and are
|
|
+ // read-only afterwards, and therefore can be accessed without a mutex.
|
|
+ cc *ClientConn
|
|
+ opts balancer.BuildOptions
|
|
+
|
|
+ // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
|
|
+ // mutually exclusive manner as they are scheduled in the serializer. Fields
|
|
+ // accessed *only* in these serializer callbacks, can therefore be accessed
|
|
+ // without a mutex.
|
|
balancer *gracefulswitch.Balancer
|
|
curBalancerName string
|
|
|
|
- updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
|
|
- resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
|
|
- closed *grpcsync.Event // Indicates if close has been called.
|
|
- done *grpcsync.Event // Indicates if close has completed its work.
|
|
+ // mu guards access to the below fields. Access to the serializer and its
|
|
+ // cancel function needs to be mutex protected because they are overwritten
|
|
+ // when the wrapper exits idle mode.
|
|
+ mu sync.Mutex
|
|
+ serializer *grpcsync.CallbackSerializer // To serialize all outoing calls.
|
|
+ serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time.
|
|
+ mode ccbMode // Tracks the current mode of the wrapper.
|
|
}
|
|
|
|
// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
|
|
// is not created until the switchTo() method is invoked.
|
|
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
|
+ ctx, cancel := context.WithCancel(context.Background())
|
|
ccb := &ccBalancerWrapper{
|
|
- cc: cc,
|
|
- updateCh: buffer.NewUnbounded(),
|
|
- resultCh: buffer.NewUnbounded(),
|
|
- closed: grpcsync.NewEvent(),
|
|
- done: grpcsync.NewEvent(),
|
|
+ cc: cc,
|
|
+ opts: bopts,
|
|
+ serializer: grpcsync.NewCallbackSerializer(ctx),
|
|
+ serializerCancel: cancel,
|
|
}
|
|
- go ccb.watcher()
|
|
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
|
|
return ccb
|
|
}
|
|
|
|
-// The following xxxUpdate structs wrap the arguments received as part of the
|
|
-// corresponding update. The watcher goroutine uses the 'type' of the update to
|
|
-// invoke the appropriate handler routine to handle the update.
|
|
-
|
|
-type ccStateUpdate struct {
|
|
- ccs *balancer.ClientConnState
|
|
-}
|
|
-
|
|
-type scStateUpdate struct {
|
|
- sc balancer.SubConn
|
|
- state connectivity.State
|
|
- err error
|
|
-}
|
|
-
|
|
-type exitIdleUpdate struct{}
|
|
-
|
|
-type resolverErrorUpdate struct {
|
|
- err error
|
|
-}
|
|
-
|
|
-type switchToUpdate struct {
|
|
- name string
|
|
-}
|
|
-
|
|
-type subConnUpdate struct {
|
|
- acbw *acBalancerWrapper
|
|
-}
|
|
-
|
|
-// watcher is a long-running goroutine which reads updates from a channel and
|
|
-// invokes corresponding methods on the underlying balancer. It ensures that
|
|
-// these methods are invoked in a synchronous fashion. It also ensures that
|
|
-// these methods are invoked in the order in which the updates were received.
|
|
-func (ccb *ccBalancerWrapper) watcher() {
|
|
- for {
|
|
- select {
|
|
- case u := <-ccb.updateCh.Get():
|
|
- ccb.updateCh.Load()
|
|
- if ccb.closed.HasFired() {
|
|
- break
|
|
- }
|
|
- switch update := u.(type) {
|
|
- case *ccStateUpdate:
|
|
- ccb.handleClientConnStateChange(update.ccs)
|
|
- case *scStateUpdate:
|
|
- ccb.handleSubConnStateChange(update)
|
|
- case *exitIdleUpdate:
|
|
- ccb.handleExitIdle()
|
|
- case *resolverErrorUpdate:
|
|
- ccb.handleResolverError(update.err)
|
|
- case *switchToUpdate:
|
|
- ccb.handleSwitchTo(update.name)
|
|
- case *subConnUpdate:
|
|
- ccb.handleRemoveSubConn(update.acbw)
|
|
- default:
|
|
- logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
|
|
- }
|
|
- case <-ccb.closed.Done():
|
|
- }
|
|
-
|
|
- if ccb.closed.HasFired() {
|
|
- ccb.handleClose()
|
|
- return
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
// updateClientConnState is invoked by grpc to push a ClientConnState update to
|
|
// the underlying balancer.
|
|
-//
|
|
-// Unlike other methods invoked by grpc to push updates to the underlying
|
|
-// balancer, this method cannot simply push the update onto the update channel
|
|
-// and return. It needs to return the error returned by the underlying balancer
|
|
-// back to grpc which propagates that to the resolver.
|
|
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
|
- ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
|
|
-
|
|
- var res interface{}
|
|
- select {
|
|
- case res = <-ccb.resultCh.Get():
|
|
- ccb.resultCh.Load()
|
|
- case <-ccb.closed.Done():
|
|
- // Return early if the balancer wrapper is closed while we are waiting for
|
|
- // the underlying balancer to process a ClientConnState update.
|
|
- return nil
|
|
- }
|
|
- // If the returned error is nil, attempting to type assert to error leads to
|
|
- // panic. So, this needs to handled separately.
|
|
- if res == nil {
|
|
- return nil
|
|
- }
|
|
- return res.(error)
|
|
-}
|
|
-
|
|
-// handleClientConnStateChange handles a ClientConnState update from the update
|
|
-// channel and invokes the appropriate method on the underlying balancer.
|
|
-//
|
|
-// If the addresses specified in the update contain addresses of type "grpclb"
|
|
-// and the selected LB policy is not "grpclb", these addresses will be filtered
|
|
-// out and ccs will be modified with the updated address list.
|
|
-func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
|
|
- if ccb.curBalancerName != grpclbName {
|
|
- // Filter any grpclb addresses since we don't have the grpclb balancer.
|
|
- var addrs []resolver.Address
|
|
- for _, addr := range ccs.ResolverState.Addresses {
|
|
- if addr.Type == resolver.GRPCLB {
|
|
- continue
|
|
+ ccb.mu.Lock()
|
|
+ errCh := make(chan error, 1)
|
|
+ // Here and everywhere else where Schedule() is called, it is done with the
|
|
+ // lock held. But the lock guards only the scheduling part. The actual
|
|
+ // callback is called asynchronously without the lock being held.
|
|
+ ok := ccb.serializer.Schedule(func(_ context.Context) {
|
|
+ // If the addresses specified in the update contain addresses of type
|
|
+ // "grpclb" and the selected LB policy is not "grpclb", these addresses
|
|
+ // will be filtered out and ccs will be modified with the updated
|
|
+ // address list.
|
|
+ if ccb.curBalancerName != grpclbName {
|
|
+ var addrs []resolver.Address
|
|
+ for _, addr := range ccs.ResolverState.Addresses {
|
|
+ if addr.Type == resolver.GRPCLB {
|
|
+ continue
|
|
+ }
|
|
+ addrs = append(addrs, addr)
|
|
}
|
|
- addrs = append(addrs, addr)
|
|
+ ccs.ResolverState.Addresses = addrs
|
|
}
|
|
- ccs.ResolverState.Addresses = addrs
|
|
+ errCh <- ccb.balancer.UpdateClientConnState(*ccs)
|
|
+ })
|
|
+ if !ok {
|
|
+ // If we are unable to schedule a function with the serializer, it
|
|
+ // indicates that it has been closed. A serializer is only closed when
|
|
+ // the wrapper is closed or is in idle.
|
|
+ ccb.mu.Unlock()
|
|
+ return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
|
|
}
|
|
- ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
|
|
+ ccb.mu.Unlock()
|
|
+
|
|
+ // We get here only if the above call to Schedule succeeds, in which case it
|
|
+ // is guaranteed that the scheduled function will run. Therefore it is safe
|
|
+ // to block on this channel.
|
|
+ err := <-errCh
|
|
+ if logger.V(2) && err != nil {
|
|
+ logger.Infof("error from balancer.UpdateClientConnState: %v", err)
|
|
+ }
|
|
+ return err
|
|
}
|
|
|
|
// updateSubConnState is invoked by grpc to push a subConn state update to the
|
|
// underlying balancer.
|
|
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
|
|
- // When updating addresses for a SubConn, if the address in use is not in
|
|
- // the new addresses, the old ac will be tearDown() and a new ac will be
|
|
- // created. tearDown() generates a state change with Shutdown state, we
|
|
- // don't want the balancer to receive this state change. So before
|
|
- // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
|
|
- // this function will be called with (nil, Shutdown). We don't need to call
|
|
- // balancer method in this case.
|
|
- if sc == nil {
|
|
- return
|
|
- }
|
|
- ccb.updateCh.Put(&scStateUpdate{
|
|
- sc: sc,
|
|
- state: s,
|
|
- err: err,
|
|
+ ccb.mu.Lock()
|
|
+ ccb.serializer.Schedule(func(_ context.Context) {
|
|
+ ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
|
|
})
|
|
-}
|
|
-
|
|
-// handleSubConnStateChange handles a SubConnState update from the update
|
|
-// channel and invokes the appropriate method on the underlying balancer.
|
|
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
|
|
- ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
|
|
-}
|
|
-
|
|
-func (ccb *ccBalancerWrapper) exitIdle() {
|
|
- ccb.updateCh.Put(&exitIdleUpdate{})
|
|
-}
|
|
-
|
|
-func (ccb *ccBalancerWrapper) handleExitIdle() {
|
|
- if ccb.cc.GetState() != connectivity.Idle {
|
|
- return
|
|
- }
|
|
- ccb.balancer.ExitIdle()
|
|
+ ccb.mu.Unlock()
|
|
}
|
|
|
|
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
|
- ccb.updateCh.Put(&resolverErrorUpdate{err: err})
|
|
-}
|
|
-
|
|
-func (ccb *ccBalancerWrapper) handleResolverError(err error) {
|
|
- ccb.balancer.ResolverError(err)
|
|
+ ccb.mu.Lock()
|
|
+ ccb.serializer.Schedule(func(_ context.Context) {
|
|
+ ccb.balancer.ResolverError(err)
|
|
+ })
|
|
+ ccb.mu.Unlock()
|
|
}
|
|
|
|
// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
|
|
@@ -245,24 +163,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) {
|
|
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
|
|
// the graceful balancer switching process if the name does not change.
|
|
func (ccb *ccBalancerWrapper) switchTo(name string) {
|
|
- ccb.updateCh.Put(&switchToUpdate{name: name})
|
|
+ ccb.mu.Lock()
|
|
+ ccb.serializer.Schedule(func(_ context.Context) {
|
|
+ // TODO: Other languages use case-sensitive balancer registries. We should
|
|
+ // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
|
|
+ if strings.EqualFold(ccb.curBalancerName, name) {
|
|
+ return
|
|
+ }
|
|
+ ccb.buildLoadBalancingPolicy(name)
|
|
+ })
|
|
+ ccb.mu.Unlock()
|
|
}
|
|
|
|
-// handleSwitchTo handles a balancer switch update from the update channel. It
|
|
-// calls the SwitchTo() method on the gracefulswitch.Balancer with a
|
|
-// balancer.Builder corresponding to name. If no balancer.Builder is registered
|
|
-// for the given name, it uses the default LB policy which is "pick_first".
|
|
-func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
|
|
- // TODO: Other languages use case-insensitive balancer registries. We should
|
|
- // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
|
|
- if strings.EqualFold(ccb.curBalancerName, name) {
|
|
- return
|
|
- }
|
|
-
|
|
- // TODO: Ensure that name is a registered LB policy when we get here.
|
|
- // We currently only validate the `loadBalancingConfig` field. We need to do
|
|
- // the same for the `loadBalancingPolicy` field and reject the service config
|
|
- // if the specified policy is not registered.
|
|
+// buildLoadBalancingPolicy performs the following:
|
|
+// - retrieve a balancer builder for the given name. Use the default LB
|
|
+// policy, pick_first, if no LB policy with name is found in the registry.
|
|
+// - instruct the gracefulswitch balancer to switch to the above builder. This
|
|
+// will actually build the new balancer.
|
|
+// - update the `curBalancerName` field
|
|
+//
|
|
+// Must be called from a serializer callback.
|
|
+func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
|
|
builder := balancer.Get(name)
|
|
if builder == nil {
|
|
channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
|
|
@@ -278,26 +199,114 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
|
|
ccb.curBalancerName = builder.Name()
|
|
}
|
|
|
|
-// handleRemoveSucConn handles a request from the underlying balancer to remove
|
|
-// a subConn.
|
|
-//
|
|
-// See comments in RemoveSubConn() for more details.
|
|
-func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
|
|
- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
|
+func (ccb *ccBalancerWrapper) close() {
|
|
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
|
|
+ ccb.closeBalancer(ccbModeClosed)
|
|
}
|
|
|
|
-func (ccb *ccBalancerWrapper) close() {
|
|
- ccb.closed.Fire()
|
|
- <-ccb.done.Done()
|
|
+// enterIdleMode is invoked by grpc when the channel enters idle mode upon
|
|
+// expiry of idle_timeout. This call blocks until the balancer is closed.
|
|
+func (ccb *ccBalancerWrapper) enterIdleMode() {
|
|
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
|
|
+ ccb.closeBalancer(ccbModeIdle)
|
|
}
|
|
|
|
-func (ccb *ccBalancerWrapper) handleClose() {
|
|
- ccb.balancer.Close()
|
|
- ccb.done.Fire()
|
|
+// closeBalancer is invoked when the channel is being closed or when it enters
|
|
+// idle mode upon expiry of idle_timeout.
|
|
+func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
|
|
+ ccb.mu.Lock()
|
|
+ if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
|
|
+ ccb.mu.Unlock()
|
|
+ return
|
|
+ }
|
|
+
|
|
+ ccb.mode = m
|
|
+ done := ccb.serializer.Done
|
|
+ b := ccb.balancer
|
|
+ ok := ccb.serializer.Schedule(func(_ context.Context) {
|
|
+ // Close the serializer to ensure that no more calls from gRPC are sent
|
|
+ // to the balancer.
|
|
+ ccb.serializerCancel()
|
|
+ // Empty the current balancer name because we don't have a balancer
|
|
+ // anymore and also so that we act on the next call to switchTo by
|
|
+ // creating a new balancer specified by the new resolver.
|
|
+ ccb.curBalancerName = ""
|
|
+ })
|
|
+ if !ok {
|
|
+ ccb.mu.Unlock()
|
|
+ return
|
|
+ }
|
|
+ ccb.mu.Unlock()
|
|
+
|
|
+ // Give enqueued callbacks a chance to finish.
|
|
+ <-done
|
|
+ // Spawn a goroutine to close the balancer (since it may block trying to
|
|
+ // cleanup all allocated resources) and return early.
|
|
+ go b.Close()
|
|
+}
|
|
+
|
|
+// exitIdleMode is invoked by grpc when the channel exits idle mode either
|
|
+// because of an RPC or because of an invocation of the Connect() API. This
|
|
+// recreates the balancer that was closed previously when entering idle mode.
|
|
+//
|
|
+// If the channel is not in idle mode, we know for a fact that we are here as a
|
|
+// result of the user calling the Connect() method on the ClientConn. In this
|
|
+// case, we can simply forward the call to the underlying balancer, instructing
|
|
+// it to reconnect to the backends.
|
|
+func (ccb *ccBalancerWrapper) exitIdleMode() {
|
|
+ ccb.mu.Lock()
|
|
+ if ccb.mode == ccbModeClosed {
|
|
+ // Request to exit idle is a no-op when wrapper is already closed.
|
|
+ ccb.mu.Unlock()
|
|
+ return
|
|
+ }
|
|
+
|
|
+ if ccb.mode == ccbModeIdle {
|
|
+ // Recreate the serializer which was closed when we entered idle.
|
|
+ ctx, cancel := context.WithCancel(context.Background())
|
|
+ ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
|
|
+ ccb.serializerCancel = cancel
|
|
+ }
|
|
+
|
|
+ // The ClientConn guarantees that mutual exclusion between close() and
|
|
+ // exitIdleMode(), and since we just created a new serializer, we can be
|
|
+ // sure that the below function will be scheduled.
|
|
+ done := make(chan struct{})
|
|
+ ccb.serializer.Schedule(func(_ context.Context) {
|
|
+ defer close(done)
|
|
+
|
|
+ ccb.mu.Lock()
|
|
+ defer ccb.mu.Unlock()
|
|
+
|
|
+ if ccb.mode != ccbModeIdle {
|
|
+ ccb.balancer.ExitIdle()
|
|
+ return
|
|
+ }
|
|
+
|
|
+ // Gracefulswitch balancer does not support a switchTo operation after
|
|
+ // being closed. Hence we need to create a new one here.
|
|
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
|
|
+ ccb.mode = ccbModeActive
|
|
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
|
|
+
|
|
+ })
|
|
+ ccb.mu.Unlock()
|
|
+
|
|
+ <-done
|
|
+}
|
|
+
|
|
+func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
|
|
+ ccb.mu.Lock()
|
|
+ defer ccb.mu.Unlock()
|
|
+ return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
|
|
}
|
|
|
|
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
|
- if len(addrs) <= 0 {
|
|
+ if ccb.isIdleOrClosed() {
|
|
+ return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
|
|
+ }
|
|
+
|
|
+ if len(addrs) == 0 {
|
|
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
|
|
}
|
|
ac, err := ccb.cc.newAddrConn(addrs, opts)
|
|
@@ -305,32 +314,36 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
|
|
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
|
|
return nil, err
|
|
}
|
|
- acbw := &acBalancerWrapper{ac: ac}
|
|
- acbw.ac.mu.Lock()
|
|
+ acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
|
|
ac.acbw = acbw
|
|
- acbw.ac.mu.Unlock()
|
|
return acbw, nil
|
|
}
|
|
|
|
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
|
- // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
|
|
- // was required to handle the RemoveSubConn() method asynchronously by pushing
|
|
- // the update onto the update channel. This was done to avoid a deadlock as
|
|
- // switchBalancer() was holding cc.mu when calling Close() on the old
|
|
- // balancer, which would in turn call RemoveSubConn().
|
|
- //
|
|
- // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
|
|
- // asynchronously is probably not required anymore since the switchTo() method
|
|
- // handles the balancer switch by pushing the update onto the channel.
|
|
- // TODO(easwars): Handle this inline.
|
|
+ if ccb.isIdleOrClosed() {
|
|
+ // It it safe to ignore this call when the balancer is closed or in idle
|
|
+ // because the ClientConn takes care of closing the connections.
|
|
+ //
|
|
+ // Not returning early from here when the balancer is closed or in idle
|
|
+ // leads to a deadlock though, because of the following sequence of
|
|
+ // calls when holding cc.mu:
|
|
+ // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
|
|
+ // ccb.RemoveAddrConn --> cc.removeAddrConn
|
|
+ return
|
|
+ }
|
|
+
|
|
acbw, ok := sc.(*acBalancerWrapper)
|
|
if !ok {
|
|
return
|
|
}
|
|
- ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
|
|
+ ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
|
|
}
|
|
|
|
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
|
|
+ if ccb.isIdleOrClosed() {
|
|
+ return
|
|
+ }
|
|
+
|
|
acbw, ok := sc.(*acBalancerWrapper)
|
|
if !ok {
|
|
return
|
|
@@ -339,6 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
|
|
}
|
|
|
|
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
|
+ if ccb.isIdleOrClosed() {
|
|
+ return
|
|
+ }
|
|
+
|
|
// Update picker before updating state. Even though the ordering here does
|
|
// not matter, it can lead to multiple calls of Pick in the common start-up
|
|
// case where we wait for ready and then perform an RPC. If the picker is
|
|
@@ -349,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
|
}
|
|
|
|
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
|
|
+ if ccb.isIdleOrClosed() {
|
|
+ return
|
|
+ }
|
|
+
|
|
ccb.cc.resolveNow(o)
|
|
}
|
|
|
|
@@ -359,58 +380,80 @@ func (ccb *ccBalancerWrapper) Target() string {
|
|
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
|
// It implements balancer.SubConn interface.
|
|
type acBalancerWrapper struct {
|
|
- mu sync.Mutex
|
|
- ac *addrConn
|
|
+ ac *addrConn // read-only
|
|
+
|
|
+ mu sync.Mutex
|
|
+ producers map[balancer.ProducerBuilder]*refCountedProducer
|
|
+}
|
|
+
|
|
+func (acbw *acBalancerWrapper) String() string {
|
|
+ return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
|
|
}
|
|
|
|
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
|
- acbw.mu.Lock()
|
|
- defer acbw.mu.Unlock()
|
|
- if len(addrs) <= 0 {
|
|
- acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
|
- return
|
|
+ acbw.ac.updateAddrs(addrs)
|
|
+}
|
|
+
|
|
+func (acbw *acBalancerWrapper) Connect() {
|
|
+ go acbw.ac.connect()
|
|
+}
|
|
+
|
|
+// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
|
|
+// ready, blocks until it is or ctx expires. Returns an error when the context
|
|
+// expires or the addrConn is shut down.
|
|
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
|
+ transport, err := acbw.ac.getTransport(ctx)
|
|
+ if err != nil {
|
|
+ return nil, err
|
|
}
|
|
- if !acbw.ac.tryUpdateAddrs(addrs) {
|
|
- cc := acbw.ac.cc
|
|
- opts := acbw.ac.scopts
|
|
- acbw.ac.mu.Lock()
|
|
- // Set old ac.acbw to nil so the Shutdown state update will be ignored
|
|
- // by balancer.
|
|
- //
|
|
- // TODO(bar) the state transition could be wrong when tearDown() old ac
|
|
- // and creating new ac, fix the transition.
|
|
- acbw.ac.acbw = nil
|
|
- acbw.ac.mu.Unlock()
|
|
- acState := acbw.ac.getState()
|
|
- acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
|
-
|
|
- if acState == connectivity.Shutdown {
|
|
- return
|
|
- }
|
|
+ return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
|
|
+}
|
|
|
|
- newAC, err := cc.newAddrConn(addrs, opts)
|
|
- if err != nil {
|
|
- channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
|
- return
|
|
- }
|
|
- acbw.ac = newAC
|
|
- newAC.mu.Lock()
|
|
- newAC.acbw = acbw
|
|
- newAC.mu.Unlock()
|
|
- if acState != connectivity.Idle {
|
|
- go newAC.connect()
|
|
- }
|
|
+// Invoke performs a unary RPC. If the addrConn is not ready, returns
|
|
+// errSubConnNotReady.
|
|
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error {
|
|
+ cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
|
|
+ if err != nil {
|
|
+ return err
|
|
+ }
|
|
+ if err := cs.SendMsg(args); err != nil {
|
|
+ return err
|
|
}
|
|
+ return cs.RecvMsg(reply)
|
|
}
|
|
|
|
-func (acbw *acBalancerWrapper) Connect() {
|
|
- acbw.mu.Lock()
|
|
- defer acbw.mu.Unlock()
|
|
- go acbw.ac.connect()
|
|
+type refCountedProducer struct {
|
|
+ producer balancer.Producer
|
|
+ refs int // number of current refs to the producer
|
|
+ close func() // underlying producer's close function
|
|
}
|
|
|
|
-func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
|
|
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
|
|
acbw.mu.Lock()
|
|
defer acbw.mu.Unlock()
|
|
- return acbw.ac
|
|
+
|
|
+ // Look up existing producer from this builder.
|
|
+ pData := acbw.producers[pb]
|
|
+ if pData == nil {
|
|
+ // Not found; create a new one and add it to the producers map.
|
|
+ p, close := pb.Build(acbw)
|
|
+ pData = &refCountedProducer{producer: p, close: close}
|
|
+ acbw.producers[pb] = pData
|
|
+ }
|
|
+ // Account for this new reference.
|
|
+ pData.refs++
|
|
+
|
|
+ // Return a cleanup function wrapped in a OnceFunc to remove this reference
|
|
+ // and delete the refCountedProducer from the map if the total reference
|
|
+ // count goes to zero.
|
|
+ unref := func() {
|
|
+ acbw.mu.Lock()
|
|
+ pData.refs--
|
|
+ if pData.refs == 0 {
|
|
+ defer pData.close() // Run outside the acbw mutex
|
|
+ delete(acbw.producers, pb)
|
|
+ }
|
|
+ acbw.mu.Unlock()
|
|
+ }
|
|
+ return pData.producer, grpcsync.OnceFunc(unref)
|
|
}
|
|
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index ed75290cd..ec2c2fa14 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,14 +18,13 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.25.0
-// protoc v3.14.0
+// protoc-gen-go v1.30.0
+// protoc v4.22.0
// source: grpc/binlog/v1/binarylog.proto

package grpc_binarylog_v1

import (
- proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
@@ -41,10 +40,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

-// This is a compile-time assertion that a sufficiently up-to-date version
-// of the legacy proto package is being used.
-const _ = proto.ProtoPackageIsVersion4
-
// Enumerates the type of event
// Note the terminology is different from the RPC semantics
// definition, but the same meaning is expressed here.
@@ -261,6 +256,7 @@ type GrpcLogEntry struct {
// according to the type of the log entry.
//
// Types that are assignable to Payload:
+ //
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
// *GrpcLogEntry_Message
@@ -694,12 +690,12 @@ func (x *Message) GetData() []byte {
// Header keys added by gRPC are omitted. To be more specific,
// implementations will not log the following entries, and this is
// not to be treated as a truncation:
-// - entries handled by grpc that are not user visible, such as those
-// that begin with 'grpc-' (with exception of grpc-trace-bin)
-// or keys like 'lb-token'
-// - transport specific entries, including but not limited to:
-// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
-// - entries added for call credentials
+// - entries handled by grpc that are not user visible, such as those
+// that begin with 'grpc-' (with exception of grpc-trace-bin)
+// or keys like 'lb-token'
+// - transport specific entries, including but not limited to:
+// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
+// - entries added for call credentials
//
// Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 9e20e4d38..e6a1dc5d7 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -27,6 +27,11 @@ import (
//
// All errors returned by Invoke are compatible with the status package.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+ if err := cc.idlenessMgr.onCallBegin(); err != nil {
+ return err
+ }
+ defer cc.idlenessMgr.onCallEnd()
+
// allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts)
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
index a220c47c5..32b7fa579 100644
--- a/vendor/google.golang.org/grpc/channelz/channelz.go
+++ b/vendor/google.golang.org/grpc/channelz/channelz.go
@@ -23,7 +23,7 @@
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
// the `internal/channelz` package.
//
-// Experimental
+// # Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 779b03bca..95a7459b0 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -24,7 +24,6 @@ import (
"fmt"
"math"
"net/url"
- "reflect"
"strings"
"sync"
"sync/atomic"
@@ -69,6 +68,9 @@ var (
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
+ // errConnIdling indicates the the connection is being closed as the channel
+ // is moving to an idle mode due to inactivity.
+ errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@@ -134,20 +136,42 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
|
|
// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
|
|
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
|
|
cc := &ClientConn{
|
|
- target: target,
|
|
- csMgr: &connectivityStateManager{},
|
|
- conns: make(map[*addrConn]struct{}),
|
|
- dopts: defaultDialOptions(),
|
|
- blockingpicker: newPickerWrapper(),
|
|
- czData: new(channelzData),
|
|
- firstResolveEvent: grpcsync.NewEvent(),
|
|
- }
|
|
+ target: target,
|
|
+ csMgr: &connectivityStateManager{},
|
|
+ conns: make(map[*addrConn]struct{}),
|
|
+ dopts: defaultDialOptions(),
|
|
+ czData: new(channelzData),
|
|
+ }
|
|
+
|
|
+ // We start the channel off in idle mode, but kick it out of idle at the end
|
|
+ // of this method, instead of waiting for the first RPC. Other gRPC
|
|
+ // implementations do wait for the first RPC to kick the channel out of
|
|
+ // idle. But doing so would be a major behavior change for our users who are
|
|
+ // used to seeing the channel active after Dial.
|
|
+ //
|
|
+ // Taking this approach of kicking it out of idle at the end of this method
|
|
+ // allows us to share the code between channel creation and exiting idle
|
|
+ // mode. This will also make it easy for us to switch to starting the
|
|
+ // channel off in idle, if at all we ever get to do that.
|
|
+ cc.idlenessState = ccIdlenessStateIdle
|
|
+
|
|
cc.retryThrottler.Store((*retryThrottler)(nil))
|
|
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
|
|
cc.ctx, cc.cancel = context.WithCancel(context.Background())
|
|
+ cc.exitIdleCond = sync.NewCond(&cc.mu)
|
|
|
|
- for _, opt := range extraDialOptions {
|
|
- opt.apply(&cc.dopts)
|
|
+ disableGlobalOpts := false
|
|
+ for _, opt := range opts {
|
|
+ if _, ok := opt.(*disableGlobalDialOptions); ok {
|
|
+ disableGlobalOpts = true
|
|
+ break
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if !disableGlobalOpts {
|
|
+ for _, opt := range globalDialOptions {
|
|
+ opt.apply(&cc.dopts)
|
|
+ }
|
|
}
|
|
|
|
for _, opt := range opts {
|
|
@@ -163,40 +187,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|
}
|
|
}()
|
|
|
|
- pid := cc.dopts.channelzParentID
|
|
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
|
|
- ted := &channelz.TraceEventDesc{
|
|
- Desc: "Channel created",
|
|
- Severity: channelz.CtInfo,
|
|
- }
|
|
- if cc.dopts.channelzParentID != nil {
|
|
- ted.Parent = &channelz.TraceEventDesc{
|
|
- Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
|
|
- Severity: channelz.CtInfo,
|
|
- }
|
|
- }
|
|
- channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
|
|
- cc.csMgr.channelzID = cc.channelzID
|
|
+ // Register ClientConn with channelz.
|
|
+ cc.channelzRegistration(target)
|
|
|
|
- if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
|
|
- return nil, errNoTransportSecurity
|
|
- }
|
|
- if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
|
|
- return nil, errTransportCredsAndBundle
|
|
- }
|
|
- if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
|
|
- return nil, errNoTransportCredsInBundle
|
|
- }
|
|
- transportCreds := cc.dopts.copts.TransportCredentials
|
|
- if transportCreds == nil {
|
|
- transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
|
|
- }
|
|
- if transportCreds.Info().SecurityProtocol == "insecure" {
|
|
- for _, cd := range cc.dopts.copts.PerRPCCredentials {
|
|
- if cd.RequireTransportSecurity() {
|
|
- return nil, errTransportCredentialsMissing
|
|
- }
|
|
- }
|
|
+ if err := cc.validateTransportCredentials(); err != nil {
|
|
+ return nil, err
|
|
}
|
|
|
|
if cc.dopts.defaultServiceConfigRawJSON != nil {
|
|
@@ -234,35 +229,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|
}
|
|
}()
|
|
|
|
- scSet := false
|
|
- if cc.dopts.scChan != nil {
|
|
- // Try to get an initial service config.
|
|
- select {
|
|
- case sc, ok := <-cc.dopts.scChan:
|
|
- if ok {
|
|
- cc.sc = &sc
|
|
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
|
|
- scSet = true
|
|
- }
|
|
- default:
|
|
- }
|
|
- }
|
|
if cc.dopts.bs == nil {
|
|
cc.dopts.bs = backoff.DefaultExponential
|
|
}
|
|
|
|
// Determine the resolver to use.
|
|
- resolverBuilder, err := cc.parseTargetAndFindResolver()
|
|
- if err != nil {
|
|
+ if err := cc.parseTargetAndFindResolver(); err != nil {
|
|
return nil, err
|
|
}
|
|
- cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts)
|
|
- if err != nil {
|
|
+ if err = cc.determineAuthority(); err != nil {
|
|
return nil, err
|
|
}
|
|
- channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
|
|
|
|
- if cc.dopts.scChan != nil && !scSet {
|
|
+ if cc.dopts.scChan != nil {
|
|
// Blocking wait for the initial service config.
|
|
select {
|
|
case sc, ok := <-cc.dopts.scChan:
|
|
@@ -278,57 +257,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|
go cc.scWatcher()
|
|
}
|
|
|
|
+ // This creates the name resolver, load balancer, blocking picker etc.
|
|
+ if err := cc.exitIdleMode(); err != nil {
|
|
+ return nil, err
|
|
+ }
|
|
+
|
|
+ // Configure idleness support with configured idle timeout or default idle
|
|
+ // timeout duration. Idleness can be explicitly disabled by the user, by
|
|
+ // setting the dial option to 0.
|
|
+ cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout)
|
|
+
|
|
+ // Return early for non-blocking dials.
|
|
+ if !cc.dopts.block {
|
|
+ return cc, nil
|
|
+ }
|
|
+
|
|
+ // A blocking dial blocks until the clientConn is ready.
|
|
+ for {
|
|
+ s := cc.GetState()
|
|
+ if s == connectivity.Idle {
|
|
+ cc.Connect()
|
|
+ }
|
|
+ if s == connectivity.Ready {
|
|
+ return cc, nil
|
|
+ } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
|
|
+ if err = cc.connectionError(); err != nil {
|
|
+ terr, ok := err.(interface {
|
|
+ Temporary() bool
|
|
+ })
|
|
+ if ok && !terr.Temporary() {
|
|
+ return nil, err
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if !cc.WaitForStateChange(ctx, s) {
|
|
+ // ctx got timeout or canceled.
|
|
+ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
|
|
+ return nil, err
|
|
+ }
|
|
+ return nil, ctx.Err()
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+// addTraceEvent is a helper method to add a trace event on the channel. If the
|
|
+// channel is a nested one, the same event is also added on the parent channel.
|
|
+func (cc *ClientConn) addTraceEvent(msg string) {
|
|
+ ted := &channelz.TraceEventDesc{
|
|
+ Desc: fmt.Sprintf("Channel %s", msg),
|
|
+ Severity: channelz.CtInfo,
|
|
+ }
|
|
+ if cc.dopts.channelzParentID != nil {
|
|
+ ted.Parent = &channelz.TraceEventDesc{
|
|
+ Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg),
|
|
+ Severity: channelz.CtInfo,
|
|
+ }
|
|
+ }
|
|
+ channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
|
|
+}
|
|
+
|
|
+// exitIdleMode moves the channel out of idle mode by recreating the name
|
|
+// resolver and load balancer.
|
|
+func (cc *ClientConn) exitIdleMode() error {
|
|
+ cc.mu.Lock()
|
|
+ if cc.conns == nil {
|
|
+ cc.mu.Unlock()
|
|
+ return errConnClosing
|
|
+ }
|
|
+ if cc.idlenessState != ccIdlenessStateIdle {
|
|
+ cc.mu.Unlock()
|
|
+ logger.Info("ClientConn asked to exit idle mode when not in idle mode")
|
|
+ return nil
|
|
+ }
|
|
+
|
|
+ defer func() {
|
|
+ // When Close() and exitIdleMode() race against each other, one of the
|
|
+ // following two can happen:
|
|
+ // - Close() wins the race and runs first. exitIdleMode() runs after, and
|
|
+ // sees that the ClientConn is already closed and hence returns early.
|
|
+ // - exitIdleMode() wins the race and runs first and recreates the balancer
|
|
+ // and releases the lock before recreating the resolver. If Close() runs
|
|
+ // in this window, it will wait for exitIdleMode to complete.
|
|
+ //
|
|
+ // We achieve this synchronization using the below condition variable.
|
|
+ cc.mu.Lock()
|
|
+ cc.idlenessState = ccIdlenessStateActive
|
|
+ cc.exitIdleCond.Signal()
|
|
+ cc.mu.Unlock()
|
|
+ }()
|
|
+
|
|
+ cc.idlenessState = ccIdlenessStateExitingIdle
|
|
+ exitedIdle := false
|
|
+ if cc.blockingpicker == nil {
|
|
+ cc.blockingpicker = newPickerWrapper()
|
|
+ } else {
|
|
+ cc.blockingpicker.exitIdleMode()
|
|
+ exitedIdle = true
|
|
+ }
|
|
+
|
|
var credsClone credentials.TransportCredentials
|
|
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
|
|
credsClone = creds.Clone()
|
|
}
|
|
- cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
|
|
- DialCreds: credsClone,
|
|
- CredsBundle: cc.dopts.copts.CredsBundle,
|
|
- Dialer: cc.dopts.copts.Dialer,
|
|
- Authority: cc.authority,
|
|
- CustomUserAgent: cc.dopts.copts.UserAgent,
|
|
- ChannelzParentID: cc.channelzID,
|
|
- Target: cc.parsedTarget,
|
|
- })
|
|
+ if cc.balancerWrapper == nil {
|
|
+ cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
|
|
+ DialCreds: credsClone,
|
|
+ CredsBundle: cc.dopts.copts.CredsBundle,
|
|
+ Dialer: cc.dopts.copts.Dialer,
|
|
+ Authority: cc.authority,
|
|
+ CustomUserAgent: cc.dopts.copts.UserAgent,
|
|
+ ChannelzParentID: cc.channelzID,
|
|
+ Target: cc.parsedTarget,
|
|
+ })
|
|
+ } else {
|
|
+ cc.balancerWrapper.exitIdleMode()
|
|
+ }
|
|
+ cc.firstResolveEvent = grpcsync.NewEvent()
|
|
+ cc.mu.Unlock()
|
|
|
|
- // Build the resolver.
|
|
- rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
|
|
- if err != nil {
|
|
- return nil, fmt.Errorf("failed to build resolver: %v", err)
|
|
+ // This needs to be called without cc.mu because this builds a new resolver
|
|
+ // which might update state or report error inline which needs to be handled
|
|
+ // by cc.updateResolverState() which also grabs cc.mu.
|
|
+ if err := cc.initResolverWrapper(credsClone); err != nil {
|
|
+ return err
|
|
}
|
|
+
|
|
+ if exitedIdle {
|
|
+ cc.addTraceEvent("exiting idle mode")
|
|
+ }
|
|
+ return nil
|
|
+}
|
|
+
|
|
+// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
|
|
+// name resolver, load balancer and any subchannels.
|
|
+func (cc *ClientConn) enterIdleMode() error {
|
|
cc.mu.Lock()
|
|
- cc.resolverWrapper = rWrapper
|
|
+ if cc.conns == nil {
|
|
+ cc.mu.Unlock()
|
|
+ return ErrClientConnClosing
|
|
+ }
|
|
+ if cc.idlenessState != ccIdlenessStateActive {
|
|
+ logger.Error("ClientConn asked to enter idle mode when not active")
|
|
+ return nil
|
|
+ }
|
|
+
|
|
+ // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
|
|
+ // of setting it to nil here, we recreate the map. This also means that we
|
|
+ // don't have to do this when exiting idle mode.
|
|
+ conns := cc.conns
|
|
+ cc.conns = make(map[*addrConn]struct{})
|
|
+
|
|
+ // TODO: Currently, we close the resolver wrapper upon entering idle mode
|
|
+ // and create a new one upon exiting idle mode. This means that the
|
|
+ // `cc.resolverWrapper` field would be overwritten everytime we exit idle
|
|
+ // mode. While this means that we need to hold `cc.mu` when accessing
|
|
+ // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
|
|
+ // try to do the same for the balancer and picker wrappers too.
|
|
+ cc.resolverWrapper.close()
|
|
+ cc.blockingpicker.enterIdleMode()
|
|
+ cc.balancerWrapper.enterIdleMode()
|
|
+ cc.csMgr.updateState(connectivity.Idle)
|
|
+ cc.idlenessState = ccIdlenessStateIdle
|
|
cc.mu.Unlock()
|
|
|
|
- // A blocking dial blocks until the clientConn is ready.
|
|
- if cc.dopts.block {
|
|
- for {
|
|
- cc.Connect()
|
|
- s := cc.GetState()
|
|
- if s == connectivity.Ready {
|
|
- break
|
|
- } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
|
|
- if err = cc.connectionError(); err != nil {
|
|
- terr, ok := err.(interface {
|
|
- Temporary() bool
|
|
- })
|
|
- if ok && !terr.Temporary() {
|
|
- return nil, err
|
|
- }
|
|
- }
|
|
- }
|
|
- if !cc.WaitForStateChange(ctx, s) {
|
|
- // ctx got timeout or canceled.
|
|
- if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
|
|
- return nil, err
|
|
- }
|
|
- return nil, ctx.Err()
|
|
+ go func() {
|
|
+ cc.addTraceEvent("entering idle mode")
|
|
+ for ac := range conns {
|
|
+ ac.tearDown(errConnIdling)
|
|
+ }
|
|
+ }()
|
|
+ return nil
|
|
+}
|
|
+
|
|
+// validateTransportCredentials performs a series of checks on the configured
|
|
+// transport credentials. It returns a non-nil error if any of these conditions
|
|
+// are met:
|
|
+// - no transport creds and no creds bundle is configured
|
|
+// - both transport creds and creds bundle are configured
|
|
+// - creds bundle is configured, but it lacks a transport credentials
|
|
+// - insecure transport creds configured alongside call creds that require
|
|
+// transport level security
|
|
+//
|
|
+// If none of the above conditions are met, the configured credentials are
|
|
+// deemed valid and a nil error is returned.
|
|
+func (cc *ClientConn) validateTransportCredentials() error {
|
|
+ if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
|
|
+ return errNoTransportSecurity
|
|
+ }
|
|
+ if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
|
|
+ return errTransportCredsAndBundle
|
|
+ }
|
|
+ if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
|
|
+ return errNoTransportCredsInBundle
|
|
+ }
|
|
+ transportCreds := cc.dopts.copts.TransportCredentials
|
|
+ if transportCreds == nil {
|
|
+ transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
|
|
+ }
|
|
+ if transportCreds.Info().SecurityProtocol == "insecure" {
|
|
+ for _, cd := range cc.dopts.copts.PerRPCCredentials {
|
|
+ if cd.RequireTransportSecurity() {
|
|
+ return errTransportCredentialsMissing
|
|
}
|
|
}
|
|
}
|
|
+ return nil
|
|
+}
|
|
|
|
- return cc, nil
|
|
+// channelzRegistration registers the newly created ClientConn with channelz and
|
|
+// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`.
|
|
+// A channelz trace event is emitted for ClientConn creation. If the newly
|
|
+// created ClientConn is a nested one, i.e a valid parent ClientConn ID is
|
|
+// specified via a dial option, the trace event is also added to the parent.
|
|
+//
|
|
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
|
|
+func (cc *ClientConn) channelzRegistration(target string) {
|
|
+ cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
|
|
+ cc.addTraceEvent("created")
|
|
+ cc.csMgr.channelzID = cc.channelzID
|
|
}
|
|
|
|
// chainUnaryClientInterceptors chains all unary client interceptors into one.
|
|
@@ -474,7 +620,9 @@ type ClientConn struct {
|
|
authority string // See determineAuthority().
|
|
dopts dialOptions // Default and user specified dial options.
|
|
channelzID *channelz.Identifier // Channelz identifier for the channel.
|
|
+ resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
|
|
balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
|
|
+ idlenessMgr idlenessManager
|
|
|
|
// The following provide their own synchronization, and therefore don't
|
|
// require cc.mu to be held to access them.
|
|
@@ -495,15 +643,35 @@ type ClientConn struct {
|
|
sc *ServiceConfig // Latest service config received from the resolver.
|
|
conns map[*addrConn]struct{} // Set to nil on close.
|
|
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
|
|
+ idlenessState ccIdlenessState // Tracks idleness state of the channel.
|
|
+ exitIdleCond *sync.Cond // Signalled when channel exits idle.
|
|
|
|
lceMu sync.Mutex // protects lastConnectionError
|
|
lastConnectionError error
|
|
}
|
|
|
|
+// ccIdlenessState tracks the idleness state of the channel.
|
|
+//
|
|
+// Channels start off in `active` and move to `idle` after a period of
|
|
+// inactivity. When moving back to `active` upon an incoming RPC, they
|
|
+// transition through `exiting_idle`. This state is useful for synchronization
|
|
+// with Close().
|
|
+//
|
|
+// This state tracking is mostly for self-protection. The idlenessManager is
|
|
+// expected to keep track of the state as well, and is expected not to call into
|
|
+// the ClientConn unnecessarily.
|
|
+type ccIdlenessState int8
|
|
+
|
|
+const (
|
|
+ ccIdlenessStateActive ccIdlenessState = iota
|
|
+ ccIdlenessStateIdle
|
|
+ ccIdlenessStateExitingIdle
|
|
+)
|
|
+
|
|
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
|
|
// ctx expires. A true value is returned in former case and false in latter.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
@@ -522,7 +690,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
|
|
|
|
// GetState returns the connectivity.State of ClientConn.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
|
|
// release.
|
|
@@ -534,12 +702,15 @@ func (cc *ClientConn) GetState() connectivity.State {
|
|
// the channel is idle. Does not wait for the connection attempts to begin
|
|
// before returning.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
|
|
// release.
|
|
func (cc *ClientConn) Connect() {
|
|
- cc.balancerWrapper.exitIdle()
|
|
+ cc.exitIdleMode()
|
|
+ // If the ClientConn was not in idle mode, we need to call ExitIdle on the
|
|
+ // LB policy so that connections can be created.
|
|
+ cc.balancerWrapper.exitIdleMode()
|
|
}
|
|
|
|
func (cc *ClientConn) scWatcher() {
|
|
@@ -708,6 +879,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
|
|
dopts: cc.dopts,
|
|
czData: new(channelzData),
|
|
resetBackoff: make(chan struct{}),
|
|
+ stateChan: make(chan struct{}),
|
|
}
|
|
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
|
|
// Track ac in cc. This needs to be done before any getTransport(...) is called.
|
|
@@ -761,7 +933,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
|
|
|
|
// Target returns the target string of the ClientConn.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
@@ -788,16 +960,19 @@ func (cc *ClientConn) incrCallsFailed() {
|
|
func (ac *addrConn) connect() error {
|
|
ac.mu.Lock()
|
|
if ac.state == connectivity.Shutdown {
|
|
+ if logger.V(2) {
|
|
+ logger.Infof("connect called on shutdown addrConn; ignoring.")
|
|
+ }
|
|
ac.mu.Unlock()
|
|
return errConnClosing
|
|
}
|
|
if ac.state != connectivity.Idle {
|
|
+ if logger.V(2) {
|
|
+ logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
|
|
+ }
|
|
ac.mu.Unlock()
|
|
return nil
|
|
}
|
|
- // Update connectivity state within the lock to prevent subsequent or
|
|
- // concurrent calls from resetting the transport more than once.
|
|
- ac.updateConnectivityState(connectivity.Connecting, nil)
|
|
ac.mu.Unlock()
|
|
|
|
ac.resetTransport()
|
|
@@ -816,58 +991,62 @@ func equalAddresses(a, b []resolver.Address) bool {
|
|
return true
|
|
}
|
|
|
|
-// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
|
|
-//
|
|
-// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
|
|
-// addresses will be picked up by retry in the next iteration after backoff.
|
|
-//
|
|
-// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
|
|
-//
|
|
-// If the addresses is the same as the old list, it does nothing and returns
|
|
-// true.
|
|
-//
|
|
-// If ac is Connecting, it returns false. The caller should tear down the ac and
|
|
-// create a new one. Note that the backoff will be reset when this happens.
|
|
-//
|
|
-// If ac is Ready, it checks whether current connected address of ac is in the
|
|
-// new addrs list.
|
|
-// - If true, it updates ac.addrs and returns true. The ac will keep using
|
|
-// the existing connection.
|
|
-// - If false, it does nothing and returns false.
|
|
-func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
|
|
+// updateAddrs updates ac.addrs with the new addresses list and handles active
|
|
+// connections or connection attempts.
|
|
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
|
|
ac.mu.Lock()
|
|
- defer ac.mu.Unlock()
|
|
- channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
|
+ channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
|
+
|
|
+ if equalAddresses(ac.addrs, addrs) {
|
|
+ ac.mu.Unlock()
|
|
+ return
|
|
+ }
|
|
+
|
|
+ ac.addrs = addrs
|
|
+
|
|
if ac.state == connectivity.Shutdown ||
|
|
ac.state == connectivity.TransientFailure ||
|
|
ac.state == connectivity.Idle {
|
|
- ac.addrs = addrs
|
|
- return true
|
|
+ // We were not connecting, so do nothing but update the addresses.
|
|
+ ac.mu.Unlock()
|
|
+ return
|
|
}
|
|
|
|
- if equalAddresses(ac.addrs, addrs) {
|
|
- return true
|
|
+ if ac.state == connectivity.Ready {
|
|
+ // Try to find the connected address.
|
|
+ for _, a := range addrs {
|
|
+ a.ServerName = ac.cc.getServerName(a)
|
|
+ if a.Equal(ac.curAddr) {
|
|
+ // We are connected to a valid address, so do nothing but
|
|
+ // update the addresses.
|
|
+ ac.mu.Unlock()
|
|
+ return
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
- if ac.state == connectivity.Connecting {
|
|
- return false
|
|
- }
|
|
+ // We are either connected to the wrong address or currently connecting.
|
|
+ // Stop the current iteration and restart.
|
|
|
|
- // ac.state is Ready, try to find the connected address.
|
|
- var curAddrFound bool
|
|
- for _, a := range addrs {
|
|
- a.ServerName = ac.cc.getServerName(a)
|
|
- if reflect.DeepEqual(ac.curAddr, a) {
|
|
- curAddrFound = true
|
|
- break
|
|
- }
|
|
+ ac.cancel()
|
|
+ ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
|
|
+
|
|
+ // We have to defer here because GracefulClose => Close => onClose, which
|
|
+ // requires locking ac.mu.
|
|
+ if ac.transport != nil {
|
|
+ defer ac.transport.GracefulClose()
|
|
+ ac.transport = nil
|
|
}
|
|
- channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
|
|
- if curAddrFound {
|
|
- ac.addrs = addrs
|
|
+
|
|
+ if len(addrs) == 0 {
|
|
+ ac.updateConnectivityState(connectivity.Idle, nil)
|
|
}
|
|
|
|
- return curAddrFound
|
|
+ ac.mu.Unlock()
|
|
+
|
|
+ // Since we were connecting/connected, we should start a new connection
|
|
+ // attempt.
|
|
+ go ac.resetTransport()
|
|
}
|
|
|
|
// getServerName determines the serverName to be used in the connection
|
|
@@ -928,7 +1107,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
|
|
return cc.sc.healthCheckConfig
|
|
}
|
|
|
|
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
|
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
|
|
return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
|
|
Ctx: ctx,
|
|
FullMethodName: method,
|
|
@@ -998,7 +1177,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
|
|
// However, if a previously unavailable network becomes available, this may be
|
|
// used to trigger an immediate reconnect.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
@@ -1020,39 +1199,40 @@ func (cc *ClientConn) Close() error {
|
|
cc.mu.Unlock()
|
|
return ErrClientConnClosing
|
|
}
|
|
+
|
|
+ for cc.idlenessState == ccIdlenessStateExitingIdle {
|
|
+ cc.exitIdleCond.Wait()
|
|
+ }
|
|
+
|
|
conns := cc.conns
|
|
cc.conns = nil
|
|
cc.csMgr.updateState(connectivity.Shutdown)
|
|
|
|
+ pWrapper := cc.blockingpicker
|
|
rWrapper := cc.resolverWrapper
|
|
- cc.resolverWrapper = nil
|
|
bWrapper := cc.balancerWrapper
|
|
+ idlenessMgr := cc.idlenessMgr
|
|
cc.mu.Unlock()
|
|
|
|
// The order of closing matters here since the balancer wrapper assumes the
|
|
// picker is closed before it is closed.
|
|
- cc.blockingpicker.close()
|
|
+ if pWrapper != nil {
|
|
+ pWrapper.close()
|
|
+ }
|
|
if bWrapper != nil {
|
|
bWrapper.close()
|
|
}
|
|
if rWrapper != nil {
|
|
rWrapper.close()
|
|
}
|
|
+ if idlenessMgr != nil {
|
|
+ idlenessMgr.close()
|
|
+ }
|
|
|
|
for ac := range conns {
|
|
ac.tearDown(ErrClientConnClosing)
|
|
}
|
|
- ted := &channelz.TraceEventDesc{
|
|
- Desc: "Channel deleted",
|
|
- Severity: channelz.CtInfo,
|
|
- }
|
|
- if cc.dopts.channelzParentID != nil {
|
|
- ted.Parent = &channelz.TraceEventDesc{
|
|
- Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
|
|
- Severity: channelz.CtInfo,
|
|
- }
|
|
- }
|
|
- channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
|
|
+ cc.addTraceEvent("deleted")
|
|
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
|
|
// trace reference to the entity being deleted, and thus prevent it from being
|
|
// deleted right away.
|
|
@@ -1082,7 +1262,8 @@ type addrConn struct {
|
|
addrs []resolver.Address // All addresses that the resolver resolved to.
|
|
|
|
// Use updateConnectivityState for updating addrConn's connectivity state.
|
|
- state connectivity.State
|
|
+ state connectivity.State
|
|
+ stateChan chan struct{} // closed and recreated on every state change.
|
|
|
|
backoffIdx int // Needs to be stateful for resetConnectBackoff.
|
|
resetBackoff chan struct{}
|
|
@@ -1096,8 +1277,15 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
|
|
if ac.state == s {
|
|
return
|
|
}
|
|
+ // When changing states, reset the state change channel.
|
|
+ close(ac.stateChan)
|
|
+ ac.stateChan = make(chan struct{})
|
|
ac.state = s
|
|
- channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
|
|
+ if lastErr == nil {
|
|
+ channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
|
|
+ } else {
|
|
+ channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
|
|
+ }
|
|
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
|
|
}
|
|
|
|
@@ -1117,7 +1305,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
|
|
|
|
func (ac *addrConn) resetTransport() {
|
|
ac.mu.Lock()
|
|
- if ac.state == connectivity.Shutdown {
|
|
+ acCtx := ac.ctx
|
|
+ if acCtx.Err() != nil {
|
|
ac.mu.Unlock()
|
|
return
|
|
}
|
|
@@ -1145,15 +1334,14 @@ func (ac *addrConn) resetTransport() {
|
|
ac.updateConnectivityState(connectivity.Connecting, nil)
|
|
ac.mu.Unlock()
|
|
|
|
- if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
|
|
+ if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
|
|
ac.cc.resolveNow(resolver.ResolveNowOptions{})
|
|
// After exhausting all addresses, the addrConn enters
|
|
// TRANSIENT_FAILURE.
|
|
- ac.mu.Lock()
|
|
- if ac.state == connectivity.Shutdown {
|
|
- ac.mu.Unlock()
|
|
+ if acCtx.Err() != nil {
|
|
return
|
|
}
|
|
+ ac.mu.Lock()
|
|
ac.updateConnectivityState(connectivity.TransientFailure, err)
|
|
|
|
// Backoff.
|
|
@@ -1168,13 +1356,13 @@ func (ac *addrConn) resetTransport() {
|
|
ac.mu.Unlock()
|
|
case <-b:
|
|
timer.Stop()
|
|
- case <-ac.ctx.Done():
|
|
+ case <-acCtx.Done():
|
|
timer.Stop()
|
|
return
|
|
}
|
|
|
|
ac.mu.Lock()
|
|
- if ac.state != connectivity.Shutdown {
|
|
+ if acCtx.Err() == nil {
|
|
ac.updateConnectivityState(connectivity.Idle, err)
|
|
}
|
|
ac.mu.Unlock()
|
|
@@ -1189,14 +1377,13 @@ func (ac *addrConn) resetTransport() {
|
|
// tryAllAddrs tries to creates a connection to the addresses, and stop when at
|
|
// the first successful one. It returns an error if no address was successfully
|
|
// connected, or updates ac appropriately with the new transport.
|
|
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
|
|
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
|
|
var firstConnErr error
|
|
for _, addr := range addrs {
|
|
- ac.mu.Lock()
|
|
- if ac.state == connectivity.Shutdown {
|
|
- ac.mu.Unlock()
|
|
+ if ctx.Err() != nil {
|
|
return errConnClosing
|
|
}
|
|
+ ac.mu.Lock()
|
|
|
|
ac.cc.mu.RLock()
|
|
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
|
|
@@ -1210,7 +1397,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
|
|
|
channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
|
|
|
|
- err := ac.createTransport(addr, copts, connectDeadline)
|
|
+ err := ac.createTransport(ctx, addr, copts, connectDeadline)
|
|
if err == nil {
|
|
return nil
|
|
}
|
|
@@ -1227,112 +1414,84 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
|
// createTransport creates a connection to addr. It returns an error if the
|
|
// address was not successfully connected, or updates ac appropriately with the
|
|
// new transport.
|
|
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
|
|
- // TODO: Delete prefaceReceived and move the logic to wait for it into the
|
|
- // transport.
|
|
- prefaceReceived := grpcsync.NewEvent()
|
|
- connClosed := grpcsync.NewEvent()
|
|
-
|
|
+func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
|
|
addr.ServerName = ac.cc.getServerName(addr)
|
|
- hctx, hcancel := context.WithCancel(ac.ctx)
|
|
- hcStarted := false // protected by ac.mu
|
|
+ hctx, hcancel := context.WithCancel(ctx)
|
|
|
|
- onClose := func() {
|
|
+ onClose := func(r transport.GoAwayReason) {
|
|
ac.mu.Lock()
|
|
defer ac.mu.Unlock()
|
|
- defer connClosed.Fire()
|
|
- defer hcancel()
|
|
- if !hcStarted || hctx.Err() != nil {
|
|
- // We didn't start the health check or set the state to READY, so
|
|
- // no need to do anything else here.
|
|
- //
|
|
- // OR, we have already cancelled the health check context, meaning
|
|
- // we have already called onClose once for this transport. In this
|
|
- // case it would be dangerous to clear the transport and update the
|
|
- // state, since there may be a new transport in this addrConn.
|
|
+ // adjust params based on GoAwayReason
|
|
+ ac.adjustParams(r)
|
|
+ if ctx.Err() != nil {
|
|
+ // Already shut down or connection attempt canceled. tearDown() or
|
|
+ // updateAddrs() already cleared the transport and canceled hctx
|
|
+ // via ac.ctx, and we expected this connection to be closed, so do
|
|
+ // nothing here.
|
|
+ return
|
|
+ }
|
|
+ hcancel()
|
|
+ if ac.transport == nil {
|
|
+ // We're still connecting to this address, which could error. Do
|
|
+ // not update the connectivity state or resolve; these will happen
|
|
+ // at the end of the tryAllAddrs connection loop in the event of an
|
|
+ // error.
|
|
return
|
|
}
|
|
ac.transport = nil
|
|
- // Refresh the name resolver
|
|
+ // Refresh the name resolver on any connection loss.
|
|
ac.cc.resolveNow(resolver.ResolveNowOptions{})
|
|
- if ac.state != connectivity.Shutdown {
|
|
- ac.updateConnectivityState(connectivity.Idle, nil)
|
|
- }
|
|
+ // Always go idle and wait for the LB policy to initiate a new
|
|
+ // connection attempt.
|
|
+ ac.updateConnectivityState(connectivity.Idle, nil)
|
|
}
|
|
|
|
- onGoAway := func(r transport.GoAwayReason) {
|
|
- ac.mu.Lock()
|
|
- ac.adjustParams(r)
|
|
- ac.mu.Unlock()
|
|
- onClose()
|
|
- }
|
|
-
|
|
- connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
|
|
+ connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
|
|
defer cancel()
|
|
copts.ChannelzParentID = ac.channelzID
|
|
|
|
- newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
|
|
+ newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
|
|
if err != nil {
|
|
+ if logger.V(2) {
|
|
+ logger.Infof("Creating new client transport to %q: %v", addr, err)
|
|
+ }
|
|
// newTr is either nil, or closed.
|
|
hcancel()
|
|
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
|
|
return err
|
|
}
|
|
|
|
- select {
|
|
- case <-connectCtx.Done():
|
|
- // We didn't get the preface in time.
|
|
+ ac.mu.Lock()
|
|
+ defer ac.mu.Unlock()
|
|
+ if ctx.Err() != nil {
|
|
+ // This can happen if the subConn was removed while in `Connecting`
|
|
+ // state. tearDown() would have set the state to `Shutdown`, but
|
|
+ // would not have closed the transport since ac.transport would not
|
|
+ // have been set at that point.
|
|
+ //
|
|
+ // We run this in a goroutine because newTr.Close() calls onClose()
|
|
+ // inline, which requires locking ac.mu.
|
|
+ //
|
|
// The error we pass to Close() is immaterial since there are no open
|
|
// streams at this point, so no trailers with error details will be sent
|
|
// out. We just need to pass a non-nil error.
|
|
- newTr.Close(transport.ErrConnClosing)
|
|
- if connectCtx.Err() == context.DeadlineExceeded {
|
|
- err := errors.New("failed to receive server preface within timeout")
|
|
- channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err)
|
|
- return err
|
|
- }
|
|
+ //
|
|
+ // This can also happen when updateAddrs is called during a connection
|
|
+ // attempt.
|
|
+ go newTr.Close(transport.ErrConnClosing)
|
|
return nil
|
|
- case <-prefaceReceived.Done():
|
|
- // We got the preface - huzzah! things are good.
|
|
- ac.mu.Lock()
|
|
- defer ac.mu.Unlock()
|
|
- if connClosed.HasFired() {
|
|
- // onClose called first; go idle but do nothing else.
|
|
- if ac.state != connectivity.Shutdown {
|
|
- ac.updateConnectivityState(connectivity.Idle, nil)
|
|
- }
|
|
- return nil
|
|
- }
|
|
- if ac.state == connectivity.Shutdown {
|
|
- // This can happen if the subConn was removed while in `Connecting`
|
|
- // state. tearDown() would have set the state to `Shutdown`, but
|
|
- // would not have closed the transport since ac.transport would not
|
|
- // been set at that point.
|
|
- //
|
|
- // We run this in a goroutine because newTr.Close() calls onClose()
|
|
- // inline, which requires locking ac.mu.
|
|
- //
|
|
- // The error we pass to Close() is immaterial since there are no open
|
|
- // streams at this point, so no trailers with error details will be sent
|
|
- // out. We just need to pass a non-nil error.
|
|
- go newTr.Close(transport.ErrConnClosing)
|
|
- return nil
|
|
- }
|
|
- ac.curAddr = addr
|
|
- ac.transport = newTr
|
|
- hcStarted = true
|
|
- ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
|
|
+ }
|
|
+ if hctx.Err() != nil {
|
|
+ // onClose was already called for this connection, but the connection
|
|
+ // was successfully established first. Consider it a success and set
|
|
+ // the new state to Idle.
|
|
+ ac.updateConnectivityState(connectivity.Idle, nil)
|
|
return nil
|
|
- case <-connClosed.Done():
|
|
- // The transport has already closed. If we received the preface, too,
|
|
- // this is not an error.
|
|
- select {
|
|
- case <-prefaceReceived.Done():
|
|
- return nil
|
|
- default:
|
|
- return errors.New("connection closed before server preface received")
|
|
- }
|
|
}
|
|
+ ac.curAddr = addr
|
|
+ ac.transport = newTr
|
|
+ ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
|
|
+ return nil
|
|
}
|
|
|
|
// startHealthCheck starts the health checking stream (RPC) to watch the health
|
|
@@ -1402,7 +1561,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
|
if status.Code(err) == codes.Unimplemented {
|
|
channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
|
|
} else {
|
|
- channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
|
|
+ channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
|
|
}
|
|
}
|
|
}()
|
|
@@ -1426,6 +1585,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
|
|
return nil
|
|
}
|
|
|
|
+// getTransport waits until the addrconn is ready and returns the transport.
|
|
+// If the context expires first, returns an appropriate status. If the
|
|
+// addrConn is stopped first, returns an Unavailable status error.
|
|
+func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
|
|
+ for ctx.Err() == nil {
|
|
+ ac.mu.Lock()
|
|
+ t, state, sc := ac.transport, ac.state, ac.stateChan
|
|
+ ac.mu.Unlock()
|
|
+ if state == connectivity.Ready {
|
|
+ return t, nil
|
|
+ }
|
|
+ if state == connectivity.Shutdown {
|
|
+ return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
|
|
+ }
|
|
+
|
|
+ select {
|
|
+ case <-ctx.Done():
|
|
+ case <-sc:
|
|
+ }
|
|
+ }
|
|
+ return nil, status.FromContextError(ctx.Err()).Err()
|
|
+}
|
|
+
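The stateChan used above follows a common Go signaling pattern: a channel that is closed to wake every current waiter and immediately recreated for the next transition, which is what updateConnectivityState and getTransport rely on. A self-contained sketch of that pattern with made-up names (watched, set, get), independent of the grpc types:

package main

import (
	"fmt"
	"sync"
	"time"
)

// watched holds a value plus a channel that is closed whenever the value
// changes, so any number of goroutines can wait for "the next change".
type watched struct {
	mu      sync.Mutex
	state   string
	changed chan struct{} // closed and recreated on every state change
}

func newWatched(initial string) *watched {
	return &watched{state: initial, changed: make(chan struct{})}
}

func (w *watched) set(s string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.state == s {
		return
	}
	w.state = s
	close(w.changed)                // wake everyone currently waiting
	w.changed = make(chan struct{}) // fresh channel for the next change
}

// get returns the current state and the channel to wait on for the next change.
func (w *watched) get() (string, <-chan struct{}) {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.state, w.changed
}

func main() {
	w := newWatched("IDLE")
	go func() {
		time.Sleep(10 * time.Millisecond)
		w.set("READY")
	}()
	for {
		s, ch := w.get()
		if s == "READY" {
			fmt.Println("ready")
			return
		}
		<-ch // block until the state changes again
	}
}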
|
|
// tearDown starts to tear down the addrConn.
|
|
//
|
|
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
|
|
@@ -1553,6 +1735,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
|
|
// referenced by users.
|
|
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
|
|
|
|
+// getResolver finds the scheme in the cc's resolvers or the global registry.
|
|
+// scheme should always be lowercase (typically by virtue of url.Parse()
|
|
+// performing proper RFC3986 behavior).
|
|
func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
|
|
for _, rb := range cc.dopts.resolvers {
|
|
if scheme == rb.Scheme() {
|
|
@@ -1574,7 +1759,14 @@ func (cc *ClientConn) connectionError() error {
|
|
return cc.lastConnectionError
|
|
}
|
|
|
|
-func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
|
|
+// parseTargetAndFindResolver parses the user's dial target and stores the
|
|
+// parsed target in `cc.parsedTarget`.
|
|
+//
|
|
+// The resolver to use is determined based on the scheme in the parsed target
|
|
+// and the same is stored in `cc.resolverBuilder`.
|
|
+//
|
|
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
|
|
+func (cc *ClientConn) parseTargetAndFindResolver() error {
|
|
channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
|
|
|
|
var rb resolver.Builder
|
|
@@ -1583,10 +1775,11 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
|
|
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
|
|
} else {
|
|
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
|
|
- rb = cc.getResolver(parsedTarget.Scheme)
|
|
+ rb = cc.getResolver(parsedTarget.URL.Scheme)
|
|
if rb != nil {
|
|
cc.parsedTarget = parsedTarget
|
|
- return rb, nil
|
|
+ cc.resolverBuilder = rb
|
|
+ return nil
|
|
}
|
|
}
|
|
|
|
@@ -1601,42 +1794,30 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
|
|
parsedTarget, err = parseTarget(canonicalTarget)
|
|
if err != nil {
|
|
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
|
|
- return nil, err
|
|
+ return err
|
|
}
|
|
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
|
|
- rb = cc.getResolver(parsedTarget.Scheme)
|
|
+ rb = cc.getResolver(parsedTarget.URL.Scheme)
|
|
if rb == nil {
|
|
- return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme)
|
|
+ return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
|
|
}
|
|
cc.parsedTarget = parsedTarget
|
|
- return rb, nil
|
|
+ cc.resolverBuilder = rb
|
|
+ return nil
|
|
}
|
|
|
|
// parseTarget uses RFC 3986 semantics to parse the given target into a
|
|
-// resolver.Target struct containing scheme, authority and endpoint. Query
|
|
+// resolver.Target struct containing scheme, authority and url. Query
|
|
// params are stripped from the endpoint.
|
|
func parseTarget(target string) (resolver.Target, error) {
|
|
u, err := url.Parse(target)
|
|
if err != nil {
|
|
return resolver.Target{}, err
|
|
}
|
|
- // For targets of the form "[scheme]://[authority]/endpoint, the endpoint
|
|
- // value returned from url.Parse() contains a leading "/". Although this is
|
|
- // in accordance with RFC 3986, we do not want to break existing resolver
|
|
- // implementations which expect the endpoint without the leading "/". So, we
|
|
- // end up stripping the leading "/" here. But this will result in an
|
|
- // incorrect parsing for something like "unix:///path/to/socket". Since we
|
|
- // own the "unix" resolver, we can workaround in the unix resolver by using
|
|
- // the `URL` field instead of the `Endpoint` field.
|
|
- endpoint := u.Path
|
|
- if endpoint == "" {
|
|
- endpoint = u.Opaque
|
|
- }
|
|
- endpoint = strings.TrimPrefix(endpoint, "/")
|
|
+
|
|
return resolver.Target{
|
|
Scheme: u.Scheme,
|
|
Authority: u.Host,
|
|
- Endpoint: endpoint,
|
|
URL: *u,
|
|
}, nil
|
|
}
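parseTarget now defers entirely to RFC 3986 parsing via url.Parse, with the unix resolver reading the URL field directly. A small standalone illustration of how typical gRPC targets split into scheme, authority and path (the target strings are examples only):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, target := range []string{
		"dns://8.8.8.8/example.com:443", // scheme "dns", authority "8.8.8.8", path "/example.com:443"
		"unix:///run/app.sock",          // scheme "unix", empty authority, path "/run/app.sock"
		"passthrough:///localhost:50051",
	} {
		u, err := url.Parse(target)
		if err != nil {
			fmt.Println(target, "->", err)
			continue
		}
		fmt.Printf("%-34s scheme=%q authority=%q path=%q opaque=%q\n",
			target, u.Scheme, u.Host, u.Path, u.Opaque)
	}
}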
|
|
@@ -1645,7 +1826,15 @@ func parseTarget(target string) (resolver.Target, error) {
|
|
// - user specified authority override using `WithAuthority` dial option
|
|
// - creds' notion of server name for the authentication handshake
|
|
// - endpoint from dial target of the form "scheme://[authority]/endpoint"
|
|
-func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
|
|
+//
|
|
+// Stores the determined authority in `cc.authority`.
|
|
+//
|
|
+// Returns a non-nil error if the authority returned by the transport
|
|
+// credentials do not match the authority configured through the dial option.
|
|
+//
|
|
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
|
|
+func (cc *ClientConn) determineAuthority() error {
|
|
+ dopts := cc.dopts
|
|
// Historically, we had two options for users to specify the serverName or
|
|
// authority for a channel. One was through the transport credentials
|
|
// (either in its constructor, or through the OverrideServerName() method).
|
|
@@ -1662,25 +1851,58 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err
|
|
}
|
|
authorityFromDialOption := dopts.authority
|
|
if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
|
|
- return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
|
|
+ return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
|
|
}
|
|
|
|
+ endpoint := cc.parsedTarget.Endpoint()
|
|
+ target := cc.target
|
|
switch {
|
|
case authorityFromDialOption != "":
|
|
- return authorityFromDialOption, nil
|
|
+ cc.authority = authorityFromDialOption
|
|
case authorityFromCreds != "":
|
|
- return authorityFromCreds, nil
|
|
+ cc.authority = authorityFromCreds
|
|
case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
|
|
// TODO: remove when the unix resolver implements optional interface to
|
|
// return channel authority.
|
|
- return "localhost", nil
|
|
+ cc.authority = "localhost"
|
|
case strings.HasPrefix(endpoint, ":"):
|
|
- return "localhost" + endpoint, nil
|
|
+ cc.authority = "localhost" + endpoint
|
|
default:
|
|
// TODO: Define an optional interface on the resolver builder to return
|
|
// the channel authority given the user's dial target. For resolvers
|
|
// which don't implement this interface, we will use the endpoint from
|
|
// "scheme://authority/endpoint" as the default authority.
|
|
- return endpoint, nil
|
|
+ cc.authority = endpoint
|
|
}
|
|
+ channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
|
|
+ return nil
|
|
+}
|
|
+
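A condensed sketch of the precedence that determineAuthority documents above, pulled out into a free-standing function for readability; pickAuthority and its arguments are made-up names for illustration, not grpc API:

package main

import (
	"fmt"
	"strings"
)

// pickAuthority mirrors the documented precedence: an explicit WithAuthority
// dial option wins, then the transport credentials' server name, then the
// special cases for unix sockets and ":port" endpoints, and finally the
// endpoint itself.
func pickAuthority(dialOptAuthority, credsServerName, target, endpoint string) string {
	switch {
	case dialOptAuthority != "":
		return dialOptAuthority
	case credsServerName != "":
		return credsServerName
	case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
		return "localhost"
	case strings.HasPrefix(endpoint, ":"):
		return "localhost" + endpoint
	default:
		return endpoint
	}
}

func main() {
	fmt.Println(pickAuthority("", "", "unix:///run/app.sock", "/run/app.sock"))                     // localhost
	fmt.Println(pickAuthority("", "", "dns:///:50051", ":50051"))                                   // localhost:50051
	fmt.Println(pickAuthority("", "", "dns:///example.com:443", "example.com:443"))                 // example.com:443
	fmt.Println(pickAuthority("override.example", "", "dns:///example.com:443", "example.com:443")) // override.example
}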
|
|
+// initResolverWrapper creates a ccResolverWrapper, which builds the name
|
|
+// resolver. This method grabs the lock to assign the newly built resolver
|
|
+// wrapper to the cc.resolverWrapper field.
|
|
+func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error {
|
|
+ rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{
|
|
+ target: cc.parsedTarget,
|
|
+ builder: cc.resolverBuilder,
|
|
+ bOpts: resolver.BuildOptions{
|
|
+ DisableServiceConfig: cc.dopts.disableServiceConfig,
|
|
+ DialCreds: creds,
|
|
+ CredsBundle: cc.dopts.copts.CredsBundle,
|
|
+ Dialer: cc.dopts.copts.Dialer,
|
|
+ },
|
|
+ channelzID: cc.channelzID,
|
|
+ })
|
|
+ if err != nil {
|
|
+ return fmt.Errorf("failed to build resolver: %v", err)
|
|
+ }
|
|
+ // Resolver implementations may report state update or error inline when
|
|
+ // built (or right after), and this is handled in cc.updateResolverState.
|
|
+ // Also, an error from the resolver might lead to a re-resolution request
|
|
+ // from the balancer, which is handled in resolveNow() where
|
|
+ // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here.
|
|
+ cc.mu.Lock()
|
|
+ cc.resolverWrapper = rw
|
|
+ cc.mu.Unlock()
|
|
+ return nil
|
|
}
|
|
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
|
|
index 0b206a578..934fac2b0 100644
|
|
--- a/vendor/google.golang.org/grpc/codes/code_string.go
|
|
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
|
|
@@ -18,7 +18,15 @@
|
|
|
|
package codes
|
|
|
|
-import "strconv"
|
|
+import (
|
|
+ "strconv"
|
|
+
|
|
+ "google.golang.org/grpc/internal"
|
|
+)
|
|
+
|
|
+func init() {
|
|
+ internal.CanonicalString = canonicalString
|
|
+}
|
|
|
|
func (c Code) String() string {
|
|
switch c {
|
|
@@ -60,3 +68,44 @@ func (c Code) String() string {
|
|
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
|
|
}
|
|
}
|
|
+
|
|
+func canonicalString(c Code) string {
|
|
+ switch c {
|
|
+ case OK:
|
|
+ return "OK"
|
|
+ case Canceled:
|
|
+ return "CANCELLED"
|
|
+ case Unknown:
|
|
+ return "UNKNOWN"
|
|
+ case InvalidArgument:
|
|
+ return "INVALID_ARGUMENT"
|
|
+ case DeadlineExceeded:
|
|
+ return "DEADLINE_EXCEEDED"
|
|
+ case NotFound:
|
|
+ return "NOT_FOUND"
|
|
+ case AlreadyExists:
|
|
+ return "ALREADY_EXISTS"
|
|
+ case PermissionDenied:
|
|
+ return "PERMISSION_DENIED"
|
|
+ case ResourceExhausted:
|
|
+ return "RESOURCE_EXHAUSTED"
|
|
+ case FailedPrecondition:
|
|
+ return "FAILED_PRECONDITION"
|
|
+ case Aborted:
|
|
+ return "ABORTED"
|
|
+ case OutOfRange:
|
|
+ return "OUT_OF_RANGE"
|
|
+ case Unimplemented:
|
|
+ return "UNIMPLEMENTED"
|
|
+ case Internal:
|
|
+ return "INTERNAL"
|
|
+ case Unavailable:
|
|
+ return "UNAVAILABLE"
|
|
+ case DataLoss:
|
|
+ return "DATA_LOSS"
|
|
+ case Unauthenticated:
|
|
+ return "UNAUTHENTICATED"
|
|
+ default:
|
|
+ return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
|
|
+ }
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
|
|
index 96ff1877e..5feac3aa0 100644
|
|
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
|
|
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
|
|
@@ -36,16 +36,16 @@ import (
|
|
// PerRPCCredentials defines the common interface for the credentials which need to
|
|
// attach security information to every RPC (e.g., oauth2).
|
|
type PerRPCCredentials interface {
|
|
- // GetRequestMetadata gets the current request metadata, refreshing
|
|
- // tokens if required. This should be called by the transport layer on
|
|
- // each request, and the data should be populated in headers or other
|
|
- // context. If a status code is returned, it will be used as the status
|
|
- // for the RPC. uri is the URI of the entry point for the request.
|
|
- // When supported by the underlying implementation, ctx can be used for
|
|
- // timeout and cancellation. Additionally, RequestInfo data will be
|
|
- // available via ctx to this call.
|
|
- // TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
|
- // it as an arbitrary string.
|
|
+ // GetRequestMetadata gets the current request metadata, refreshing tokens
|
|
+ // if required. This should be called by the transport layer on each
|
|
+ // request, and the data should be populated in headers or other
|
|
+ // context. If a status code is returned, it will be used as the status for
|
|
+ // the RPC (restricted to an allowable set of codes as defined by gRFC
|
|
+ // A54). uri is the URI of the entry point for the request. When supported
|
|
+ // by the underlying implementation, ctx can be used for timeout and
|
|
+ // cancellation. Additionally, RequestInfo data will be available via ctx
|
|
+ // to this call. TODO(zhaoq): Define the set of the qualified keys instead
|
|
+ // of leaving it as an arbitrary string.
|
|
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
|
// RequireTransportSecurity indicates whether the credentials requires
|
|
// transport security.
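To make the PerRPCCredentials contract above concrete, a minimal sketch of an implementation that attaches a static bearer token to every RPC; staticTokenCreds, the ca.pem path and the token value are assumptions for illustration:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// staticTokenCreds satisfies credentials.PerRPCCredentials.
type staticTokenCreds struct {
	token string
}

// GetRequestMetadata returns the headers to attach to every RPC.
func (c staticTokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity reports that these credentials may only be sent
// over a secure transport.
func (c staticTokenCreds) RequireTransportSecurity() bool { return true }

func main() {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("loading TLS credentials: %v", err)
	}
	cc, err := grpc.Dial("example.com:443",
		grpc.WithTransportCredentials(creds),
		grpc.WithPerRPCCredentials(staticTokenCreds{token: "secret"}),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()
}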
|
|
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
|
|
index 784822d05..877b7cd21 100644
|
|
--- a/vendor/google.golang.org/grpc/credentials/tls.go
|
|
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
|
|
@@ -23,9 +23,9 @@ import (
|
|
"crypto/tls"
|
|
"crypto/x509"
|
|
"fmt"
|
|
- "io/ioutil"
|
|
"net"
|
|
"net/url"
|
|
+ "os"
|
|
|
|
credinternal "google.golang.org/grpc/internal/credentials"
|
|
)
|
|
@@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
|
|
// it will override the virtual host name of authority (e.g. :authority header
|
|
// field) in requests.
|
|
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
|
- b, err := ioutil.ReadFile(certFile)
|
|
+ b, err := os.ReadFile(certFile)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
@@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
|
|
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
|
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
|
|
index 60403bc16..15a3d5102 100644
|
|
--- a/vendor/google.golang.org/grpc/dialoptions.go
|
|
+++ b/vendor/google.golang.org/grpc/dialoptions.go
|
|
@@ -29,6 +29,7 @@ import (
|
|
"google.golang.org/grpc/credentials/insecure"
|
|
"google.golang.org/grpc/internal"
|
|
internalbackoff "google.golang.org/grpc/internal/backoff"
|
|
+ "google.golang.org/grpc/internal/binarylog"
|
|
"google.golang.org/grpc/internal/transport"
|
|
"google.golang.org/grpc/keepalive"
|
|
"google.golang.org/grpc/resolver"
|
|
@@ -36,12 +37,15 @@ import (
|
|
)
|
|
|
|
func init() {
|
|
- internal.AddExtraDialOptions = func(opt ...DialOption) {
|
|
- extraDialOptions = append(extraDialOptions, opt...)
|
|
+ internal.AddGlobalDialOptions = func(opt ...DialOption) {
|
|
+ globalDialOptions = append(globalDialOptions, opt...)
|
|
}
|
|
- internal.ClearExtraDialOptions = func() {
|
|
- extraDialOptions = nil
|
|
+ internal.ClearGlobalDialOptions = func() {
|
|
+ globalDialOptions = nil
|
|
}
|
|
+ internal.WithBinaryLogger = withBinaryLogger
|
|
+ internal.JoinDialOptions = newJoinDialOption
|
|
+ internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
|
|
}
|
|
|
|
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
|
@@ -61,6 +65,7 @@ type dialOptions struct {
|
|
timeout time.Duration
|
|
scChan <-chan ServiceConfig
|
|
authority string
|
|
+ binaryLogger binarylog.Logger
|
|
copts transport.ConnectOptions
|
|
callOptions []CallOption
|
|
channelzParentID *channelz.Identifier
|
|
@@ -72,6 +77,7 @@ type dialOptions struct {
|
|
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
|
defaultServiceConfigRawJSON *string
|
|
resolvers []resolver.Builder
|
|
+ idleTimeout time.Duration
|
|
}
|
|
|
|
// DialOption configures how we set up the connection.
|
|
@@ -79,7 +85,7 @@ type DialOption interface {
|
|
apply(*dialOptions)
|
|
}
|
|
|
|
-var extraDialOptions []DialOption
|
|
+var globalDialOptions []DialOption
|
|
|
|
// EmptyDialOption does not alter the dial configuration. It can be embedded in
|
|
// another structure to build custom dial options.
|
|
@@ -92,6 +98,16 @@ type EmptyDialOption struct{}
|
|
|
|
func (EmptyDialOption) apply(*dialOptions) {}
|
|
|
|
+type disableGlobalDialOptions struct{}
|
|
+
|
|
+func (disableGlobalDialOptions) apply(*dialOptions) {}
|
|
+
|
|
+// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
|
|
+// from applying the global DialOptions (set via AddGlobalDialOptions).
|
|
+func newDisableGlobalDialOptions() DialOption {
|
|
+ return &disableGlobalDialOptions{}
|
|
+}
|
|
+
|
|
// funcDialOption wraps a function that modifies dialOptions into an
|
|
// implementation of the DialOption interface.
|
|
type funcDialOption struct {
|
|
@@ -108,13 +124,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
|
|
}
|
|
}
|
|
|
|
+type joinDialOption struct {
|
|
+ opts []DialOption
|
|
+}
|
|
+
|
|
+func (jdo *joinDialOption) apply(do *dialOptions) {
|
|
+ for _, opt := range jdo.opts {
|
|
+ opt.apply(do)
|
|
+ }
|
|
+}
|
|
+
|
|
+func newJoinDialOption(opts ...DialOption) DialOption {
|
|
+ return &joinDialOption{opts: opts}
|
|
+}
|
|
+
|
|
// WithWriteBufferSize determines how much data can be batched before doing a
|
|
// write on the wire. The corresponding memory allocation for this buffer will
|
|
// be twice the size to keep syscalls low. The default value for this buffer is
|
|
// 32KB.
|
|
//
|
|
-// Zero will disable the write buffer such that each write will be on underlying
|
|
-// connection. Note: A Send call may not directly translate to a write.
|
|
+// Zero or negative values will disable the write buffer such that each write
|
|
+// will be on underlying connection. Note: A Send call may not directly
|
|
+// translate to a write.
|
|
func WithWriteBufferSize(s int) DialOption {
|
|
return newFuncDialOption(func(o *dialOptions) {
|
|
o.copts.WriteBufferSize = s
|
|
@@ -124,8 +155,9 @@ func WithWriteBufferSize(s int) DialOption {
|
|
// WithReadBufferSize lets you set the size of read buffer, this determines how
|
|
// much data can be read at most for each read syscall.
|
|
//
|
|
-// The default value for this buffer is 32KB. Zero will disable read buffer for
|
|
-// a connection so data framer can access the underlying conn directly.
|
|
+// The default value for this buffer is 32KB. Zero or negative values will
|
|
+// disable read buffer for a connection so data framer can access the
|
|
+// underlying conn directly.
|
|
func WithReadBufferSize(s int) DialOption {
|
|
return newFuncDialOption(func(o *dialOptions) {
|
|
o.copts.ReadBufferSize = s
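A short usage sketch of the two buffer-size options described above; the target and sizes are arbitrary examples:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// 0 disables the write buffer entirely; 64 KiB enlarges the read buffer.
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithWriteBufferSize(0),
		grpc.WithReadBufferSize(64*1024),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()
}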
|
|
@@ -264,6 +296,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
|
|
// WithBlock returns a DialOption which makes callers of Dial block until the
|
|
// underlying connection is up. Without this, Dial returns immediately and
|
|
// connecting the server happens in background.
|
|
+//
|
|
+// Use of this feature is not recommended. For more information, please see:
|
|
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
|
func WithBlock() DialOption {
|
|
return newFuncDialOption(func(o *dialOptions) {
|
|
o.block = true
|
|
@@ -275,6 +310,9 @@ func WithBlock() DialOption {
|
|
// the context.DeadlineExceeded error.
|
|
// Implies WithBlock()
|
|
//
|
|
+// Use of this feature is not recommended. For more information, please see:
|
|
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
|
+//
|
|
// # Experimental
|
|
//
|
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
|
@@ -401,6 +439,14 @@ func WithStatsHandler(h stats.Handler) DialOption {
|
|
})
|
|
}
|
|
|
|
+// withBinaryLogger returns a DialOption that specifies the binary logger for
|
|
+// this ClientConn.
|
|
+func withBinaryLogger(bl binarylog.Logger) DialOption {
|
|
+ return newFuncDialOption(func(o *dialOptions) {
|
|
+ o.binaryLogger = bl
|
|
+ })
|
|
+}
|
|
+
|
|
// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
|
|
// non-temporary dial errors. If f is true, and dialer returns a non-temporary
|
|
// error, gRPC will fail the connection to the network address and won't try to
|
|
@@ -409,6 +455,9 @@ func WithStatsHandler(h stats.Handler) DialOption {
|
|
// FailOnNonTempDialError only affects the initial dial, and does not do
|
|
// anything useful unless you are also using WithBlock().
|
|
//
|
|
+// Use of this feature is not recommended. For more information, please see:
|
|
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
|
+//
|
|
// # Experimental
|
|
//
|
|
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
|
@@ -607,3 +656,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
|
|
o.resolvers = append(o.resolvers, rs...)
|
|
})
|
|
}
|
|
+
|
|
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
|
|
+// channel. If the channel is idle for the configured timeout, i.e. there are no
|
|
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
|
|
+// and as a result the name resolver and load balancer will be shut down. The
|
|
+// channel will exit idle mode when the Connect() method is called or when an
|
|
+// RPC is initiated.
|
|
+//
|
|
+// By default this feature is disabled, which can also be explicitly configured
|
|
+// by passing zero to this function.
|
|
+//
|
|
+// # Experimental
|
|
+//
|
|
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
|
+// later release.
|
|
+func WithIdleTimeout(d time.Duration) DialOption {
|
|
+ return newFuncDialOption(func(o *dialOptions) {
|
|
+ o.idleTimeout = d
|
|
+ })
|
|
+}
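A minimal sketch of wiring the new option into a dial call, assuming a plaintext target and a five-minute timeout chosen purely for illustration:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// With a 5 minute idle timeout, the channel drops its name resolver and
	// LB policy after 5 minutes without RPCs, and transparently exits idle
	// mode on the next RPC or on an explicit Connect().
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()
}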
|
|
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
|
|
index 18e530fc9..07a586135 100644
|
|
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
|
|
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
|
|
@@ -19,7 +19,7 @@
|
|
// Package encoding defines the interface for the compressor and codec, and
|
|
// functions to register and retrieve compressors and codecs.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
@@ -28,6 +28,8 @@ package encoding
|
|
import (
|
|
"io"
|
|
"strings"
|
|
+
|
|
+ "google.golang.org/grpc/internal/grpcutil"
|
|
)
|
|
|
|
// Identity specifies the optional encoding for uncompressed streams.
|
|
@@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor)
|
|
// registered with the same name, the one registered last will take effect.
|
|
func RegisterCompressor(c Compressor) {
|
|
registeredCompressor[c.Name()] = c
|
|
+ if !grpcutil.IsCompressorNameRegistered(c.Name()) {
|
|
+ grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
|
|
+ }
|
|
}
|
|
|
|
// GetCompressor returns Compressor for the given compressor name.
|
|
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
|
|
index 7c1f66409..5de66e40d 100644
|
|
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
|
|
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
|
|
@@ -22,7 +22,6 @@ import (
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
- "io/ioutil"
|
|
"log"
|
|
"os"
|
|
"strconv"
|
|
@@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config)
|
|
// newLoggerV2 creates a loggerV2 to be used as default logger.
|
|
// All logs are written to stderr.
|
|
func newLoggerV2() LoggerV2 {
|
|
- errorW := ioutil.Discard
|
|
- warningW := ioutil.Discard
|
|
- infoW := ioutil.Discard
|
|
+ errorW := io.Discard
|
|
+ warningW := io.Discard
|
|
+ infoW := io.Discard
|
|
|
|
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
|
|
switch logLevel {
|
|
@@ -242,7 +241,7 @@ func (g *loggerT) V(l int) bool {
|
|
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
|
// depth set for trivial functions the logger may ignore.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go
|
|
new file mode 100644
|
|
index 000000000..dc3dc72f6
|
|
--- /dev/null
|
|
+++ b/vendor/google.golang.org/grpc/idle.go
|
|
@@ -0,0 +1,287 @@
|
|
+/*
|
|
+ *
|
|
+ * Copyright 2023 gRPC authors.
|
|
+ *
|
|
+ * Licensed under the Apache License, Version 2.0 (the "License");
|
|
+ * you may not use this file except in compliance with the License.
|
|
+ * You may obtain a copy of the License at
|
|
+ *
|
|
+ * http://www.apache.org/licenses/LICENSE-2.0
|
|
+ *
|
|
+ * Unless required by applicable law or agreed to in writing, software
|
|
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
|
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
+ * See the License for the specific language governing permissions and
|
|
+ * limitations under the License.
|
|
+ *
|
|
+ */
|
|
+
|
|
+package grpc
|
|
+
|
|
+import (
|
|
+ "fmt"
|
|
+ "math"
|
|
+ "sync"
|
|
+ "sync/atomic"
|
|
+ "time"
|
|
+)
|
|
+
|
|
+// For overriding in unit tests.
|
|
+var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
|
|
+ return time.AfterFunc(d, f)
|
|
+}
|
|
+
|
|
+// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter
|
|
+// and exit from idle mode.
|
|
+type idlenessEnforcer interface {
|
|
+ exitIdleMode() error
|
|
+ enterIdleMode() error
|
|
+}
|
|
+
|
|
+// idlenessManager defines the functionality required to track RPC activity on a
|
|
+// channel.
|
|
+type idlenessManager interface {
|
|
+ onCallBegin() error
|
|
+ onCallEnd()
|
|
+ close()
|
|
+}
|
|
+
|
|
+type noopIdlenessManager struct{}
|
|
+
|
|
+func (noopIdlenessManager) onCallBegin() error { return nil }
|
|
+func (noopIdlenessManager) onCallEnd() {}
|
|
+func (noopIdlenessManager) close() {}
|
|
+
|
|
+// idlenessManagerImpl implements the idlenessManager interface. It uses atomic
|
|
+// operations to synchronize access to shared state and a mutex to guarantee
|
|
+// mutual exclusion in a critical section.
|
|
+type idlenessManagerImpl struct {
|
|
+ // State accessed atomically.
|
|
+ lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
|
|
+ activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
|
|
+ activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
|
|
+ closed int32 // Boolean; True when the manager is closed.
|
|
+
|
|
+ // Can be accessed without atomics or mutex since these are set at creation
|
|
+ // time and read-only after that.
|
|
+ enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn.
|
|
+ timeout int64 // Idle timeout duration nanos stored as an int64.
|
|
+
|
|
+ // idleMu is used to guarantee mutual exclusion in two scenarios:
|
|
+ // - Opposing intentions:
|
|
+ // - a: Idle timeout has fired and handleIdleTimeout() is trying to put
|
|
+ // the channel in idle mode because the channel has been inactive.
|
|
+ // - b: At the same time an RPC is made on the channel, and onCallBegin()
|
|
+ // is trying to prevent the channel from going idle.
|
|
+ // - Competing intentions:
|
|
+ // - The channel is in idle mode and there are multiple RPCs starting at
|
|
+ // the same time, all trying to move the channel out of idle. Only one
|
|
+ // of them should succeed in doing so, while the other RPCs should
|
|
+ // piggyback on the first one and be successfully handled.
|
|
+ idleMu sync.RWMutex
|
|
+ actuallyIdle bool
|
|
+ timer *time.Timer
|
|
+}
|
|
+
|
|
+// newIdlenessManager creates a new idleness manager implementation for the
|
|
+// given idle timeout.
|
|
+func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager {
|
|
+ if idleTimeout == 0 {
|
|
+ return noopIdlenessManager{}
|
|
+ }
|
|
+
|
|
+ i := &idlenessManagerImpl{
|
|
+ enforcer: enforcer,
|
|
+ timeout: int64(idleTimeout),
|
|
+ }
|
|
+ i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout)
|
|
+ return i
|
|
+}
|
|
+
|
|
+// resetIdleTimer resets the idle timer to the given duration. This method
|
|
+// should only be called from the timer callback.
|
|
+func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {
|
|
+ i.idleMu.Lock()
|
|
+ defer i.idleMu.Unlock()
|
|
+
|
|
+ if i.timer == nil {
|
|
+ // Only close sets timer to nil. We are done.
|
|
+ return
|
|
+ }
|
|
+
|
|
+ // It is safe to ignore the return value from Reset() because this method is
|
|
+ // only ever called from the timer callback, which means the timer has
|
|
+ // already fired.
|
|
+ i.timer.Reset(d)
|
|
+}
|
|
+
|
|
+// handleIdleTimeout is the timer callback that is invoked upon expiry of the
|
|
+// configured idle timeout. The channel is considered inactive if there are no
|
|
+// ongoing calls and no RPC activity since the last time the timer fired.
|
|
+func (i *idlenessManagerImpl) handleIdleTimeout() {
|
|
+ if i.isClosed() {
|
|
+ return
|
|
+ }
|
|
+
|
|
+ if atomic.LoadInt32(&i.activeCallsCount) > 0 {
|
|
+ i.resetIdleTimer(time.Duration(i.timeout))
|
|
+ return
|
|
+ }
|
|
+
|
|
+ // There has been activity on the channel since we last got here. Reset the
|
|
+ // timer and return.
|
|
+ if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
|
|
+ // Set the timer to fire after a duration of idle timeout, calculated
|
|
+ // from the time the most recent RPC completed.
|
|
+ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0)
|
|
+ i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano()))
|
|
+ return
|
|
+ }
|
|
+
|
|
+ // This CAS operation is extremely likely to succeed given that there has
|
|
+ // been no activity since the last time we were here. Setting the
|
|
+ // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the
|
|
+ // channel is either in idle mode or is trying to get there.
|
|
+ if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) {
|
|
+ // This CAS operation can fail if an RPC started after we checked for
|
|
+ // activity at the top of this method, or one was ongoing from before
|
|
+ // the last time we were here. In both cases, reset the timer and return.
|
|
+ i.resetIdleTimer(time.Duration(i.timeout))
|
|
+ return
|
|
+ }
|
|
+
|
|
+ // Now that we've set the active calls count to -math.MaxInt32, it's time to
|
|
+ // actually move to idle mode.
|
|
+ if i.tryEnterIdleMode() {
|
|
+ // Successfully entered idle mode. No timer needed until we exit idle.
|
|
+ return
|
|
+ }
|
|
+
|
|
+ // Failed to enter idle mode due to a concurrent RPC that kept the channel
|
|
+ // active, or because of an error from the channel. Undo the attempt to
|
|
+ // enter idle, and reset the timer to try again later.
|
|
+ atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
|
|
+ i.resetIdleTimer(time.Duration(i.timeout))
|
|
+}
|
|
+
|
|
+// tryEnterIdleMode instructs the channel to enter idle mode. But before
|
|
+// that, it performs a last minute check to ensure that no new RPC has come in,
|
|
+// making the channel active.
|
|
+//
|
|
+// Return value indicates whether or not the channel moved to idle mode.
|
|
+//
|
|
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
|
|
+func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
|
|
+ i.idleMu.Lock()
|
|
+ defer i.idleMu.Unlock()
|
|
+
|
|
+ if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 {
|
|
+ // We raced and lost to a new RPC. Very rare, but stop entering idle.
|
|
+ return false
|
|
+ }
|
|
+ if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
|
|
+ // A very short RPC could have come in (and also finished) after we
|
|
+ // checked for calls count and activity in handleIdleTimeout(), but
|
|
+ // before the CAS operation. So, we need to check for activity again.
|
|
+ return false
|
|
+ }
|
|
+
|
|
+ // No new RPCs have come in since we last set the active calls count value
|
|
+ // -math.MaxInt32 in the timer callback. And since we have the lock, it is
|
|
+ // safe to enter idle mode now.
|
|
+ if err := i.enforcer.enterIdleMode(); err != nil {
|
|
+ logger.Errorf("Failed to enter idle mode: %v", err)
|
|
+ return false
|
|
+ }
|
|
+
|
|
+ // Successfully entered idle mode.
|
|
+ i.actuallyIdle = true
|
|
+ return true
|
|
+}
|
|
+
|
|
+// onCallBegin is invoked at the start of every RPC.
|
|
+func (i *idlenessManagerImpl) onCallBegin() error {
|
|
+ if i.isClosed() {
|
|
+ return nil
|
|
+ }
|
|
+
|
|
+ if atomic.AddInt32(&i.activeCallsCount, 1) > 0 {
|
|
+ // Channel is not idle now. Set the activity bit and allow the call.
|
|
+ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
|
|
+ return nil
|
|
+ }
|
|
+
|
|
+ // Channel is either in idle mode or is in the process of moving to idle
|
|
+ // mode. Attempt to exit idle mode to allow this RPC.
|
|
+ if err := i.exitIdleMode(); err != nil {
|
|
+ // Undo the increment to calls count, and return an error causing the
|
|
+ // RPC to fail.
|
|
+ atomic.AddInt32(&i.activeCallsCount, -1)
|
|
+ return err
|
|
+ }
|
|
+
|
|
+ atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
|
|
+ return nil
|
|
+}
|
|
+
|
|
+// exitIdleMode instructs the channel to exit idle mode.
|
|
+//
|
|
+// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
|
|
+func (i *idlenessManagerImpl) exitIdleMode() error {
|
|
+ i.idleMu.Lock()
|
|
+ defer i.idleMu.Unlock()
|
|
+
|
|
+ if !i.actuallyIdle {
|
|
+ // This can happen in two scenarios:
|
|
+ // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
|
|
+ // tryEnterIdleMode(). But before the latter could grab the lock, an RPC
|
|
+ // came in and onCallBegin() noticed that the calls count is negative.
|
|
+ // - Channel is in idle mode, and multiple new RPCs come in at the same
|
|
+ // time, all of them notice a negative calls count in onCallBegin and get
|
|
+ // here. The first one to get the lock would get the channel to exit idle.
|
|
+ //
|
|
+ // Either way, nothing to do here.
|
|
+ return nil
|
|
+ }
|
|
+
|
|
+ if err := i.enforcer.exitIdleMode(); err != nil {
|
|
+ return fmt.Errorf("channel failed to exit idle mode: %v", err)
|
|
+ }
|
|
+
|
|
+ // Undo the idle entry process. This also respects any new RPC attempts.
|
|
+ atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
|
|
+ i.actuallyIdle = false
|
|
+
|
|
+ // Start a new timer to fire after the configured idle timeout.
|
|
+ i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout)
|
|
+ return nil
|
|
+}
|
|
+
|
|
+// onCallEnd is invoked at the end of every RPC.
|
|
+func (i *idlenessManagerImpl) onCallEnd() {
|
|
+ if i.isClosed() {
|
|
+ return
|
|
+ }
|
|
+
|
|
+ // Record the time at which the most recent call finished.
|
|
+ atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano())
|
|
+
|
|
+ // Decrement the active calls count. This count can temporarily go negative
|
|
+ // when the timer callback is in the process of moving the channel to idle
|
|
+ // mode, but one or more RPCs come in and complete before the timer callback
|
|
+ // can get done with the process of moving to idle mode.
|
|
+ atomic.AddInt32(&i.activeCallsCount, -1)
|
|
+}
|
|
+
|
|
+func (i *idlenessManagerImpl) isClosed() bool {
|
|
+ return atomic.LoadInt32(&i.closed) == 1
|
|
+}
|
|
+
|
|
+func (i *idlenessManagerImpl) close() {
|
|
+ atomic.StoreInt32(&i.closed, 1)
|
|
+
|
|
+ i.idleMu.Lock()
|
|
+ i.timer.Stop()
|
|
+ i.timer = nil
|
|
+ i.idleMu.Unlock()
|
|
+}
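The accounting in this file hinges on one trick worth spelling out: onCallBegin and the timer callback coordinate through a single atomic counter, with -math.MaxInt32 serving as the "idle or entering idle" sentinel. A toy sketch of just that mechanism, with made-up names (calls, beginCall, tryClaimIdle, releaseIdle), not the real manager:

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// calls counts in-flight RPCs; -math.MaxInt32 marks "idle or entering idle".
var calls int32

// beginCall increments the counter and reports whether the channel was active.
// A result <= 0 means the timer already claimed idleness and the caller must
// first bring the channel out of idle mode.
func beginCall() bool {
	return atomic.AddInt32(&calls, 1) > 0
}

// tryClaimIdle is what the timer callback does: it only succeeds if there are
// exactly zero in-flight calls at this instant.
func tryClaimIdle() bool {
	return atomic.CompareAndSwapInt32(&calls, 0, -math.MaxInt32)
}

// releaseIdle undoes the claim, e.g. when an RPC raced in; adding MaxInt32
// also preserves any increments made by racing beginCall calls.
func releaseIdle() {
	atomic.AddInt32(&calls, math.MaxInt32)
}

func main() {
	fmt.Println(tryClaimIdle())           // true: no calls in flight, idle claimed
	fmt.Println(beginCall())              // false: counter is negative, channel is idle
	releaseIdle()                         // exit idle; the earlier increment is kept
	fmt.Println(atomic.LoadInt32(&calls)) // 1: the racing call is now counted
}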
|
|
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
|
|
index e3dfe204f..755fdebc1 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
|
|
@@ -28,8 +28,13 @@ import (
|
|
"google.golang.org/grpc/internal/grpcutil"
|
|
)
|
|
|
|
-// Logger is the global binary logger. It can be used to get binary logger for
|
|
-// each method.
|
|
+var grpclogLogger = grpclog.Component("binarylog")
|
|
+
|
|
+// Logger specifies MethodLoggers for method names with a Log call that
|
|
+// takes a context.
|
|
+//
|
|
+// This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+// deleted or changed.
|
|
type Logger interface {
|
|
GetMethodLogger(methodName string) MethodLogger
|
|
}
|
|
@@ -37,11 +42,9 @@ type Logger interface {
|
|
// binLogger is the global binary logger for the binary. One of this should be
|
|
// built at init time from the configuration (environment variable or flags).
|
|
//
|
|
-// It is used to get a methodLogger for each individual method.
|
|
+// It is used to get a MethodLogger for each individual method.
|
|
var binLogger Logger
|
|
|
|
-var grpclogLogger = grpclog.Component("binarylog")
|
|
-
|
|
// SetLogger sets the binary logger.
|
|
//
|
|
// Only call this at init time.
|
|
@@ -56,11 +59,11 @@ func GetLogger() Logger {
|
|
return binLogger
|
|
}
|
|
|
|
-// GetMethodLogger returns the methodLogger for the given methodName.
|
|
+// GetMethodLogger returns the MethodLogger for the given methodName.
|
|
//
|
|
// methodName should be in the format of "/service/method".
|
|
//
|
|
-// Each methodLogger returned by this method is a new instance. This is to
|
|
+// Each MethodLogger returned by this method is a new instance. This is to
|
|
// generate sequence id within the call.
|
|
func GetMethodLogger(methodName string) MethodLogger {
|
|
if binLogger == nil {
|
|
@@ -117,7 +120,7 @@ func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
|
|
|
|
// Set method logger for "service/*".
|
|
//
|
|
-// New methodLogger with same service overrides the old one.
|
|
+// New MethodLogger with same service overrides the old one.
|
|
func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
|
|
if _, ok := l.config.Services[service]; ok {
|
|
return fmt.Errorf("conflicting service rules for service %v found", service)
|
|
@@ -131,7 +134,7 @@ func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig)
|
|
|
|
// Set method logger for "service/method".
|
|
//
|
|
-// New methodLogger with same method overrides the old one.
|
|
+// New MethodLogger with same method overrides the old one.
|
|
func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
|
|
if _, ok := l.config.Blacklist[method]; ok {
|
|
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
|
|
@@ -161,11 +164,11 @@ func (l *logger) setBlacklist(method string) error {
|
|
return nil
|
|
}
|
|
|
|
-// getMethodLogger returns the methodLogger for the given methodName.
|
|
+// getMethodLogger returns the MethodLogger for the given methodName.
|
|
//
|
|
// methodName should be in the format of "/service/method".
|
|
//
|
|
-// Each methodLogger returned by this method is a new instance. This is to
|
|
+// Each MethodLogger returned by this method is a new instance. This is to
|
|
// generate sequence id within the call.
|
|
func (l *logger) GetMethodLogger(methodName string) MethodLogger {
|
|
s, m, err := grpcutil.ParseMethod(methodName)
|
|
@@ -174,16 +177,16 @@ func (l *logger) GetMethodLogger(methodName string) MethodLogger {
|
|
return nil
|
|
}
|
|
if ml, ok := l.config.Methods[s+"/"+m]; ok {
|
|
- return newMethodLogger(ml.Header, ml.Message)
|
|
+ return NewTruncatingMethodLogger(ml.Header, ml.Message)
|
|
}
|
|
if _, ok := l.config.Blacklist[s+"/"+m]; ok {
|
|
return nil
|
|
}
|
|
if ml, ok := l.config.Services[s]; ok {
|
|
- return newMethodLogger(ml.Header, ml.Message)
|
|
+ return NewTruncatingMethodLogger(ml.Header, ml.Message)
|
|
}
|
|
if l.config.All == nil {
|
|
return nil
|
|
}
|
|
- return newMethodLogger(l.config.All.Header, l.config.All.Message)
|
|
+ return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
|
|
}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
|
|
index ab589a76b..f9e80e27a 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -30,15 +30,15 @@ import (
// to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
-// - "" Nothing will be logged
-// - "*" All headers and messages will be fully logged.
-// - "*{h}" Only headers will be logged.
-// - "*{m:256}" Only the first 256 bytes of each message will be logged.
-// - "Foo/*" Logs every method in service Foo
-// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
-// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
-// /Foo/Bar, logs all headers and messages in every other method in service
-// Foo.
+// - "" Nothing will be logged
+// - "*" All headers and messages will be fully logged.
+// - "*{h}" Only headers will be logged.
+// - "*{m:256}" Only the first 256 bytes of each message will be logged.
+// - "Foo/*" Logs every method in service Foo
+// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+// /Foo/Bar, logs all headers and messages in every other method in service
+// Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.
@@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger {
return l
}
 
-// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
+// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds
// it to the right map in the logger.
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
// "" is invalid.
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
|
|
index 24df0a1a0..6c3f63221 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
|
|
@@ -19,6 +19,7 @@
|
|
package binarylog
|
|
|
|
import (
|
|
+ "context"
|
|
"net"
|
|
"strings"
|
|
"sync/atomic"
|
|
@@ -26,7 +27,7 @@ import (
|
|
|
|
"github.com/golang/protobuf/proto"
|
|
"github.com/golang/protobuf/ptypes"
|
|
- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
|
+ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
|
"google.golang.org/grpc/metadata"
|
|
"google.golang.org/grpc/status"
|
|
)
|
|
@@ -48,11 +49,16 @@ func (g *callIDGenerator) reset() {
|
|
var idGen callIDGenerator
|
|
|
|
// MethodLogger is the sub-logger for each method.
|
|
+//
|
|
+// This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+// deleted or changed.
|
|
type MethodLogger interface {
|
|
- Log(LogEntryConfig)
|
|
+ Log(context.Context, LogEntryConfig)
|
|
}
|
|
|
|
-type methodLogger struct {
|
|
+// TruncatingMethodLogger is a method logger that truncates headers and messages
|
|
+// based on configured fields.
|
|
+type TruncatingMethodLogger struct {
|
|
headerMaxLen, messageMaxLen uint64
|
|
|
|
callID uint64
|
|
@@ -61,8 +67,12 @@ type methodLogger struct {
|
|
sink Sink // TODO(blog): make this plugable.
|
|
}
|
|
|
|
-func newMethodLogger(h, m uint64) *methodLogger {
|
|
- return &methodLogger{
|
|
+// NewTruncatingMethodLogger returns a new truncating method logger.
|
|
+//
|
|
+// This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+// deleted or changed.
|
|
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
|
|
+ return &TruncatingMethodLogger{
|
|
headerMaxLen: h,
|
|
messageMaxLen: m,
|
|
|
|
@@ -75,8 +85,8 @@ func newMethodLogger(h, m uint64) *methodLogger {
|
|
|
|
// Build is an internal only method for building the proto message out of the
|
|
// input event. It's made public to enable other library to reuse as much logic
|
|
-// in methodLogger as possible.
|
|
-func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
|
|
+// in TruncatingMethodLogger as possible.
|
|
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
|
|
m := c.toProto()
|
|
timestamp, _ := ptypes.TimestampProto(time.Now())
|
|
m.Timestamp = timestamp
|
|
@@ -84,22 +94,22 @@ func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
|
|
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
|
|
|
|
switch pay := m.Payload.(type) {
|
|
- case *pb.GrpcLogEntry_ClientHeader:
|
|
+ case *binlogpb.GrpcLogEntry_ClientHeader:
|
|
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
|
|
- case *pb.GrpcLogEntry_ServerHeader:
|
|
+ case *binlogpb.GrpcLogEntry_ServerHeader:
|
|
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
|
|
- case *pb.GrpcLogEntry_Message:
|
|
+ case *binlogpb.GrpcLogEntry_Message:
|
|
m.PayloadTruncated = ml.truncateMessage(pay.Message)
|
|
}
|
|
return m
|
|
}
|
|
|
|
// Log creates a proto binary log entry, and logs it to the sink.
|
|
-func (ml *methodLogger) Log(c LogEntryConfig) {
|
|
+func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
|
|
ml.sink.Write(ml.Build(c))
|
|
}
|
|
|
|
-func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
|
+func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
|
|
if ml.headerMaxLen == maxUInt {
|
|
return false
|
|
}
|
|
@@ -118,7 +128,7 @@ func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
|
// but not counted towards the size limit.
|
|
continue
|
|
}
|
|
- currentEntryLen := uint64(len(entry.Value))
|
|
+ currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
|
|
if currentEntryLen > bytesLimit {
|
|
break
|
|
}
|
|
@@ -129,7 +139,7 @@ func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
|
return truncated
|
|
}
|
|
|
|
-func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
|
+func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
|
|
if ml.messageMaxLen == maxUInt {
|
|
return false
|
|
}
|
|
@@ -141,8 +151,11 @@ func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
|
}
|
|
|
|
// LogEntryConfig represents the configuration for binary log entry.
|
|
+//
|
|
+// This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+// deleted or changed.
|
|
type LogEntryConfig interface {
|
|
- toProto() *pb.GrpcLogEntry
|
|
+ toProto() *binlogpb.GrpcLogEntry
|
|
}
|
|
|
|
// ClientHeader configs the binary log entry to be a ClientHeader entry.
|
|
@@ -156,10 +169,10 @@ type ClientHeader struct {
|
|
PeerAddr net.Addr
|
|
}
|
|
|
|
-func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
|
+func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
|
|
// This function doesn't need to set all the fields (e.g. seq ID). The Log
|
|
// function will set the fields when necessary.
|
|
- clientHeader := &pb.ClientHeader{
|
|
+ clientHeader := &binlogpb.ClientHeader{
|
|
Metadata: mdToMetadataProto(c.Header),
|
|
MethodName: c.MethodName,
|
|
Authority: c.Authority,
|
|
@@ -167,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
|
if c.Timeout > 0 {
|
|
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
|
|
}
|
|
- ret := &pb.GrpcLogEntry{
|
|
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
|
- Payload: &pb.GrpcLogEntry_ClientHeader{
|
|
+ ret := &binlogpb.GrpcLogEntry{
|
|
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
|
+ Payload: &binlogpb.GrpcLogEntry_ClientHeader{
|
|
ClientHeader: clientHeader,
|
|
},
|
|
}
|
|
if c.OnClientSide {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
|
} else {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
|
}
|
|
if c.PeerAddr != nil {
|
|
ret.Peer = addrToProto(c.PeerAddr)
|
|
@@ -192,19 +205,19 @@ type ServerHeader struct {
|
|
PeerAddr net.Addr
|
|
}
|
|
|
|
-func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
|
|
- ret := &pb.GrpcLogEntry{
|
|
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
|
- Payload: &pb.GrpcLogEntry_ServerHeader{
|
|
- ServerHeader: &pb.ServerHeader{
|
|
+func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
|
|
+ ret := &binlogpb.GrpcLogEntry{
|
|
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
|
+ Payload: &binlogpb.GrpcLogEntry_ServerHeader{
|
|
+ ServerHeader: &binlogpb.ServerHeader{
|
|
Metadata: mdToMetadataProto(c.Header),
|
|
},
|
|
},
|
|
}
|
|
if c.OnClientSide {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
|
} else {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
|
}
|
|
if c.PeerAddr != nil {
|
|
ret.Peer = addrToProto(c.PeerAddr)
|
|
@@ -220,7 +233,7 @@ type ClientMessage struct {
|
|
Message interface{}
|
|
}
|
|
|
|
-func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
|
+func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
|
|
var (
|
|
data []byte
|
|
err error
|
|
@@ -235,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
|
} else {
|
|
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
|
}
|
|
- ret := &pb.GrpcLogEntry{
|
|
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
|
- Payload: &pb.GrpcLogEntry_Message{
|
|
- Message: &pb.Message{
|
|
+ ret := &binlogpb.GrpcLogEntry{
|
|
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
|
+ Payload: &binlogpb.GrpcLogEntry_Message{
|
|
+ Message: &binlogpb.Message{
|
|
Length: uint32(len(data)),
|
|
Data: data,
|
|
},
|
|
},
|
|
}
|
|
if c.OnClientSide {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
|
} else {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
|
}
|
|
return ret
|
|
}
|
|
@@ -260,7 +273,7 @@ type ServerMessage struct {
|
|
Message interface{}
|
|
}
|
|
|
|
-func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
|
+func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
|
|
var (
|
|
data []byte
|
|
err error
|
|
@@ -275,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
|
} else {
|
|
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
|
}
|
|
- ret := &pb.GrpcLogEntry{
|
|
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
|
- Payload: &pb.GrpcLogEntry_Message{
|
|
- Message: &pb.Message{
|
|
+ ret := &binlogpb.GrpcLogEntry{
|
|
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
|
+ Payload: &binlogpb.GrpcLogEntry_Message{
|
|
+ Message: &binlogpb.Message{
|
|
Length: uint32(len(data)),
|
|
Data: data,
|
|
},
|
|
},
|
|
}
|
|
if c.OnClientSide {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
|
} else {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
|
}
|
|
return ret
|
|
}
|
|
@@ -297,15 +310,15 @@ type ClientHalfClose struct {
|
|
OnClientSide bool
|
|
}
|
|
|
|
-func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
|
|
- ret := &pb.GrpcLogEntry{
|
|
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
|
+func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
|
|
+ ret := &binlogpb.GrpcLogEntry{
|
|
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
|
Payload: nil, // No payload here.
|
|
}
|
|
if c.OnClientSide {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
|
} else {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
|
}
|
|
return ret
|
|
}
|
|
@@ -321,7 +334,7 @@ type ServerTrailer struct {
|
|
PeerAddr net.Addr
|
|
}
|
|
|
|
-func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
|
+func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
|
|
st, ok := status.FromError(c.Err)
|
|
if !ok {
|
|
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
|
|
@@ -337,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
|
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
|
|
}
|
|
}
|
|
- ret := &pb.GrpcLogEntry{
|
|
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
|
- Payload: &pb.GrpcLogEntry_Trailer{
|
|
- Trailer: &pb.Trailer{
|
|
+ ret := &binlogpb.GrpcLogEntry{
|
|
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
|
+ Payload: &binlogpb.GrpcLogEntry_Trailer{
|
|
+ Trailer: &binlogpb.Trailer{
|
|
Metadata: mdToMetadataProto(c.Trailer),
|
|
StatusCode: uint32(st.Code()),
|
|
StatusMessage: st.Message(),
|
|
@@ -349,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
|
},
|
|
}
|
|
if c.OnClientSide {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
|
} else {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
|
}
|
|
if c.PeerAddr != nil {
|
|
ret.Peer = addrToProto(c.PeerAddr)
|
|
@@ -364,15 +377,15 @@ type Cancel struct {
|
|
OnClientSide bool
|
|
}
|
|
|
|
-func (c *Cancel) toProto() *pb.GrpcLogEntry {
|
|
- ret := &pb.GrpcLogEntry{
|
|
- Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
|
+func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
|
|
+ ret := &binlogpb.GrpcLogEntry{
|
|
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
|
Payload: nil,
|
|
}
|
|
if c.OnClientSide {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
|
} else {
|
|
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
|
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
|
}
|
|
return ret
|
|
}
|
|
@@ -389,15 +402,15 @@ func metadataKeyOmit(key string) bool {
|
|
return strings.HasPrefix(key, "grpc-")
|
|
}
|
|
|
|
-func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
|
- ret := &pb.Metadata{}
|
|
+func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
|
|
+ ret := &binlogpb.Metadata{}
|
|
for k, vv := range md {
|
|
if metadataKeyOmit(k) {
|
|
continue
|
|
}
|
|
for _, v := range vv {
|
|
ret.Entry = append(ret.Entry,
|
|
- &pb.MetadataEntry{
|
|
+ &binlogpb.MetadataEntry{
|
|
Key: k,
|
|
Value: []byte(v),
|
|
},
|
|
@@ -407,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
|
return ret
|
|
}
|
|
|
|
-func addrToProto(addr net.Addr) *pb.Address {
|
|
- ret := &pb.Address{}
|
|
+func addrToProto(addr net.Addr) *binlogpb.Address {
|
|
+ ret := &binlogpb.Address{}
|
|
switch a := addr.(type) {
|
|
case *net.TCPAddr:
|
|
if a.IP.To4() != nil {
|
|
- ret.Type = pb.Address_TYPE_IPV4
|
|
+ ret.Type = binlogpb.Address_TYPE_IPV4
|
|
} else if a.IP.To16() != nil {
|
|
- ret.Type = pb.Address_TYPE_IPV6
|
|
+ ret.Type = binlogpb.Address_TYPE_IPV6
|
|
} else {
|
|
- ret.Type = pb.Address_TYPE_UNKNOWN
|
|
+ ret.Type = binlogpb.Address_TYPE_UNKNOWN
|
|
// Do not set address and port fields.
|
|
break
|
|
}
|
|
ret.Address = a.IP.String()
|
|
ret.IpPort = uint32(a.Port)
|
|
case *net.UnixAddr:
|
|
- ret.Type = pb.Address_TYPE_UNIX
|
|
+ ret.Type = binlogpb.Address_TYPE_UNIX
|
|
ret.Address = a.String()
|
|
default:
|
|
- ret.Type = pb.Address_TYPE_UNKNOWN
|
|
+ ret.Type = binlogpb.Address_TYPE_UNKNOWN
|
|
}
|
|
return ret
|
|
}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
|
|
index c2fdd58b3..264de387c 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
|
|
@@ -26,7 +26,7 @@ import (
|
|
"time"
|
|
|
|
"github.com/golang/protobuf/proto"
|
|
- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
|
+ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
|
)
|
|
|
|
var (
|
|
@@ -42,15 +42,15 @@ type Sink interface {
|
|
// Write will be called to write the log entry into the sink.
|
|
//
|
|
// It should be thread-safe so it can be called in parallel.
|
|
- Write(*pb.GrpcLogEntry) error
|
|
+ Write(*binlogpb.GrpcLogEntry) error
|
|
// Close will be called when the Sink is replaced by a new Sink.
|
|
Close() error
|
|
}
|
|
|
|
type noopSink struct{}
|
|
|
|
-func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
|
|
-func (ns *noopSink) Close() error { return nil }
|
|
+func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
|
|
+func (ns *noopSink) Close() error { return nil }
|
|
|
|
// newWriterSink creates a binary log sink with the given writer.
|
|
//
|
|
@@ -66,7 +66,7 @@ type writerSink struct {
|
|
out io.Writer
|
|
}
|
|
|
|
-func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
|
|
+func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
|
|
b, err := proto.Marshal(e)
|
|
if err != nil {
|
|
grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
|
|
@@ -96,7 +96,7 @@ type bufferedSink struct {
|
|
done chan struct{}
|
|
}
|
|
|
|
-func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
|
|
+func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
|
|
fs.mu.Lock()
|
|
defer fs.mu.Unlock()
|
|
if !fs.flusherStarted {
|
|
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
|
|
index 9f6a0c120..81c2f5fd7 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
|
|
@@ -35,6 +35,7 @@ import "sync"
|
|
// internal/transport/transport.go for an example of this.
|
|
type Unbounded struct {
|
|
c chan interface{}
|
|
+ closed bool
|
|
mu sync.Mutex
|
|
backlog []interface{}
|
|
}
|
|
@@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded {
|
|
// Put adds t to the unbounded buffer.
|
|
func (b *Unbounded) Put(t interface{}) {
|
|
b.mu.Lock()
|
|
+ defer b.mu.Unlock()
|
|
+ if b.closed {
|
|
+ return
|
|
+ }
|
|
if len(b.backlog) == 0 {
|
|
select {
|
|
case b.c <- t:
|
|
- b.mu.Unlock()
|
|
return
|
|
default:
|
|
}
|
|
}
|
|
b.backlog = append(b.backlog, t)
|
|
- b.mu.Unlock()
|
|
}
|
|
|
|
// Load sends the earliest buffered data, if any, onto the read channel
|
|
@@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) {
|
|
// value from the read channel.
|
|
func (b *Unbounded) Load() {
|
|
b.mu.Lock()
|
|
+ defer b.mu.Unlock()
|
|
+ if b.closed {
|
|
+ return
|
|
+ }
|
|
if len(b.backlog) > 0 {
|
|
select {
|
|
case b.c <- b.backlog[0]:
|
|
@@ -72,7 +79,6 @@ func (b *Unbounded) Load() {
|
|
default:
|
|
}
|
|
}
|
|
- b.mu.Unlock()
|
|
}
|
|
|
|
// Get returns a read channel on which values added to the buffer, via Put(),
|
|
@@ -80,6 +86,20 @@ func (b *Unbounded) Load() {
|
|
//
|
|
// Upon reading a value from this channel, users are expected to call Load() to
|
|
// send the next buffered value onto the channel if there is any.
|
|
+//
|
|
+// If the unbounded buffer is closed, the read channel returned by this method
|
|
+// is closed.
|
|
func (b *Unbounded) Get() <-chan interface{} {
|
|
return b.c
|
|
}
|
|
+
|
|
+// Close closes the unbounded buffer.
|
|
+func (b *Unbounded) Close() {
|
|
+ b.mu.Lock()
|
|
+ defer b.mu.Unlock()
|
|
+ if b.closed {
|
|
+ return
|
|
+ }
|
|
+ b.closed = true
|
|
+ close(b.c)
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
|
|
index ad0ce4dab..7b2f350e2 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
|
|
@@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) {
|
|
|
|
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
|
// The delete process includes two steps:
|
|
-// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
|
-// parent's child list.
|
|
-// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
|
-// will return entry not found error.
|
|
+// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
|
+// parent's child list.
|
|
+// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
|
+// will return entry not found error.
|
|
func (c *channel) deleteSelfIfReady() {
|
|
if !c.deleteSelfFromTree() {
|
|
return
|
|
@@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) {
|
|
|
|
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
|
// The delete process includes two steps:
|
|
-// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
|
-// its parent's child list.
|
|
-// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
|
-// by id will return entry not found error.
|
|
+// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
|
+// its parent's child list.
|
|
+// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
|
+// by id will return entry not found error.
|
|
func (sc *subChannel) deleteSelfIfReady() {
|
|
if !sc.deleteSelfFromTree() {
|
|
return
|
|
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
|
|
index 6f0272543..80fd5c7d2 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
|
|
@@ -21,15 +21,46 @@ package envconfig
|
|
|
|
import (
|
|
"os"
|
|
+ "strconv"
|
|
"strings"
|
|
)
|
|
|
|
-const (
|
|
- prefix = "GRPC_GO_"
|
|
- txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
|
|
-)
|
|
-
|
|
var (
|
|
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
|
- TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
|
|
+ TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
|
|
+ // AdvertiseCompressors is set if registered compressor should be advertised
|
|
+ // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
|
|
+ AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
|
|
+ // RingHashCap indicates the maximum ring size which defaults to 4096
|
|
+ // entries but may be overridden by setting the environment variable
|
|
+ // "GRPC_RING_HASH_CAP". This does not override the default bounds
|
|
+ // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
|
|
+ RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
|
|
+ // PickFirstLBConfig is set if we should support configuration of the
|
|
+ // pick_first LB policy, which can be enabled by setting the environment
|
|
+ // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
|
|
+ PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
|
|
)
|
|
+
|
|
+func boolFromEnv(envVar string, def bool) bool {
|
|
+ if def {
|
|
+ // The default is true; return true unless the variable is "false".
|
|
+ return !strings.EqualFold(os.Getenv(envVar), "false")
|
|
+ }
|
|
+ // The default is false; return false unless the variable is "true".
|
|
+ return strings.EqualFold(os.Getenv(envVar), "true")
|
|
+}
|
|
+
|
|
+func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
|
|
+ v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
|
|
+ if err != nil {
|
|
+ return def
|
|
+ }
|
|
+ if v < min {
|
|
+ return min
|
|
+ }
|
|
+ if v > max {
|
|
+ return max
|
|
+ }
|
|
+ return v
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
|
|
new file mode 100644
|
|
index 000000000..dd314cfb1
|
|
--- /dev/null
|
|
+++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
|
|
@@ -0,0 +1,42 @@
|
|
+/*
|
|
+ *
|
|
+ * Copyright 2022 gRPC authors.
|
|
+ *
|
|
+ * Licensed under the Apache License, Version 2.0 (the "License");
|
|
+ * you may not use this file except in compliance with the License.
|
|
+ * You may obtain a copy of the License at
|
|
+ *
|
|
+ * http://www.apache.org/licenses/LICENSE-2.0
|
|
+ *
|
|
+ * Unless required by applicable law or agreed to in writing, software
|
|
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
|
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
+ * See the License for the specific language governing permissions and
|
|
+ * limitations under the License.
|
|
+ *
|
|
+ */
|
|
+
|
|
+package envconfig
|
|
+
|
|
+import "os"
|
|
+
|
|
+const (
|
|
+ envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG"
|
|
+ envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
|
|
+)
|
|
+
|
|
+var (
|
|
+ // ObservabilityConfig is the json configuration for the gcp/observability
|
|
+ // package specified directly in the envObservabilityConfig env var.
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ ObservabilityConfig = os.Getenv(envObservabilityConfig)
|
|
+ // ObservabilityConfigFile is the json configuration for the
|
|
+ // gcp/observability specified in a file with the location specified in
|
|
+ // envObservabilityConfigFile env var.
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
|
|
+)
|
|
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
|
|
index a83b26bb8..02b4b6a1c 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
|
|
@@ -20,7 +20,6 @@ package envconfig
|
|
|
|
import (
|
|
"os"
|
|
- "strings"
|
|
)
|
|
|
|
const (
|
|
@@ -36,15 +35,6 @@ const (
|
|
//
|
|
// When both bootstrap FileName and FileContent are set, FileName is used.
|
|
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
|
|
-
|
|
- ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
|
|
- clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
|
|
- aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
|
|
- rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
|
|
- federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
|
|
- rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
|
|
-
|
|
- c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
|
|
)
|
|
|
|
var (
|
|
@@ -63,38 +53,43 @@ var (
|
|
// XDSRingHash indicates whether ring hash support is enabled, which can be
|
|
// disabled by setting the environment variable
|
|
// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
|
|
- XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
|
|
+ XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
|
|
// XDSClientSideSecurity is used to control processing of security
|
|
// configuration on the client-side.
|
|
//
|
|
// Note that there is no env var protection for the server-side because we
|
|
// have a brand new API on the server-side and users explicitly need to use
|
|
// the new API to get security integration on the server.
|
|
- XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
|
|
- // XDSAggregateAndDNS indicates whether processing of aggregated cluster
|
|
- // and DNS cluster is enabled, which can be enabled by setting the
|
|
- // environment variable
|
|
- // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
|
|
- // "true".
|
|
- XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
|
|
+ XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
|
|
+ // XDSAggregateAndDNS indicates whether processing of aggregated cluster and
|
|
+ // DNS cluster is enabled, which can be disabled by setting the environment
|
|
+ // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
|
|
+ // to "false".
|
|
+ XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
|
|
|
|
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
|
|
// which can be disabled by setting the environment variable
|
|
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
|
|
- XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
|
|
+ XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
|
|
// XDSOutlierDetection indicates whether outlier detection support is
|
|
- // enabled, which can be enabled by setting the environment variable
|
|
- // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
|
|
- XDSOutlierDetection = false
|
|
- // XDSFederation indicates whether federation support is enabled.
|
|
- XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
|
|
+ // enabled, which can be disabled by setting the environment variable
|
|
+ // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
|
|
+ XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
|
|
+ // XDSFederation indicates whether federation support is enabled, which can
|
|
+ // be enabled by setting the environment variable
|
|
+ // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
|
|
+ XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
|
|
|
|
// XDSRLS indicates whether processing of Cluster Specifier plugins and
|
|
- // support for the RLS CLuster Specifier is enabled, which can be enabled by
|
|
+ // support for the RLS CLuster Specifier is enabled, which can be disabled by
|
|
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
|
|
- // "true".
|
|
- XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
|
|
+ // "false".
|
|
+ XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
|
|
|
|
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
|
|
- C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
|
|
+ C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
|
|
+ // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
|
|
+ // can be disabled by setting the environment variable
|
|
+ // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
|
|
+ XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
|
|
)
|
|
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
|
|
index 30a3b4258..b68e26a36 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
|
|
@@ -110,7 +110,7 @@ type LoggerV2 interface {
|
|
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
|
|
// It is defined here to avoid a circular dependency.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
|
|
index 82af70e96..02224b42c 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
|
|
@@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
|
|
|
// Debugf does info logging at verbose level 2.
|
|
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
|
+ // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
|
|
+ // rewrite PrefixLogger a little to ensure that we don't use the global
|
|
+ // `Logger` here, and instead use the `logger` field.
|
|
if !Logger.V(2) {
|
|
return
|
|
}
|
|
@@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
|
return
|
|
}
|
|
InfoDepth(1, fmt.Sprintf(format, args...))
|
|
+
|
|
+}
|
|
+
|
|
+// V reports whether verbosity level l is at least the requested verbose level.
|
|
+func (pl *PrefixLogger) V(l int) bool {
|
|
+ // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
|
|
+ // rewrite PrefixLogger a little to ensure that we don't use the global
|
|
+ // `Logger` here, and instead use the `logger` field.
|
|
+ return Logger.V(l)
|
|
}
|
|
|
|
// NewPrefixLogger creates a prefix logger with the given prefix.
|
|
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
|
|
index 740f83c2b..d08e3e907 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
|
|
@@ -52,6 +52,13 @@ func Intn(n int) int {
|
|
return r.Intn(n)
|
|
}
|
|
|
|
+// Int31n implements rand.Int31n on the grpcrand global source.
|
|
+func Int31n(n int32) int32 {
|
|
+ mu.Lock()
|
|
+ defer mu.Unlock()
|
|
+ return r.Int31n(n)
|
|
+}
|
|
+
|
|
// Float64 implements rand.Float64 on the grpcrand global source.
|
|
func Float64() float64 {
|
|
mu.Lock()
|
|
@@ -65,3 +72,17 @@ func Uint64() uint64 {
|
|
defer mu.Unlock()
|
|
return r.Uint64()
|
|
}
|
|
+
|
|
+// Uint32 implements rand.Uint32 on the grpcrand global source.
|
|
+func Uint32() uint32 {
|
|
+ mu.Lock()
|
|
+ defer mu.Unlock()
|
|
+ return r.Uint32()
|
|
+}
|
|
+
|
|
+// Shuffle implements rand.Shuffle on the grpcrand global source.
|
|
+var Shuffle = func(n int, f func(int, int)) {
|
|
+ mu.Lock()
|
|
+ defer mu.Unlock()
|
|
+ r.Shuffle(n, f)
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
|
|
new file mode 100644
|
|
index 000000000..37b8d4117
|
|
--- /dev/null
|
|
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
|
|
@@ -0,0 +1,119 @@
|
|
+/*
|
|
+ *
|
|
+ * Copyright 2022 gRPC authors.
|
|
+ *
|
|
+ * Licensed under the Apache License, Version 2.0 (the "License");
|
|
+ * you may not use this file except in compliance with the License.
|
|
+ * You may obtain a copy of the License at
|
|
+ *
|
|
+ * http://www.apache.org/licenses/LICENSE-2.0
|
|
+ *
|
|
+ * Unless required by applicable law or agreed to in writing, software
|
|
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
|
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
+ * See the License for the specific language governing permissions and
|
|
+ * limitations under the License.
|
|
+ *
|
|
+ */
|
|
+
|
|
+package grpcsync
|
|
+
|
|
+import (
|
|
+ "context"
|
|
+ "sync"
|
|
+
|
|
+ "google.golang.org/grpc/internal/buffer"
|
|
+)
|
|
+
|
|
+// CallbackSerializer provides a mechanism to schedule callbacks in a
|
|
+// synchronized manner. It provides a FIFO guarantee on the order of execution
|
|
+// of scheduled callbacks. New callbacks can be scheduled by invoking the
|
|
+// Schedule() method.
|
|
+//
|
|
+// This type is safe for concurrent access.
|
|
+type CallbackSerializer struct {
|
|
+ // Done is closed once the serializer is shut down completely, i.e all
|
|
+ // scheduled callbacks are executed and the serializer has deallocated all
|
|
+ // its resources.
|
|
+ Done chan struct{}
|
|
+
|
|
+ callbacks *buffer.Unbounded
|
|
+ closedMu sync.Mutex
|
|
+ closed bool
|
|
+}
|
|
+
|
|
+// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
|
|
+// context will be passed to the scheduled callbacks. Users should cancel the
|
|
+// provided context to shutdown the CallbackSerializer. It is guaranteed that no
|
|
+// callbacks will be added once this context is canceled, and any pending un-run
|
|
+// callbacks will be executed before the serializer is shut down.
|
|
+func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
|
|
+ t := &CallbackSerializer{
|
|
+ Done: make(chan struct{}),
|
|
+ callbacks: buffer.NewUnbounded(),
|
|
+ }
|
|
+ go t.run(ctx)
|
|
+ return t
|
|
+}
|
|
+
|
|
+// Schedule adds a callback to be scheduled after existing callbacks are run.
|
|
+//
|
|
+// Callbacks are expected to honor the context when performing any blocking
|
|
+// operations, and should return early when the context is canceled.
|
|
+//
|
|
+// Return value indicates if the callback was successfully added to the list of
|
|
+// callbacks to be executed by the serializer. It is not possible to add
|
|
+// callbacks once the context passed to NewCallbackSerializer is cancelled.
|
|
+func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
|
|
+ t.closedMu.Lock()
|
|
+ defer t.closedMu.Unlock()
|
|
+
|
|
+ if t.closed {
|
|
+ return false
|
|
+ }
|
|
+ t.callbacks.Put(f)
|
|
+ return true
|
|
+}
|
|
+
|
|
+func (t *CallbackSerializer) run(ctx context.Context) {
|
|
+ var backlog []func(context.Context)
|
|
+
|
|
+ defer close(t.Done)
|
|
+ for ctx.Err() == nil {
|
|
+ select {
|
|
+ case <-ctx.Done():
|
|
+ // Do nothing here. Next iteration of the for loop will not happen,
|
|
+ // since ctx.Err() would be non-nil.
|
|
+ case callback, ok := <-t.callbacks.Get():
|
|
+ if !ok {
|
|
+ return
|
|
+ }
|
|
+ t.callbacks.Load()
|
|
+ callback.(func(ctx context.Context))(ctx)
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // Fetch pending callbacks if any, and execute them before returning from
|
|
+ // this method and closing t.Done.
|
|
+ t.closedMu.Lock()
|
|
+ t.closed = true
|
|
+ backlog = t.fetchPendingCallbacks()
|
|
+ t.callbacks.Close()
|
|
+ t.closedMu.Unlock()
|
|
+ for _, b := range backlog {
|
|
+ b(ctx)
|
|
+ }
|
|
+}
|
|
+
|
|
+func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
|
|
+ var backlog []func(context.Context)
|
|
+ for {
|
|
+ select {
|
|
+ case b := <-t.callbacks.Get():
|
|
+ backlog = append(backlog, b.(func(context.Context)))
|
|
+ t.callbacks.Load()
|
|
+ default:
|
|
+ return backlog
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
|
|
new file mode 100644
|
|
index 000000000..6635f7bca
|
|
--- /dev/null
|
|
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
|
|
@@ -0,0 +1,32 @@
|
|
+/*
|
|
+ *
|
|
+ * Copyright 2022 gRPC authors.
|
|
+ *
|
|
+ * Licensed under the Apache License, Version 2.0 (the "License");
|
|
+ * you may not use this file except in compliance with the License.
|
|
+ * You may obtain a copy of the License at
|
|
+ *
|
|
+ * http://www.apache.org/licenses/LICENSE-2.0
|
|
+ *
|
|
+ * Unless required by applicable law or agreed to in writing, software
|
|
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
|
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
+ * See the License for the specific language governing permissions and
|
|
+ * limitations under the License.
|
|
+ *
|
|
+ */
|
|
+
|
|
+package grpcsync
|
|
+
|
|
+import (
|
|
+ "sync"
|
|
+)
|
|
+
|
|
+// OnceFunc returns a function wrapping f which ensures f is only executed
|
|
+// once even if the returned function is executed multiple times.
|
|
+func OnceFunc(f func()) func() {
|
|
+ var once sync.Once
|
|
+ return func() {
|
|
+ once.Do(f)
|
|
+ }
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
|
|
new file mode 100644
|
|
index 000000000..9f4090967
|
|
--- /dev/null
|
|
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
|
|
@@ -0,0 +1,47 @@
|
|
+/*
|
|
+ *
|
|
+ * Copyright 2022 gRPC authors.
|
|
+ *
|
|
+ * Licensed under the Apache License, Version 2.0 (the "License");
|
|
+ * you may not use this file except in compliance with the License.
|
|
+ * You may obtain a copy of the License at
|
|
+ *
|
|
+ * http://www.apache.org/licenses/LICENSE-2.0
|
|
+ *
|
|
+ * Unless required by applicable law or agreed to in writing, software
|
|
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
|
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
+ * See the License for the specific language governing permissions and
|
|
+ * limitations under the License.
|
|
+ *
|
|
+ */
|
|
+
|
|
+package grpcutil
|
|
+
|
|
+import (
|
|
+ "strings"
|
|
+
|
|
+ "google.golang.org/grpc/internal/envconfig"
|
|
+)
|
|
+
|
|
+// RegisteredCompressorNames holds names of the registered compressors.
|
|
+var RegisteredCompressorNames []string
|
|
+
|
|
+// IsCompressorNameRegistered returns true when name is available in registry.
|
|
+func IsCompressorNameRegistered(name string) bool {
|
|
+ for _, compressor := range RegisteredCompressorNames {
|
|
+ if compressor == name {
|
|
+ return true
|
|
+ }
|
|
+ }
|
|
+ return false
|
|
+}
|
|
+
|
|
+// RegisteredCompressors returns a string of registered compressor names
|
|
+// separated by comma.
|
|
+func RegisteredCompressors() string {
|
|
+ if !envconfig.AdvertiseCompressors {
|
|
+ return ""
|
|
+ }
|
|
+ return strings.Join(RegisteredCompressorNames, ",")
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
|
|
index e9c4af648..ec62b4775 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
|
|
@@ -25,7 +25,6 @@ import (
|
|
|
|
// ParseMethod splits service and method from the input. It expects format
|
|
// "/service/method".
|
|
-//
|
|
func ParseMethod(methodName string) (service, method string, _ error) {
|
|
if !strings.HasPrefix(methodName, "/") {
|
|
return "", "", errors.New("invalid method name: should start with /")
|
|
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
|
|
index 83018be7c..42ff39c84 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/internal.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/internal.go
|
|
@@ -58,25 +58,69 @@ var (
|
|
// gRPC server. An xDS-enabled server needs to know what type of credentials
|
|
// is configured on the underlying gRPC server. This is set by server.go.
|
|
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
|
|
+ // CanonicalString returns the canonical string of the code defined here:
|
|
+ // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ CanonicalString interface{} // func (codes.Code) string
|
|
// DrainServerTransports initiates a graceful close of existing connections
|
|
// on a gRPC server accepted on the provided listener address. An
|
|
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
|
// listener moves to "not-serving" mode.
|
|
DrainServerTransports interface{} // func(*grpc.Server, string)
|
|
- // AddExtraServerOptions adds an array of ServerOption that will be
|
|
+ // AddGlobalServerOptions adds an array of ServerOption that will be
|
|
// effective globally for newly created servers. The priority will be: 1.
|
|
// user-provided; 2. this method; 3. default values.
|
|
- AddExtraServerOptions interface{} // func(opt ...ServerOption)
|
|
- // ClearExtraServerOptions clears the array of extra ServerOption. This
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ AddGlobalServerOptions interface{} // func(opt ...ServerOption)
|
|
+ // ClearGlobalServerOptions clears the array of extra ServerOption. This
|
|
// method is useful in testing and benchmarking.
|
|
- ClearExtraServerOptions func()
|
|
- // AddExtraDialOptions adds an array of DialOption that will be effective
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ ClearGlobalServerOptions func()
|
|
+ // AddGlobalDialOptions adds an array of DialOption that will be effective
|
|
// globally for newly created client channels. The priority will be: 1.
|
|
// user-provided; 2. this method; 3. default values.
|
|
- AddExtraDialOptions interface{} // func(opt ...DialOption)
|
|
- // ClearExtraDialOptions clears the array of extra DialOption. This
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ AddGlobalDialOptions interface{} // func(opt ...DialOption)
|
|
+ // DisableGlobalDialOptions returns a DialOption that prevents the
|
|
+ // ClientConn from applying the global DialOptions (set via
|
|
+ // AddGlobalDialOptions).
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ DisableGlobalDialOptions interface{} // func() grpc.DialOption
|
|
+ // ClearGlobalDialOptions clears the array of extra DialOption. This
|
|
// method is useful in testing and benchmarking.
|
|
- ClearExtraDialOptions func()
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ ClearGlobalDialOptions func()
|
|
+ // JoinDialOptions combines the dial options passed as arguments into a
|
|
+ // single dial option.
|
|
+ JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
|
|
+ // JoinServerOptions combines the server options passed as arguments into a
|
|
+ // single server option.
|
|
+ JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
|
|
+
|
|
+ // WithBinaryLogger returns a DialOption that specifies the binary logger
|
|
+ // for a ClientConn.
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
|
|
+ // BinaryLogger returns a ServerOption that can set the binary logger for a
|
|
+ // server.
|
|
+ //
|
|
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
|
|
+ // deleted or changed.
|
|
+ BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
|
|
|
|
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
|
// the provided xds bootstrap config instead of the global configuration from
|
|
@@ -118,21 +162,8 @@ var (
|
|
// TODO: Remove this function once the RBAC env var is removed.
|
|
UnregisterRBACHTTPFilterForTesting func()
|
|
|
|
- // RegisterOutlierDetectionBalancerForTesting registers the Outlier
|
|
- // Detection Balancer for testing purposes, regardless of the Outlier
|
|
- // Detection environment variable.
|
|
- //
|
|
- // TODO: Remove this function once the Outlier Detection env var is removed.
|
|
- RegisterOutlierDetectionBalancerForTesting func()
|
|
-
|
|
- // UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier
|
|
- // Detection Balancer for testing purposes. This is needed because there is
|
|
- // no way to unregister the Outlier Detection Balancer after registering it
|
|
- // solely for testing purposes using
|
|
- // RegisterOutlierDetectionBalancerForTesting().
|
|
- //
|
|
- // TODO: Remove this function once the Outlier Detection env var is removed.
|
|
- UnregisterOutlierDetectionBalancerForTesting func()
|
|
+ // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
|
|
+ ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions)
|
|
)
|
|
|
|
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
|
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
|
|
index b2980f8ac..c82e608e0 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
|
|
@@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
|
|
return addr
|
|
}
|
|
|
|
-// Validate returns an error if the input md contains invalid keys or values.
|
|
-//
|
|
-// If the header is not a pseudo-header, the following items are checked:
|
|
-// - header names must contain one or more characters from this set [0-9 a-z _ - .].
|
|
-// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
|
|
-// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
|
|
+// Validate validates every pair in md with ValidatePair.
|
|
func Validate(md metadata.MD) error {
|
|
for k, vals := range md {
|
|
- // pseudo-header will be ignored
|
|
- if k[0] == ':' {
|
|
- continue
|
|
- }
|
|
- // check key, for i that saving a conversion if not using for range
|
|
- for i := 0; i < len(k); i++ {
|
|
- r := k[i]
|
|
- if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
|
|
- return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
|
|
- }
|
|
- }
|
|
- if strings.HasSuffix(k, "-bin") {
|
|
- continue
|
|
- }
|
|
- // check value
|
|
- for _, val := range vals {
|
|
- if hasNotPrintable(val) {
|
|
- return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
|
|
- }
|
|
+ if err := ValidatePair(k, vals...); err != nil {
|
|
+ return err
|
|
}
|
|
}
|
|
return nil
|
|
@@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool {
|
|
}
|
|
return false
|
|
}
|
|
+
|
|
+// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) :
|
|
+//
|
|
+// - key must contain one or more characters.
|
|
+// - the characters in the key must be contained in [0-9 a-z _ - .].
|
|
+// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
|
|
+// - the characters in the every value must be printable (in [%x20-%x7E]).
|
|
+func ValidatePair(key string, vals ...string) error {
|
|
+ // key should not be empty
|
|
+ if key == "" {
|
|
+ return fmt.Errorf("there is an empty key in the header")
|
|
+ }
|
|
+ // pseudo-header will be ignored
|
|
+ if key[0] == ':' {
|
|
+ return nil
|
|
+ }
|
|
+ // check key, for i that saving a conversion if not using for range
|
|
+ for i := 0; i < len(key); i++ {
|
|
+ r := key[i]
|
|
+ if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
|
|
+ return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
|
|
+ }
|
|
+ }
|
|
+ if strings.HasSuffix(key, "-bin") {
|
|
+ return nil
|
|
+ }
|
|
+ // check value
|
|
+ for _, val := range vals {
|
|
+ if hasNotPrintable(val) {
|
|
+ return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
|
|
+ }
|
|
+ }
|
|
+ return nil
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
|
|
index 75301c514..09a667f33 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
|
|
@@ -116,7 +116,7 @@ type dnsBuilder struct{}
|
|
|
|
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
|
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
|
- host, port, err := parseTarget(target.Endpoint, defaultPort)
|
|
+ host, port, err := parseTarget(target.Endpoint(), defaultPort)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
@@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
|
disableServiceConfig: opts.DisableServiceConfig,
|
|
}
|
|
|
|
- if target.Authority == "" {
|
|
+ if target.URL.Host == "" {
|
|
d.resolver = defaultResolver
|
|
} else {
|
|
- d.resolver, err = customAuthorityResolver(target.Authority)
|
|
+ d.resolver, err = customAuthorityResolver(target.URL.Host)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
|
|
index 520d9229e..afac56572 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
|
|
@@ -20,13 +20,20 @@
|
|
// name without scheme back to gRPC as resolved address.
|
|
package passthrough
|
|
|
|
-import "google.golang.org/grpc/resolver"
|
|
+import (
|
|
+ "errors"
|
|
+
|
|
+ "google.golang.org/grpc/resolver"
|
|
+)
|
|
|
|
const scheme = "passthrough"
|
|
|
|
type passthroughBuilder struct{}
|
|
|
|
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
|
+ if target.Endpoint() == "" && opts.Dialer == nil {
|
|
+ return nil, errors.New("passthrough: received empty target in Build()")
|
|
+ }
|
|
r := &passthroughResolver{
|
|
target: target,
|
|
cc: cc,
|
|
@@ -45,7 +52,7 @@ type passthroughResolver struct {
|
|
}
|
|
|
|
func (r *passthroughResolver) start() {
|
|
- r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
|
|
+ r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
|
|
}
|
|
|
|
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
|
|
index 20852e59d..160911687 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
|
|
@@ -34,8 +34,8 @@ type builder struct {
|
|
}
|
|
|
|
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
|
|
- if target.Authority != "" {
|
|
- return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
|
|
+ if target.URL.Host != "" {
|
|
+ return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
|
|
}
|
|
|
|
// gRPC was parsing the dial target manually before PR #4817, and we
|
|
@@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv
|
|
}
|
|
addr := resolver.Address{Addr: endpoint}
|
|
if b.scheme == unixAbstractScheme {
|
|
- // prepend "\x00" to address for unix-abstract
|
|
- addr.Addr = "\x00" + addr.Addr
|
|
+ // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do
|
|
+ // not want trailing \0 in address.
|
|
+ addr.Addr = "@" + addr.Addr
|
|
}
|
|
cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
|
|
return &nopResolver{}, nil
|
|
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
new file mode 100644
index 000000000..11d82afcc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package serviceconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Duration defines JSON marshal and unmarshal methods to conform to the
+// protobuf JSON spec defined [here].
+//
+// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
+type Duration time.Duration
+
+func (d Duration) String() string {
+	return fmt.Sprint(time.Duration(d))
+}
+
+// MarshalJSON converts from d to a JSON string output.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	ns := time.Duration(d).Nanoseconds()
+	sec := ns / int64(time.Second)
+	ns = ns % int64(time.Second)
+
+	var sign string
+	if sec < 0 || ns < 0 {
+		sign, sec, ns = "-", -1*sec, -1*ns
+	}
+
+	// Generated output always contains 0, 3, 6, or 9 fractional digits,
+	// depending on required precision.
+	str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
+	str = strings.TrimSuffix(str, "000")
+	str = strings.TrimSuffix(str, "000")
+	str = strings.TrimSuffix(str, ".000")
+	return []byte(fmt.Sprintf("\"%ss\"", str)), nil
+}
+
+// UnmarshalJSON unmarshals b as a duration JSON string into d.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !strings.HasSuffix(s, "s") {
+		return fmt.Errorf("malformed duration %q: missing seconds unit", s)
+	}
+	neg := false
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+	}
+	ss := strings.SplitN(s[:len(s)-1], ".", 3)
+	if len(ss) > 2 {
+		return fmt.Errorf("malformed duration %q: too many decimals", s)
+	}
+	// hasDigits is set if either the whole or fractional part of the number is
+	// present, since both are optional but one is required.
+	hasDigits := false
+	var sec, ns int64
+	if len(ss[0]) > 0 {
+		var err error
+		if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
+			return fmt.Errorf("malformed duration %q: %v", s, err)
+		}
+		// Maximum seconds value per the durationpb spec.
+		const maxProtoSeconds = 315_576_000_000
+		if sec > maxProtoSeconds {
+			return fmt.Errorf("out of range: %q", s)
+		}
+		hasDigits = true
+	}
+	if len(ss) == 2 && len(ss[1]) > 0 {
+		if len(ss[1]) > 9 {
+			return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
+		}
+		var err error
+		if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
+			return fmt.Errorf("malformed duration %q: %v", s, err)
+		}
+		for i := 9; i > len(ss[1]); i-- {
+			ns *= 10
+		}
+		hasDigits = true
+	}
+	if !hasDigits {
+		return fmt.Errorf("malformed duration %q: contains no numbers", s)
+	}
+
+	if neg {
+		sec *= -1
+		ns *= -1
+	}
+
+	// Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
+	const maxSeconds = math.MaxInt64 / int64(time.Second)
+	const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
+	const minSeconds = math.MinInt64 / int64(time.Second)
+	const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
+
+	if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
+		*d = Duration(math.MaxInt64)
+	} else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
+		*d = Duration(math.MinInt64)
+	} else {
+		*d = Duration(sec*int64(time.Second) + ns)
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
index badbdbf59..51e733e49 100644
--- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
@@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
 // ServiceConfig contains a list of loadBalancingConfigs, each with a name and
 // config. This method iterates through that list in order, and stops at the
 // first policy that is supported.
-// - If the config for the first supported policy is invalid, the whole service
-//   config is invalid.
-// - If the list doesn't contain any supported policy, the whole service config
-//   is invalid.
+//   - If the config for the first supported policy is invalid, the whole service
+//     config is invalid.
+//   - If the list doesn't contain any supported policy, the whole service config
+//     is invalid.
 func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
 	var ir intermediateBalancerConfig
 	err := json.Unmarshal(b, &ir)
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index e5c6513ed..b0ead4f54 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool {
 	}
 	return proto.Equal(e.s.s, tse.s.s)
 }
+
+// IsRestrictedControlPlaneCode returns whether the status includes a code
+// restricted for control plane usage as defined by gRFC A54.
+func IsRestrictedControlPlaneCode(s *Status) bool {
+	switch s.Code() {
+	case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
+		return true
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 244f4b081..be5a9c81e 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -22,6 +22,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"net"
 	"runtime"
 	"strconv"
 	"sync"
@@ -29,6 +30,7 @@
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/grpcutil"
 	"google.golang.org/grpc/status"
 )
@@ -191,7 +193,7 @@ type goAway struct {
|
|
code http2.ErrCode
|
|
debugData []byte
|
|
headsUp bool
|
|
- closeConn bool
|
|
+ closeConn error // if set, loopyWriter will exit, resulting in conn closure
|
|
}
|
|
|
|
func (*goAway) isTransportResponseFrame() bool { return false }
|
|
@@ -209,6 +211,14 @@ type outFlowControlSizeRequest struct {
|
|
|
|
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
|
|
|
|
+// closeConnection is an instruction to tell the loopy writer to flush the
|
|
+// framer and exit, which will cause the transport's connection to be closed
|
|
+// (by the client or server). The transport itself will close after the reader
|
|
+// encounters the EOF caused by the connection closure.
|
|
+type closeConnection struct{}
|
|
+
|
|
+func (closeConnection) isTransportResponseFrame() bool { return false }
|
|
+
|
|
type outStreamState int
|
|
|
|
const (
|
|
@@ -408,7 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
|
|
select {
|
|
case <-c.ch:
|
|
case <-c.done:
|
|
- return nil, ErrConnClosing
|
|
+ return nil, errors.New("transport closed by client")
|
|
}
|
|
}
|
|
}
|
|
@@ -478,12 +488,14 @@ type loopyWriter struct {
|
|
hEnc *hpack.Encoder // HPACK encoder.
|
|
bdpEst *bdpEstimator
|
|
draining bool
|
|
+ conn net.Conn
|
|
+ logger *grpclog.PrefixLogger
|
|
|
|
// Side-specific handlers
|
|
ssGoAwayHandler func(*goAway) (bool, error)
|
|
}
|
|
|
|
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
|
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
|
|
var buf bytes.Buffer
|
|
l := &loopyWriter{
|
|
side: s,
|
|
@@ -496,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
|
|
hBuf: &buf,
|
|
hEnc: hpack.NewEncoder(&buf),
|
|
bdpEst: bdpEst,
|
|
+ conn: conn,
|
|
+ logger: logger,
|
|
}
|
|
return l
|
|
}
|
|
@@ -513,23 +527,26 @@ const minBatchSize = 1000
|
|
// 2. Stream level flow control quota available.
|
|
//
|
|
// In each iteration of run loop, other than processing the incoming control
|
|
-// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
|
|
-// This results in writing of HTTP2 frames into an underlying write buffer.
|
|
-// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
|
|
-// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
|
|
-// if the batch size is too low to give stream goroutines a chance to fill it up.
|
|
+// frame, loopy calls processData, which processes one node from the
|
|
+// activeStreams linked-list. This results in writing of HTTP2 frames into an
|
|
+// underlying write buffer. When there's no more control frames to read from
|
|
+// controlBuf, loopy flushes the write buffer. As an optimization, to increase
|
|
+// the batch size for each flush, loopy yields the processor, once if the batch
|
|
+// size is too low to give stream goroutines a chance to fill it up.
|
|
+//
|
|
+// Upon exiting, if the error causing the exit is not an I/O error, run()
|
|
+// flushes and closes the underlying connection. Otherwise, the connection is
|
|
+// left open to allow the I/O error to be encountered by the reader instead.
|
|
func (l *loopyWriter) run() (err error) {
|
|
defer func() {
|
|
- if err == ErrConnClosing {
|
|
- // Don't log ErrConnClosing as error since it happens
|
|
- // 1. When the connection is closed by some other known issue.
|
|
- // 2. User closed the connection.
|
|
- // 3. A graceful close of connection.
|
|
- if logger.V(logLevel) {
|
|
- logger.Infof("transport: loopyWriter.run returning. %v", err)
|
|
- }
|
|
- err = nil
|
|
+ if l.logger.V(logLevel) {
|
|
+ l.logger.Infof("loopyWriter exiting with error: %v", err)
|
|
}
|
|
+ if !isIOError(err) {
|
|
+ l.framer.writer.Flush()
|
|
+ l.conn.Close()
|
|
+ }
|
|
+ l.cbuf.finish()
|
|
}()
|
|
for {
|
|
it, err := l.cbuf.get(true)
|
|
@@ -574,7 +591,6 @@ func (l *loopyWriter) run() (err error) {
|
|
}
|
|
l.framer.writer.Flush()
|
|
break hasdata
|
|
-
|
|
}
|
|
}
|
|
}
|
|
@@ -583,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error
|
|
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
|
}
|
|
|
|
-func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
|
+func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
|
|
// Otherwise update the quota.
|
|
if w.streamID == 0 {
|
|
l.sendQuota += w.increment
|
|
- return nil
|
|
+ return
|
|
}
|
|
// Find the stream and update it.
|
|
if str, ok := l.estdStreams[w.streamID]; ok {
|
|
@@ -595,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error
|
|
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
|
str.state = active
|
|
l.activeStreams.enqueue(str)
|
|
- return nil
|
|
+ return
|
|
}
|
|
}
|
|
- return nil
|
|
}
|
|
|
|
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
|
@@ -606,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
|
}
|
|
|
|
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
|
- if err := l.applySettings(s.ss); err != nil {
|
|
- return err
|
|
- }
|
|
+ l.applySettings(s.ss)
|
|
return l.framer.fr.WriteSettingsAck()
|
|
}
|
|
|
|
-func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
|
+func (l *loopyWriter) registerStreamHandler(h *registerStream) {
|
|
str := &outStream{
|
|
id: h.streamID,
|
|
state: empty,
|
|
@@ -620,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
|
wq: h.wq,
|
|
}
|
|
l.estdStreams[h.streamID] = str
|
|
- return nil
|
|
}
|
|
|
|
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
|
if l.side == serverSide {
|
|
str, ok := l.estdStreams[h.streamID]
|
|
if !ok {
|
|
- if logger.V(logLevel) {
|
|
- logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
|
+ if l.logger.V(logLevel) {
|
|
+ l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
|
|
}
|
|
return nil
|
|
}
|
|
@@ -655,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
|
itl: &itemList{},
|
|
wq: h.wq,
|
|
}
|
|
- str.itl.enqueue(h)
|
|
- return l.originateStream(str)
|
|
+ return l.originateStream(str, h)
|
|
}
|
|
|
|
-func (l *loopyWriter) originateStream(str *outStream) error {
|
|
- hdr := str.itl.dequeue().(*headerFrame)
|
|
- if err := hdr.initStream(str.id); err != nil {
|
|
- if err == ErrConnClosing {
|
|
- return err
|
|
- }
|
|
- // Other errors(errStreamDrain) need not close transport.
|
|
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
|
|
+ // l.draining is set when handling GoAway. In which case, we want to avoid
|
|
+ // creating new streams.
|
|
+ if l.draining {
|
|
+ // TODO: provide a better error with the reason we are in draining.
|
|
+ hdr.onOrphaned(errStreamDrain)
|
|
return nil
|
|
}
|
|
+ if err := hdr.initStream(str.id); err != nil {
|
|
+ return err
|
|
+ }
|
|
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
|
return err
|
|
}
|
|
@@ -682,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
|
l.hBuf.Reset()
|
|
for _, f := range hf {
|
|
if err := l.hEnc.WriteField(f); err != nil {
|
|
- if logger.V(logLevel) {
|
|
- logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
|
|
+ if l.logger.V(logLevel) {
|
|
+ l.logger.Warningf("Encountered error while encoding headers: %v", err)
|
|
}
|
|
}
|
|
}
|
|
@@ -721,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
|
return nil
|
|
}
|
|
|
|
-func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
|
+func (l *loopyWriter) preprocessData(df *dataFrame) {
|
|
str, ok := l.estdStreams[df.streamID]
|
|
if !ok {
|
|
- return nil
|
|
+ return
|
|
}
|
|
// If we got data for a stream it means that
|
|
// stream was originated and the headers were sent out.
|
|
@@ -733,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
|
str.state = active
|
|
l.activeStreams.enqueue(str)
|
|
}
|
|
- return nil
|
|
}
|
|
|
|
func (l *loopyWriter) pingHandler(p *ping) error {
|
|
@@ -744,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error {
|
|
|
|
}
|
|
|
|
-func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
|
+func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
|
|
o.resp <- l.sendQuota
|
|
- return nil
|
|
}
|
|
|
|
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
|
@@ -763,8 +774,9 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
|
return err
|
|
}
|
|
}
|
|
- if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
|
|
- return ErrConnClosing
|
|
+ if l.draining && len(l.estdStreams) == 0 {
|
|
+ // Flush and close the connection; we are done with it.
|
|
+ return errors.New("finished processing active streams while in draining mode")
|
|
}
|
|
return nil
|
|
}
|
|
@@ -799,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
|
if l.side == clientSide {
|
|
l.draining = true
|
|
if len(l.estdStreams) == 0 {
|
|
- return ErrConnClosing
|
|
+ // Flush and close the connection; we are done with it.
|
|
+ return errors.New("received GOAWAY with no active streams")
|
|
}
|
|
}
|
|
return nil
|
|
@@ -820,7 +833,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
|
func (l *loopyWriter) handle(i interface{}) error {
|
|
switch i := i.(type) {
|
|
case *incomingWindowUpdate:
|
|
- return l.incomingWindowUpdateHandler(i)
|
|
+ l.incomingWindowUpdateHandler(i)
|
|
case *outgoingWindowUpdate:
|
|
return l.outgoingWindowUpdateHandler(i)
|
|
case *incomingSettings:
|
|
@@ -830,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error {
|
|
case *headerFrame:
|
|
return l.headerHandler(i)
|
|
case *registerStream:
|
|
- return l.registerStreamHandler(i)
|
|
+ l.registerStreamHandler(i)
|
|
case *cleanupStream:
|
|
return l.cleanupStreamHandler(i)
|
|
case *earlyAbortStream:
|
|
@@ -838,19 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error {
|
|
case *incomingGoAway:
|
|
return l.incomingGoAwayHandler(i)
|
|
case *dataFrame:
|
|
- return l.preprocessData(i)
|
|
+ l.preprocessData(i)
|
|
case *ping:
|
|
return l.pingHandler(i)
|
|
case *goAway:
|
|
return l.goAwayHandler(i)
|
|
case *outFlowControlSizeRequest:
|
|
- return l.outFlowControlSizeRequestHandler(i)
|
|
+ l.outFlowControlSizeRequestHandler(i)
|
|
+ case closeConnection:
|
|
+ // Just return a non-I/O error and run() will flush and close the
|
|
+ // connection.
|
|
+ return ErrConnClosing
|
|
default:
|
|
return fmt.Errorf("transport: unknown control message type %T", i)
|
|
}
|
|
+ return nil
|
|
}
|
|
|
|
-func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
|
+func (l *loopyWriter) applySettings(ss []http2.Setting) {
|
|
for _, s := range ss {
|
|
switch s.ID {
|
|
case http2.SettingInitialWindowSize:
|
|
@@ -869,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
|
updateHeaderTblSize(l.hEnc, s.Val)
|
|
}
|
|
}
|
|
- return nil
|
|
}
|
|
|
|
// processData removes the first stream from active streams, writes out at most 16KB
|
|
@@ -886,9 +903,9 @@ func (l *loopyWriter) processData() (bool, error) {
|
|
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
|
|
// A data item is represented by a dataFrame, since it later translates into
|
|
// multiple HTTP2 data frames.
|
|
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
|
|
+ // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
|
|
// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
|
|
- // maximum possilbe HTTP2 frame size.
|
|
+ // maximum possible HTTP2 frame size.
|
|
|
|
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
|
|
// Client sends out empty data frame with endStream = true
|
|
@@ -903,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) {
|
|
return false, err
|
|
}
|
|
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
|
- return false, nil
|
|
+ return false, err
|
|
}
|
|
} else {
|
|
l.activeStreams.enqueue(str)
|
|
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
|
|
index 9fa306b2e..bc8ee0747 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
|
|
@@ -47,3 +47,9 @@ const (
|
|
defaultClientMaxHeaderListSize = uint32(16 << 20)
|
|
defaultServerMaxHeaderListSize = uint32(16 << 20)
|
|
)
|
|
+
|
|
+// MaxStreamID is the upper bound for the stream ID before the current
|
|
+// transport gracefully closes and new transport is created for subsequent RPCs.
|
|
+// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
|
|
+// integer. It's exported so that tests can override it.
|
|
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
|
|
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
|
|
index 090120925..98f80e3fa 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
|
|
@@ -39,6 +39,7 @@ import (
|
|
"golang.org/x/net/http2"
|
|
"google.golang.org/grpc/codes"
|
|
"google.golang.org/grpc/credentials"
|
|
+ "google.golang.org/grpc/internal/grpclog"
|
|
"google.golang.org/grpc/internal/grpcutil"
|
|
"google.golang.org/grpc/metadata"
|
|
"google.golang.org/grpc/peer"
|
|
@@ -46,24 +47,32 @@ import (
|
|
"google.golang.org/grpc/status"
|
|
)
|
|
|
|
-// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
|
-// from inside an http.Handler. It requires that the http Server
|
|
-// supports HTTP/2.
|
|
+// NewServerHandlerTransport returns a ServerTransport handling gRPC from
|
|
+// inside an http.Handler, or writes an HTTP error to w and returns an error.
|
|
+// It requires that the http Server supports HTTP/2.
|
|
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
|
|
if r.ProtoMajor != 2 {
|
|
- return nil, errors.New("gRPC requires HTTP/2")
|
|
+ msg := "gRPC requires HTTP/2"
|
|
+ http.Error(w, msg, http.StatusBadRequest)
|
|
+ return nil, errors.New(msg)
|
|
}
|
|
if r.Method != "POST" {
|
|
- return nil, errors.New("invalid gRPC request method")
|
|
+ msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
|
|
+ http.Error(w, msg, http.StatusBadRequest)
|
|
+ return nil, errors.New(msg)
|
|
}
|
|
contentType := r.Header.Get("Content-Type")
|
|
// TODO: do we assume contentType is lowercase? we did before
|
|
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
|
|
if !validContentType {
|
|
- return nil, errors.New("invalid gRPC request content-type")
|
|
+ msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
|
|
+ http.Error(w, msg, http.StatusUnsupportedMediaType)
|
|
+ return nil, errors.New(msg)
|
|
}
|
|
if _, ok := w.(http.Flusher); !ok {
|
|
- return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
|
+ msg := "gRPC requires a ResponseWriter supporting http.Flusher"
|
|
+ http.Error(w, msg, http.StatusInternalServerError)
|
|
+ return nil, errors.New(msg)
|
|
}
|
|
|
|
st := &serverHandlerTransport{
|
|
@@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
|
|
contentSubtype: contentSubtype,
|
|
stats: stats,
|
|
}
|
|
+ st.logger = prefixLoggerForServerHandlerTransport(st)
|
|
|
|
if v := r.Header.Get("grpc-timeout"); v != "" {
|
|
to, err := decodeTimeout(v)
|
|
if err != nil {
|
|
- return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
|
|
+ msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
|
|
+ http.Error(w, msg, http.StatusBadRequest)
|
|
+ return nil, status.Error(codes.Internal, msg)
|
|
}
|
|
st.timeoutSet = true
|
|
st.timeout = to
|
|
@@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
|
|
for _, v := range vv {
|
|
v, err := decodeMetadataHeader(k, v)
|
|
if err != nil {
|
|
- return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
|
|
+ msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
|
|
+ http.Error(w, msg, http.StatusBadRequest)
|
|
+ return nil, status.Error(codes.Internal, msg)
|
|
}
|
|
metakv = append(metakv, k, v)
|
|
}
|
|
@@ -138,15 +152,19 @@ type serverHandlerTransport struct {
|
|
// TODO make sure this is consistent across handler_server and http2_server
|
|
contentSubtype string
|
|
|
|
- stats []stats.Handler
|
|
+ stats []stats.Handler
|
|
+ logger *grpclog.PrefixLogger
|
|
}
|
|
|
|
-func (ht *serverHandlerTransport) Close() {
|
|
- ht.closeOnce.Do(ht.closeCloseChanOnce)
|
|
+func (ht *serverHandlerTransport) Close(err error) {
|
|
+ ht.closeOnce.Do(func() {
|
|
+ if ht.logger.V(logLevel) {
|
|
+ ht.logger.Infof("Closing: %v", err)
|
|
+ }
|
|
+ close(ht.closedCh)
|
|
+ })
|
|
}
|
|
|
|
-func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
|
-
|
|
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
|
|
|
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
|
@@ -236,7 +254,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
|
})
|
|
}
|
|
}
|
|
- ht.Close()
|
|
+ ht.Close(errors.New("finished writing status"))
|
|
return err
|
|
}
|
|
|
|
@@ -346,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
|
case <-ht.req.Context().Done():
|
|
}
|
|
cancel()
|
|
- ht.Close()
|
|
+ ht.Close(errors.New("request is done processing"))
|
|
}()
|
|
|
|
req := ht.req
|
|
@@ -435,17 +453,17 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
|
|
|
|
func (ht *serverHandlerTransport) IncrMsgRecv() {}
|
|
|
|
-func (ht *serverHandlerTransport) Drain() {
|
|
+func (ht *serverHandlerTransport) Drain(debugData string) {
|
|
panic("Drain() is not implemented")
|
|
}
|
|
|
|
// mapRecvMsgError returns the non-nil err into the appropriate
|
|
// error value as expected by callers of *grpc.parser.recvMsg.
|
|
// In particular, in can only be:
|
|
-// * io.EOF
|
|
-// * io.ErrUnexpectedEOF
|
|
-// * of type transport.ConnectionError
|
|
-// * an error from the status package
|
|
+// - io.EOF
|
|
+// - io.ErrUnexpectedEOF
|
|
+// - of type transport.ConnectionError
|
|
+// - an error from the status package
|
|
func mapRecvMsgError(err error) error {
|
|
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
|
return err
|
|
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
|
|
index 28c77af70..326bf0848 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
|
|
@@ -38,8 +38,11 @@ import (
|
|
"google.golang.org/grpc/credentials"
|
|
"google.golang.org/grpc/internal/channelz"
|
|
icredentials "google.golang.org/grpc/internal/credentials"
|
|
+ "google.golang.org/grpc/internal/grpclog"
|
|
+ "google.golang.org/grpc/internal/grpcsync"
|
|
"google.golang.org/grpc/internal/grpcutil"
|
|
imetadata "google.golang.org/grpc/internal/metadata"
|
|
+ istatus "google.golang.org/grpc/internal/status"
|
|
"google.golang.org/grpc/internal/syscall"
|
|
"google.golang.org/grpc/internal/transport/networktype"
|
|
"google.golang.org/grpc/keepalive"
|
|
@@ -57,11 +60,15 @@ var clientConnectionCounter uint64
|
|
|
|
// http2Client implements the ClientTransport interface with HTTP2.
|
|
type http2Client struct {
|
|
- lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
|
- ctx context.Context
|
|
- cancel context.CancelFunc
|
|
- ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
|
- userAgent string
|
|
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
|
+ ctx context.Context
|
|
+ cancel context.CancelFunc
|
|
+ ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
|
+ userAgent string
|
|
+ // address contains the resolver returned address for this transport.
|
|
+ // If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
|
|
+ // passed to `NewStream`, when determining the :authority header.
|
|
+ address resolver.Address
|
|
md metadata.MD
|
|
conn net.Conn // underlying communication channel
|
|
loopy *loopyWriter
|
|
@@ -99,16 +106,13 @@ type http2Client struct {
|
|
maxSendHeaderListSize *uint32
|
|
|
|
bdpEst *bdpEstimator
|
|
- // onPrefaceReceipt is a callback that client transport calls upon
|
|
- // receiving server preface to signal that a succefull HTTP2
|
|
- // connection was established.
|
|
- onPrefaceReceipt func()
|
|
|
|
maxConcurrentStreams uint32
|
|
streamQuota int64
|
|
streamsQuotaAvailable chan struct{}
|
|
waitingStreams uint32
|
|
nextID uint32
|
|
+ registeredCompressors string
|
|
|
|
// Do not access controlBuf with mu held.
|
|
mu sync.Mutex // guard the following variables
|
|
@@ -137,12 +141,12 @@ type http2Client struct {
|
|
channelzID *channelz.Identifier
|
|
czData *channelzData
|
|
|
|
- onGoAway func(GoAwayReason)
|
|
- onClose func()
|
|
+ onClose func(GoAwayReason)
|
|
|
|
bufferPool *bufferPool
|
|
|
|
connectionID uint64
|
|
+ logger *grpclog.PrefixLogger
|
|
}
|
|
|
|
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
|
|
@@ -194,7 +198,7 @@ func isTemporary(err error) bool {
|
|
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
|
|
// and starts to receive messages on it. Non-nil error returns if construction
|
|
// fails.
|
|
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
|
|
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
|
|
scheme := "http"
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
defer func() {
|
|
@@ -214,14 +218,40 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|
if opts.FailOnNonTempDialError {
|
|
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
|
|
}
|
|
- return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
|
|
+ return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
|
|
}
|
|
+
|
|
// Any further errors will close the underlying connection
|
|
defer func(conn net.Conn) {
|
|
if err != nil {
|
|
conn.Close()
|
|
}
|
|
}(conn)
|
|
+
|
|
+ // The following defer and goroutine monitor the connectCtx for cancelation
|
|
+ // and deadline. On context expiration, the connection is hard closed and
|
|
+ // this function will naturally fail as a result. Otherwise, the defer
|
|
+ // waits for the goroutine to exit to prevent the context from being
|
|
+ // monitored (and to prevent the connection from ever being closed) after
|
|
+ // returning from this function.
|
|
+ ctxMonitorDone := grpcsync.NewEvent()
|
|
+ newClientCtx, newClientDone := context.WithCancel(connectCtx)
|
|
+ defer func() {
|
|
+ newClientDone() // Awaken the goroutine below if connectCtx hasn't expired.
|
|
+ <-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
|
|
+ }()
|
|
+ go func(conn net.Conn) {
|
|
+ defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
|
|
+ <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
|
|
+ if err := connectCtx.Err(); err != nil {
|
|
+ // connectCtx expired before exiting the function. Hard close the connection.
|
|
+ if logger.V(logLevel) {
|
|
+ logger.Infof("Aborting due to connect deadline expiring: %v", err)
|
|
+ }
|
|
+ conn.Close()
|
|
+ }
|
|
+ }(conn)
|
|
+
|
|
kp := opts.KeepaliveParams
|
|
// Validate keepalive parameters.
|
|
if kp.Time == 0 {
|
|
@@ -253,15 +283,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|
}
|
|
}
|
|
if transportCreds != nil {
|
|
- rawConn := conn
|
|
- // Pull the deadline from the connectCtx, which will be used for
|
|
- // timeouts in the authentication protocol handshake. Can ignore the
|
|
- // boolean as the deadline will return the zero value, which will make
|
|
- // the conn not timeout on I/O operations.
|
|
- deadline, _ := connectCtx.Deadline()
|
|
- rawConn.SetDeadline(deadline)
|
|
- conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
|
|
- rawConn.SetDeadline(time.Time{})
|
|
+ conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
|
|
if err != nil {
|
|
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
|
|
}
|
|
@@ -299,6 +321,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|
ctxDone: ctx.Done(), // Cache Done chan.
|
|
cancel: cancel,
|
|
userAgent: opts.UserAgent,
|
|
+ registeredCompressors: grpcutil.RegisteredCompressors(),
|
|
+ address: addr,
|
|
conn: conn,
|
|
remoteAddr: conn.RemoteAddr(),
|
|
localAddr: conn.LocalAddr(),
|
|
@@ -315,17 +339,18 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|
kp: kp,
|
|
statsHandlers: opts.StatsHandlers,
|
|
initialWindowSize: initialWindowSize,
|
|
- onPrefaceReceipt: onPrefaceReceipt,
|
|
nextID: 1,
|
|
maxConcurrentStreams: defaultMaxStreamsClient,
|
|
streamQuota: defaultMaxStreamsClient,
|
|
streamsQuotaAvailable: make(chan struct{}, 1),
|
|
czData: new(channelzData),
|
|
- onGoAway: onGoAway,
|
|
- onClose: onClose,
|
|
keepaliveEnabled: keepaliveEnabled,
|
|
bufferPool: newBufferPool(),
|
|
+ onClose: onClose,
|
|
}
|
|
+ t.logger = prefixLoggerForClientTransport(t)
|
|
+ // Add peer information to the http2client context.
|
|
+ t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
|
|
|
if md, ok := addr.Metadata.(*metadata.MD); ok {
|
|
t.md = *md
|
|
@@ -361,21 +386,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|
t.kpDormancyCond = sync.NewCond(&t.mu)
|
|
go t.keepalive()
|
|
}
|
|
- // Start the reader goroutine for incoming message. Each transport has
|
|
- // a dedicated goroutine which reads HTTP2 frame from network. Then it
|
|
- // dispatches the frame to the corresponding stream entity.
|
|
- go t.reader()
|
|
+
|
|
+ // Start the reader goroutine for incoming messages. Each transport has a
|
|
+ // dedicated goroutine which reads HTTP2 frames from the network. Then it
|
|
+ // dispatches the frame to the corresponding stream entity. When the
|
|
+ // server preface is received, readerErrCh is closed. If an error occurs
|
|
+ // first, an error is pushed to the channel. This must be checked before
|
|
+ // returning from this function.
|
|
+ readerErrCh := make(chan error, 1)
|
|
+ go t.reader(readerErrCh)
|
|
+ defer func() {
|
|
+ if err == nil {
|
|
+ err = <-readerErrCh
|
|
+ }
|
|
+ if err != nil {
|
|
+ t.Close(err)
|
|
+ }
|
|
+ }()
|
|
|
|
// Send connection preface to server.
|
|
n, err := t.conn.Write(clientPreface)
|
|
if err != nil {
|
|
err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
|
|
- t.Close(err)
|
|
return nil, err
|
|
}
|
|
if n != len(clientPreface) {
|
|
err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
|
- t.Close(err)
|
|
return nil, err
|
|
}
|
|
var ss []http2.Setting
|
|
@@ -395,14 +431,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|
err = t.framer.fr.WriteSettings(ss...)
|
|
if err != nil {
|
|
err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
|
|
- t.Close(err)
|
|
return nil, err
|
|
}
|
|
// Adjust the connection flow control window if needed.
|
|
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
|
|
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
|
|
err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
|
|
- t.Close(err)
|
|
return nil, err
|
|
}
|
|
}
|
|
@@ -413,17 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|
return nil, err
|
|
}
|
|
go func() {
|
|
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
|
|
- err := t.loopy.run()
|
|
- if err != nil {
|
|
- if logger.V(logLevel) {
|
|
- logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
|
- }
|
|
- }
|
|
- // Do not close the transport. Let reader goroutine handle it since
|
|
- // there might be data in the buffers.
|
|
- t.conn.Close()
|
|
- t.controlBuf.finish()
|
|
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
|
+ t.loopy.run()
|
|
close(t.writerDone)
|
|
}()
|
|
return t, nil
|
|
@@ -469,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
|
func (t *http2Client) getPeer() *peer.Peer {
|
|
return &peer.Peer{
|
|
Addr: t.remoteAddr,
|
|
- AuthInfo: t.authInfo,
|
|
+ AuthInfo: t.authInfo, // Can be nil
|
|
}
|
|
}
|
|
|
|
@@ -505,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
|
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
|
|
}
|
|
|
|
+ registeredCompressors := t.registeredCompressors
|
|
if callHdr.SendCompress != "" {
|
|
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
|
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
|
|
+ // Include the outgoing compressor name when compressor is not registered
|
|
+ // via encoding.RegisterCompressor. This is possible when client uses
|
|
+ // WithCompressor dial option.
|
|
+ if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
|
|
+ if registeredCompressors != "" {
|
|
+ registeredCompressors += ","
|
|
+ }
|
|
+ registeredCompressors += callHdr.SendCompress
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if registeredCompressors != "" {
|
|
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
|
|
}
|
|
if dl, ok := ctx.Deadline(); ok {
|
|
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
|
@@ -587,7 +625,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
|
|
for _, c := range t.perRPCCreds {
|
|
data, err := c.GetRequestMetadata(ctx, audience)
|
|
if err != nil {
|
|
- if _, ok := status.FromError(err); ok {
|
|
+ if st, ok := status.FromError(err); ok {
|
|
+ // Restrict the code to the list allowed by gRFC A54.
|
|
+ if istatus.IsRestrictedControlPlaneCode(st) {
|
|
+ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
|
|
+ }
|
|
return nil, err
|
|
}
|
|
|
|
@@ -616,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
|
}
|
|
data, err := callCreds.GetRequestMetadata(ctx, audience)
|
|
if err != nil {
|
|
- return nil, status.Errorf(codes.Internal, "transport: %v", err)
|
|
+ if st, ok := status.FromError(err); ok {
|
|
+ // Restrict the code to the list allowed by gRFC A54.
|
|
+ if istatus.IsRestrictedControlPlaneCode(st) {
|
|
+ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
|
|
+ }
|
|
+ return nil, err
|
|
+ }
|
|
+ return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
|
|
}
|
|
callAuthData = make(map[string]string, len(data))
|
|
for k, v := range data {
|
|
@@ -632,13 +681,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
|
// NewStream errors result in transparent retry, as they mean nothing went onto
|
|
// the wire. However, there are two notable exceptions:
|
|
//
|
|
-// 1. If the stream headers violate the max header list size allowed by the
|
|
-// server. It's possible this could succeed on another transport, even if
|
|
-// it's unlikely, but do not transparently retry.
|
|
-// 2. If the credentials errored when requesting their headers. In this case,
|
|
-// it's possible a retry can fix the problem, but indefinitely transparently
|
|
-// retrying is not appropriate as it is likely the credentials, if they can
|
|
-// eventually succeed, would need I/O to do so.
|
|
+// 1. If the stream headers violate the max header list size allowed by the
|
|
+// server. It's possible this could succeed on another transport, even if
|
|
+// it's unlikely, but do not transparently retry.
|
|
+// 2. If the credentials errored when requesting their headers. In this case,
|
|
+// it's possible a retry can fix the problem, but indefinitely transparently
|
|
+// retrying is not appropriate as it is likely the credentials, if they can
|
|
+// eventually succeed, would need I/O to do so.
|
|
type NewStreamError struct {
|
|
Err error
|
|
|
|
@@ -653,6 +702,18 @@ func (e NewStreamError) Error() string {
|
|
// streams. All non-nil errors returned will be *NewStreamError.
|
|
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
|
|
ctx = peer.NewContext(ctx, t.getPeer())
|
|
+
|
|
+ // ServerName field of the resolver returned address takes precedence over
|
|
+ // Host field of CallHdr to determine the :authority header. This is because,
|
|
+ // the ServerName field takes precedence for server authentication during
|
|
+ // TLS handshake, and the :authority header should match the value used
|
|
+ // for server authentication.
|
|
+ if t.address.ServerName != "" {
|
|
+ newCallHdr := *callHdr
|
|
+ newCallHdr.Host = t.address.ServerName
|
|
+ callHdr = &newCallHdr
|
|
+ }
|
|
+
|
|
headerFields, err := t.createHeaderFields(ctx, callHdr)
|
|
if err != nil {
|
|
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
|
|
@@ -677,15 +738,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
|
endStream: false,
|
|
initStream: func(id uint32) error {
|
|
t.mu.Lock()
|
|
- if state := t.state; state != reachable {
|
|
+ // TODO: handle transport closure in loopy instead and remove this
|
|
+ // initStream is never called when transport is draining.
|
|
+ if t.state == closing {
|
|
t.mu.Unlock()
|
|
- // Do a quick cleanup.
|
|
- err := error(errStreamDrain)
|
|
- if state == closing {
|
|
- err = ErrConnClosing
|
|
- }
|
|
- cleanup(err)
|
|
- return err
|
|
+ cleanup(ErrConnClosing)
|
|
+ return ErrConnClosing
|
|
}
|
|
if channelz.IsOn() {
|
|
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
|
@@ -703,6 +761,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
|
}
|
|
firstTry := true
|
|
var ch chan struct{}
|
|
+ transportDrainRequired := false
|
|
checkForStreamQuota := func(it interface{}) bool {
|
|
if t.streamQuota <= 0 { // Can go negative if server decreases it.
|
|
if firstTry {
|
|
@@ -718,10 +777,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
|
h := it.(*headerFrame)
|
|
h.streamID = t.nextID
|
|
t.nextID += 2
|
|
+
|
|
+ // Drain client transport if nextID > MaxStreamID which signals gRPC that
|
|
+ // the connection is closed and a new one must be created for subsequent RPCs.
|
|
+ transportDrainRequired = t.nextID > MaxStreamID
|
|
+
|
|
s.id = h.streamID
|
|
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
|
|
t.mu.Lock()
|
|
- if t.activeStreams == nil { // Can be niled from Close().
|
|
+ if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
|
|
t.mu.Unlock()
|
|
return false // Don't create a stream if the transport is already closed.
|
|
}
|
|
@@ -797,6 +861,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
|
sh.HandleRPC(s.ctx, outHeader)
|
|
}
|
|
}
|
|
+ if transportDrainRequired {
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
|
|
+ }
|
|
+ t.GracefulClose()
|
|
+ }
|
|
return s, nil
|
|
}
|
|
|
|
@@ -878,20 +948,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
|
// Close kicks off the shutdown process of the transport. This should be called
|
|
// only once on a transport. Once it is called, the transport should not be
|
|
// accessed any more.
|
|
-//
|
|
-// This method blocks until the addrConn that initiated this transport is
|
|
-// re-connected. This happens because t.onClose() begins reconnect logic at the
|
|
-// addrConn level and blocks until the addrConn is successfully connected.
|
|
func (t *http2Client) Close(err error) {
|
|
t.mu.Lock()
|
|
- // Make sure we only Close once.
|
|
+ // Make sure we only close once.
|
|
if t.state == closing {
|
|
t.mu.Unlock()
|
|
return
|
|
}
|
|
- // Call t.onClose before setting the state to closing to prevent the client
|
|
- // from attempting to create new streams ASAP.
|
|
- t.onClose()
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Closing: %v", err)
|
|
+ }
|
|
+ // Call t.onClose ASAP to prevent the client from attempting to create new
|
|
+ // streams.
|
|
+ if t.state != draining {
|
|
+ t.onClose(GoAwayInvalid)
|
|
+ }
|
|
t.state = closing
|
|
streams := t.activeStreams
|
|
t.activeStreams = nil
|
|
@@ -941,11 +1012,15 @@ func (t *http2Client) GracefulClose() {
|
|
t.mu.Unlock()
|
|
return
|
|
}
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("GracefulClose called")
|
|
+ }
|
|
+ t.onClose(GoAwayInvalid)
|
|
t.state = draining
|
|
active := len(t.activeStreams)
|
|
t.mu.Unlock()
|
|
if active == 0 {
|
|
- t.Close(ErrConnClosing)
|
|
+ t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
|
|
return
|
|
}
|
|
t.controlBuf.put(&incomingGoAway{})
|
|
@@ -1102,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
|
}
|
|
statusCode, ok := http2ErrConvTab[f.ErrCode]
|
|
if !ok {
|
|
- if logger.V(logLevel) {
|
|
- logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
|
|
}
|
|
statusCode = codes.Unknown
|
|
}
|
|
@@ -1185,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
|
t.mu.Unlock()
|
|
return
|
|
}
|
|
- if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
|
|
- if logger.V(logLevel) {
|
|
- logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
|
|
- }
|
|
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
|
|
+ // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
|
|
+ // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
|
|
+ // enabled by default and double the configure KEEPALIVE_TIME used for new connections
|
|
+ // on that channel.
|
|
+ logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
|
|
}
|
|
id := f.LastStreamID
|
|
if id > 0 && id%2 == 0 {
|
|
@@ -1221,8 +1298,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
|
// Notify the clientconn about the GOAWAY before we set the state to
|
|
// draining, to allow the client to stop attempting to create streams
|
|
// before disallowing new streams on this connection.
|
|
- t.onGoAway(t.goAwayReason)
|
|
- t.state = draining
|
|
+ if t.state != draining {
|
|
+ t.onClose(t.goAwayReason)
|
|
+ t.state = draining
|
|
+ }
|
|
}
|
|
// All streams with IDs greater than the GoAwayId
|
|
// and smaller than the previous GoAway ID should be killed.
|
|
@@ -1230,24 +1309,35 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
|
if upperLimit == 0 { // This is the first GoAway Frame.
|
|
upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
|
|
}
|
|
+
|
|
+ t.prevGoAwayID = id
|
|
+ if len(t.activeStreams) == 0 {
|
|
+ t.mu.Unlock()
|
|
+ t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
|
|
+ return
|
|
+ }
|
|
+
|
|
+ streamsToClose := make([]*Stream, 0)
|
|
for streamID, stream := range t.activeStreams {
|
|
if streamID > id && streamID <= upperLimit {
|
|
// The stream was unprocessed by the server.
|
|
- atomic.StoreUint32(&stream.unprocessed, 1)
|
|
- t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
|
|
+ if streamID > id && streamID <= upperLimit {
|
|
+ atomic.StoreUint32(&stream.unprocessed, 1)
|
|
+ streamsToClose = append(streamsToClose, stream)
|
|
+ }
|
|
}
|
|
}
|
|
- t.prevGoAwayID = id
|
|
- active := len(t.activeStreams)
|
|
t.mu.Unlock()
|
|
- if active == 0 {
|
|
- t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
|
|
+ // Called outside t.mu because closeStream can take controlBuf's mu, which
|
|
+ // could induce deadlock and is not allowed.
|
|
+ for _, stream := range streamsToClose {
|
|
+ t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
|
|
}
|
|
}
|
|
|
|
// setGoAwayReason sets the value of t.goAwayReason based
|
|
// on the GoAway frame received.
|
|
-// It expects a lock on transport's mutext to be held by
|
|
+// It expects a lock on transport's mutex to be held by
|
|
// the caller.
|
|
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
|
|
t.goAwayReason = GoAwayNoReason
|
|
@@ -1469,33 +1559,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
|
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
|
|
}
|
|
|
|
-// reader runs as a separate goroutine in charge of reading data from network
|
|
-// connection.
|
|
-//
|
|
-// TODO(zhaoq): currently one reader per transport. Investigate whether this is
|
|
-// optimal.
|
|
-// TODO(zhaoq): Check the validity of the incoming frame sequence.
|
|
-func (t *http2Client) reader() {
|
|
- defer close(t.readerDone)
|
|
- // Check the validity of server preface.
|
|
+// readServerPreface reads and handles the initial settings frame from the
|
|
+// server.
|
|
+func (t *http2Client) readServerPreface() error {
|
|
frame, err := t.framer.fr.ReadFrame()
|
|
if err != nil {
|
|
- err = connectionErrorf(true, err, "error reading server preface: %v", err)
|
|
- t.Close(err) // this kicks off resetTransport, so must be last before return
|
|
- return
|
|
- }
|
|
- t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
|
- if t.keepaliveEnabled {
|
|
- atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
|
+ return connectionErrorf(true, err, "error reading server preface: %v", err)
|
|
}
|
|
sf, ok := frame.(*http2.SettingsFrame)
|
|
if !ok {
|
|
- // this kicks off resetTransport, so must be last before return
|
|
- t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
|
|
- return
|
|
+ return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
|
|
}
|
|
- t.onPrefaceReceipt()
|
|
t.handleSettings(sf, true)
|
|
+ return nil
|
|
+}
|
|
+
|
|
+// reader verifies the server preface and reads all subsequent data from
|
|
+// network connection. If the server preface is not read successfully, an
|
|
+// error is pushed to errCh; otherwise errCh is closed with no error.
|
|
+func (t *http2Client) reader(errCh chan<- error) {
|
|
+ defer close(t.readerDone)
|
|
+
|
|
+ if err := t.readServerPreface(); err != nil {
|
|
+ errCh <- err
|
|
+ return
|
|
+ }
|
|
+ close(errCh)
|
|
+ if t.keepaliveEnabled {
|
|
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
|
+ }
|
|
|
|
// loop to keep reading incoming messages on this transport.
|
|
for {
|
|
@@ -1698,3 +1790,9 @@ func (t *http2Client) getOutFlowWindow() int64 {
|
|
return -2
|
|
}
|
|
}
|
|
+
|
|
+func (t *http2Client) stateForTesting() transportState {
|
|
+ t.mu.Lock()
|
|
+ defer t.mu.Unlock()
|
|
+ return t.state
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
|
|
index 28bcba0a3..ec4eef213 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
|
|
@@ -21,6 +21,7 @@ package transport
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
+ "errors"
|
|
"fmt"
|
|
"io"
|
|
"math"
|
|
@@ -34,13 +35,16 @@ import (
|
|
"github.com/golang/protobuf/proto"
|
|
"golang.org/x/net/http2"
|
|
"golang.org/x/net/http2/hpack"
|
|
+ "google.golang.org/grpc/internal/grpclog"
|
|
"google.golang.org/grpc/internal/grpcutil"
|
|
+ "google.golang.org/grpc/internal/pretty"
|
|
"google.golang.org/grpc/internal/syscall"
|
|
|
|
"google.golang.org/grpc/codes"
|
|
"google.golang.org/grpc/credentials"
|
|
"google.golang.org/grpc/internal/channelz"
|
|
"google.golang.org/grpc/internal/grpcrand"
|
|
+ "google.golang.org/grpc/internal/grpcsync"
|
|
"google.golang.org/grpc/keepalive"
|
|
"google.golang.org/grpc/metadata"
|
|
"google.golang.org/grpc/peer"
|
|
@@ -101,13 +105,13 @@ type http2Server struct {
|
|
|
|
mu sync.Mutex // guard the following
|
|
|
|
- // drainChan is initialized when Drain() is called the first time.
|
|
- // After which the server writes out the first GoAway(with ID 2^31-1) frame.
|
|
- // Then an independent goroutine will be launched to later send the second GoAway.
|
|
- // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
|
|
- // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
|
|
- // already underway.
|
|
- drainChan chan struct{}
|
|
+ // drainEvent is initialized when Drain() is called the first time. After
|
|
+ // which the server writes out the first GoAway(with ID 2^31-1) frame. Then
|
|
+ // an independent goroutine will be launched to later send the second
|
|
+ // GoAway. During this time we don't want to write another first GoAway(with
|
|
+ // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is
|
|
+ // already initialized since draining is already underway.
|
|
+ drainEvent *grpcsync.Event
|
|
state transportState
|
|
activeStreams map[uint32]*Stream
|
|
// idle is the time instant when the connection went idle.
|
|
@@ -127,6 +131,8 @@ type http2Server struct {
|
|
// This lock may not be taken if mu is already held.
|
|
maxStreamMu sync.Mutex
|
|
maxStreamID uint32 // max stream ID ever seen
|
|
+
|
|
+ logger *grpclog.PrefixLogger
|
|
}
|
|
|
|
// NewServerTransport creates a http2 transport with conn and configuration
|
|
@@ -165,15 +171,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|
ID: http2.SettingMaxFrameSize,
|
|
Val: http2MaxFrameLen,
|
|
}}
|
|
- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
|
|
- // permitted in the HTTP2 spec.
|
|
- maxStreams := config.MaxStreams
|
|
- if maxStreams == 0 {
|
|
- maxStreams = math.MaxUint32
|
|
- } else {
|
|
+ if config.MaxStreams != math.MaxUint32 {
|
|
isettings = append(isettings, http2.Setting{
|
|
ID: http2.SettingMaxConcurrentStreams,
|
|
- Val: maxStreams,
|
|
+ Val: config.MaxStreams,
|
|
})
|
|
}
|
|
dynamicWindow := true
|
|
@@ -252,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|
framer: framer,
|
|
readerDone: make(chan struct{}),
|
|
writerDone: make(chan struct{}),
|
|
- maxStreams: maxStreams,
|
|
+ maxStreams: config.MaxStreams,
|
|
inTapHandle: config.InTapHandle,
|
|
fc: &trInFlow{limit: uint32(icwz)},
|
|
state: reachable,
|
|
@@ -265,6 +266,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|
czData: new(channelzData),
|
|
bufferPool: newBufferPool(),
|
|
}
|
|
+ t.logger = prefixLoggerForServerTransport(t)
|
|
+ // Add peer information to the http2server context.
|
|
+ t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
|
+
|
|
t.controlBuf = newControlBuffer(t.done)
|
|
if dynamicWindow {
|
|
t.bdpEst = &bdpEstimator{
|
|
@@ -290,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|
|
|
defer func() {
|
|
if err != nil {
|
|
- t.Close()
|
|
+ t.Close(err)
|
|
}
|
|
}()
|
|
|
|
@@ -326,23 +331,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|
t.handleSettings(sf)
|
|
|
|
go func() {
|
|
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
|
|
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
|
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
|
|
- if err := t.loopy.run(); err != nil {
|
|
- if logger.V(logLevel) {
|
|
- logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
|
- }
|
|
- }
|
|
- t.conn.Close()
|
|
- t.controlBuf.finish()
|
|
+ t.loopy.run()
|
|
close(t.writerDone)
|
|
}()
|
|
go t.keepalive()
|
|
return t, nil
|
|
}
|
|
|
|
-// operateHeader takes action on the decoded headers.
|
|
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
|
|
+// operateHeaders takes action on the decoded headers. Returns an error if fatal
+// error encountered and transport needs to close, otherwise returns nil.
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@@ -358,15 +358,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
rstCode: http2.ErrCodeFrameSize,
onWrite: func() {},
})
- return false
+ return nil
}
if streamID%2 != 1 || streamID <= t.maxStreamID {
// illegal gRPC stream id.
- if logger.V(logLevel) {
- logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
- }
- return true
+ return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
}
t.maxStreamID = streamID
@@ -378,13 +375,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
fc: &inFlow{limit: uint32(t.initialWindowSize)},
}
var (
- // If a gRPC Response-Headers has already been received, then it means
- // that the peer is speaking gRPC and we are in gRPC mode.
- isGRPC = false
- mdata = make(map[string][]string)
- httpMethod string
- // headerError is set if an error is encountered while parsing the headers
- headerError bool
+ // if false, content-type was missing or invalid
+ isGRPC = false
+ contentType = ""
+ mdata = make(metadata.MD, len(frame.Fields))
+ httpMethod string
+ // these are set if an error is encountered while parsing the headers
+ protocolError bool
+ headerError *status.Status
timeoutSet bool
timeout time.Duration
@@ -395,11 +393,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
case "content-type":
contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
if !validContentType {
+ contentType = hf.Value
break
}
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
s.contentSubtype = contentSubtype
isGRPC = true
+
+ case "grpc-accept-encoding":
+ mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+ if hf.Value == "" {
+ continue
+ }
+ compressors := hf.Value
+ if s.clientAdvertisedCompressors != "" {
+ compressors = s.clientAdvertisedCompressors + "," + compressors
+ }
+ s.clientAdvertisedCompressors = compressors
case "grpc-encoding":
s.recvCompress = hf.Value
case ":method":
@@ -410,23 +420,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
timeoutSet = true
var err error
if timeout, err = decodeTimeout(hf.Value); err != nil {
- headerError = true
+ headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
}
// "Transports must consider requests containing the Connection header
// as malformed." - A41
case "connection":
- if logger.V(logLevel) {
- logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
}
- headerError = true
+ protocolError = true
default:
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
break
}
v, err := decodeMetadataHeader(hf.Name, hf.Value)
if err != nil {
- headerError = true
- logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
+ t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
break
}
mdata[hf.Name] = append(mdata[hf.Name], v)
@@ -440,27 +450,47 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// error, this takes precedence over a client not speaking gRPC.
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
- if logger.V(logLevel) {
- logger.Errorf("transport: %v", errMsg)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early: %v", errMsg)
}
t.controlBuf.put(&earlyAbortStream{
- httpStatus: 400,
+ httpStatus: http.StatusBadRequest,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.New(codes.Internal, errMsg),
rst: !frame.StreamEnded(),
})
- return false
+ return nil
}
- if !isGRPC || headerError {
+ if protocolError {
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: http2.ErrCodeProtocol,
onWrite: func() {},
})
- return false
+ return nil
+ }
+ if !isGRPC {
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusUnsupportedMediaType,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
+ rst: !frame.StreamEnded(),
+ })
+ return nil
+ }
+ if headerError != nil {
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusBadRequest,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: headerError,
+ rst: !frame.StreamEnded(),
+ })
+ return nil
}
// "If :authority is missing, Host must be renamed to :authority." - A41
|
|
@@ -485,14 +515,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|
} else {
|
|
s.ctx, s.cancel = context.WithCancel(t.ctx)
|
|
}
|
|
- pr := &peer.Peer{
|
|
- Addr: t.remoteAddr,
|
|
- }
|
|
- // Attach Auth info if there is any.
|
|
- if t.authInfo != nil {
|
|
- pr.AuthInfo = t.authInfo
|
|
- }
|
|
- s.ctx = peer.NewContext(s.ctx, pr)
|
|
+
|
|
// Attach the received metadata to the context.
|
|
if len(mdata) > 0 {
|
|
s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
|
|
@@ -507,7 +530,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|
if t.state != reachable {
|
|
t.mu.Unlock()
|
|
s.cancel()
|
|
- return false
|
|
+ return nil
|
|
}
|
|
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
|
t.mu.Unlock()
|
|
@@ -518,13 +541,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|
onWrite: func() {},
|
|
})
|
|
s.cancel()
|
|
- return false
|
|
+ return nil
|
|
}
|
|
if httpMethod != http.MethodPost {
|
|
t.mu.Unlock()
|
|
- errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
|
|
- if logger.V(logLevel) {
|
|
- logger.Infof("transport: %v", errMsg)
|
|
+ errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Aborting the stream early: %v", errMsg)
|
|
}
|
|
t.controlBuf.put(&earlyAbortStream{
|
|
httpStatus: 405,
|
|
@@ -534,14 +557,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|
rst: !frame.StreamEnded(),
|
|
})
|
|
s.cancel()
|
|
- return false
|
|
+ return nil
|
|
}
|
|
if t.inTapHandle != nil {
|
|
var err error
|
|
if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
|
|
t.mu.Unlock()
|
|
- if logger.V(logLevel) {
|
|
- logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
|
|
}
|
|
stat, ok := status.FromError(err)
|
|
if !ok {
|
|
@@ -554,7 +577,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|
status: stat,
|
|
rst: !frame.StreamEnded(),
|
|
})
|
|
- return false
|
|
+ return nil
|
|
}
|
|
}
|
|
t.activeStreams[streamID] = s
|
|
@@ -578,7 +601,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|
LocalAddr: t.localAddr,
|
|
Compression: s.recvCompress,
|
|
WireLength: int(frame.Header().Length),
|
|
- Header: metadata.MD(mdata).Copy(),
|
|
+ Header: mdata.Copy(),
|
|
}
|
|
sh.HandleRPC(s.ctx, inHeader)
|
|
}
|
|
@@ -601,7 +624,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|
wq: s.wq,
|
|
})
|
|
handle(s)
|
|
- return false
|
|
+ return nil
|
|
}
|
|
|
|
// HandleStreams receives incoming streams using the given handler. This is
|
|
@@ -615,8 +638,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
|
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
|
if err != nil {
|
|
if se, ok := err.(http2.StreamError); ok {
|
|
- if logger.V(logLevel) {
|
|
- logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Warningf("Encountered http2.StreamError: %v", se)
|
|
}
|
|
t.mu.Lock()
|
|
s := t.activeStreams[se.StreamID]
|
|
@@ -634,19 +657,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
|
continue
|
|
}
|
|
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
|
- t.Close()
|
|
+ t.Close(err)
|
|
return
|
|
}
|
|
- if logger.V(logLevel) {
|
|
- logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
|
|
- }
|
|
- t.Close()
|
|
+ t.Close(err)
|
|
return
|
|
}
|
|
switch frame := frame.(type) {
|
|
case *http2.MetaHeadersFrame:
|
|
- if t.operateHeaders(frame, handle, traceCtx) {
|
|
- t.Close()
|
|
+ if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
|
|
+ t.Close(err)
|
|
break
|
|
}
|
|
case *http2.DataFrame:
|
|
@@ -662,8 +682,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
|
case *http2.GoAwayFrame:
|
|
// TODO: Handle GoAway from the client appropriately.
|
|
default:
|
|
- if logger.V(logLevel) {
|
|
- logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Received unsupported frame type %T", frame)
|
|
}
|
|
}
|
|
}
|
|
@@ -847,8 +867,8 @@ const (
|
|
|
|
func (t *http2Server) handlePing(f *http2.PingFrame) {
|
|
if f.IsAck() {
|
|
- if f.Data == goAwayPing.data && t.drainChan != nil {
|
|
- close(t.drainChan)
|
|
+ if f.Data == goAwayPing.data && t.drainEvent != nil {
|
|
+ t.drainEvent.Fire()
|
|
return
|
|
}
|
|
// Maybe it's a BDP ping.
|
|
@@ -890,10 +910,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
|
|
|
|
if t.pingStrikes > maxPingStrikes {
|
|
// Send goaway and close the connection.
|
|
- if logger.V(logLevel) {
|
|
- logger.Errorf("transport: Got too many pings from the client, closing the connection.")
|
|
- }
|
|
- t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
|
|
+ t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
|
|
}
|
|
}
|
|
|
|
@@ -925,8 +942,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
|
var sz int64
|
|
for _, f := range hdrFrame.hf {
|
|
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
|
|
- if logger.V(logLevel) {
|
|
- logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
|
}
|
|
return false
|
|
}
|
|
@@ -1039,7 +1056,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
|
stBytes, err := proto.Marshal(p)
|
|
if err != nil {
|
|
// TODO: return error instead, when callers are able to handle it.
|
|
- logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
|
|
+ t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
|
|
} else {
|
|
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
|
|
}
|
|
@@ -1144,20 +1161,20 @@ func (t *http2Server) keepalive() {
|
|
if val <= 0 {
|
|
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
|
|
// Gracefully close the connection.
|
|
- t.Drain()
|
|
+ t.Drain("max_idle")
|
|
return
|
|
}
|
|
idleTimer.Reset(val)
|
|
case <-ageTimer.C:
|
|
- t.Drain()
|
|
+ t.Drain("max_age")
|
|
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
|
|
select {
|
|
case <-ageTimer.C:
|
|
// Close the connection after grace period.
|
|
- if logger.V(logLevel) {
|
|
- logger.Infof("transport: closing server transport due to maximum connection age.")
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Closing server transport due to maximum connection age")
|
|
}
|
|
- t.Close()
|
|
+ t.controlBuf.put(closeConnection{})
|
|
case <-t.done:
|
|
}
|
|
return
|
|
@@ -1173,10 +1190,7 @@ func (t *http2Server) keepalive() {
|
|
continue
|
|
}
|
|
if outstandingPing && kpTimeoutLeft <= 0 {
|
|
- if logger.V(logLevel) {
|
|
- logger.Infof("transport: closing server transport due to idleness.")
|
|
- }
|
|
- t.Close()
|
|
+ t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
|
|
return
|
|
}
|
|
if !outstandingPing {
|
|
@@ -1203,20 +1217,23 @@ func (t *http2Server) keepalive() {
|
|
// Close starts shutting down the http2Server transport.
|
|
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
|
|
// could cause some resource issue. Revisit this later.
|
|
-func (t *http2Server) Close() {
|
|
+func (t *http2Server) Close(err error) {
|
|
t.mu.Lock()
|
|
if t.state == closing {
|
|
t.mu.Unlock()
|
|
return
|
|
}
|
|
+ if t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Closing: %v", err)
|
|
+ }
|
|
t.state = closing
|
|
streams := t.activeStreams
|
|
t.activeStreams = nil
|
|
t.mu.Unlock()
|
|
t.controlBuf.finish()
|
|
close(t.done)
|
|
- if err := t.conn.Close(); err != nil && logger.V(logLevel) {
|
|
- logger.Infof("transport: error closing conn during Close: %v", err)
|
|
+ if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
|
|
+ t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
|
|
}
|
|
channelz.RemoveEntry(t.channelzID)
|
|
// Cancel all active streams.
|
|
@@ -1296,14 +1313,14 @@ func (t *http2Server) RemoteAddr() net.Addr {
|
|
return t.remoteAddr
|
|
}
|
|
|
|
-func (t *http2Server) Drain() {
|
|
+func (t *http2Server) Drain(debugData string) {
|
|
t.mu.Lock()
|
|
defer t.mu.Unlock()
|
|
- if t.drainChan != nil {
|
|
+ if t.drainEvent != nil {
|
|
return
|
|
}
|
|
- t.drainChan = make(chan struct{})
|
|
- t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
|
|
+ t.drainEvent = grpcsync.NewEvent()
|
|
+ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
|
|
}
|
|
|
|
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
|
|
@@ -1323,19 +1340,17 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
|
// Stop accepting more streams now.
|
|
t.state = draining
|
|
sid := t.maxStreamID
|
|
+ retErr := g.closeConn
|
|
if len(t.activeStreams) == 0 {
|
|
- g.closeConn = true
|
|
+ retErr = errors.New("second GOAWAY written and no active streams left to process")
|
|
}
|
|
t.mu.Unlock()
|
|
t.maxStreamMu.Unlock()
|
|
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
|
|
return false, err
|
|
}
|
|
- if g.closeConn {
|
|
- // Abruptly close the connection following the GoAway (via
|
|
- // loopywriter). But flush out what's inside the buffer first.
|
|
- t.framer.writer.Flush()
|
|
- return false, fmt.Errorf("transport: Connection closing")
|
|
+ if retErr != nil {
|
|
+ return false, retErr
|
|
}
|
|
return true, nil
|
|
}
|
|
@@ -1347,7 +1362,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
|
// originated before the GoAway reaches the client.
|
|
// After getting the ack or timer expiration send out another GoAway this
|
|
// time with an ID of the max stream server intends to process.
|
|
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
|
|
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
|
|
return false, err
|
|
}
|
|
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
|
|
@@ -1357,7 +1372,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
|
timer := time.NewTimer(time.Minute)
|
|
defer timer.Stop()
|
|
select {
|
|
- case <-t.drainChan:
|
|
+ case <-t.drainEvent.Done():
|
|
case <-timer.C:
|
|
case <-t.done:
|
|
return
|
|
@@ -1416,6 +1431,13 @@ func (t *http2Server) getOutFlowWindow() int64 {
|
|
}
|
|
}
|
|
|
|
+func (t *http2Server) getPeer() *peer.Peer {
|
|
+ return &peer.Peer{
|
|
+ Addr: t.remoteAddr,
|
|
+ AuthInfo: t.authInfo, // Can be nil
|
|
+ }
|
|
+}
|
|
+
|
|
func getJitter(v time.Duration) time.Duration {
|
|
if v == infinity {
|
|
return 0
|
|
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
|
|
index 56e95788d..19cbb18f5 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
|
|
@@ -20,8 +20,8 @@ package transport
|
|
|
|
import (
|
|
"bufio"
|
|
- "bytes"
|
|
"encoding/base64"
|
|
+ "errors"
|
|
"fmt"
|
|
"io"
|
|
"math"
|
|
@@ -38,14 +38,13 @@ import (
|
|
"golang.org/x/net/http2/hpack"
|
|
spb "google.golang.org/genproto/googleapis/rpc/status"
|
|
"google.golang.org/grpc/codes"
|
|
- "google.golang.org/grpc/grpclog"
|
|
"google.golang.org/grpc/status"
|
|
)
|
|
|
|
const (
|
|
// http2MaxFrameLen specifies the max length of a HTTP2 frame.
|
|
http2MaxFrameLen = 16384 // 16KB frame
|
|
- // http://http2.github.io/http2-spec/#SettingValues
|
|
+ // https://httpwg.org/specs/rfc7540.html#SettingValues
|
|
http2InitHeaderTableSize = 4096
|
|
)
|
|
|
|
@@ -86,7 +85,6 @@ var (
|
|
// 504 Gateway timeout - UNAVAILABLE.
|
|
http.StatusGatewayTimeout: codes.Unavailable,
|
|
}
|
|
- logger = grpclog.Component("transport")
|
|
)
|
|
|
|
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
|
@@ -251,13 +249,13 @@ func encodeGrpcMessage(msg string) string {
|
|
}
|
|
|
|
func encodeGrpcMessageUnchecked(msg string) string {
|
|
- var buf bytes.Buffer
|
|
+ var sb strings.Builder
|
|
for len(msg) > 0 {
|
|
r, size := utf8.DecodeRuneInString(msg)
|
|
for _, b := range []byte(string(r)) {
|
|
if size > 1 {
|
|
// If size > 1, r is not ascii. Always do percent encoding.
|
|
- buf.WriteString(fmt.Sprintf("%%%02X", b))
|
|
+ fmt.Fprintf(&sb, "%%%02X", b)
|
|
continue
|
|
}
|
|
|
|
@@ -266,14 +264,14 @@ func encodeGrpcMessageUnchecked(msg string) string {
|
|
//
|
|
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
|
|
if b >= spaceByte && b <= tildeByte && b != percentByte {
|
|
- buf.WriteByte(b)
|
|
+ sb.WriteByte(b)
|
|
} else {
|
|
- buf.WriteString(fmt.Sprintf("%%%02X", b))
|
|
+ fmt.Fprintf(&sb, "%%%02X", b)
|
|
}
|
|
}
|
|
msg = msg[size:]
|
|
}
|
|
- return buf.String()
|
|
+ return sb.String()
|
|
}
|
|
|
|
// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
|
|
@@ -291,23 +289,23 @@ func decodeGrpcMessage(msg string) string {
|
|
}
|
|
|
|
func decodeGrpcMessageUnchecked(msg string) string {
|
|
- var buf bytes.Buffer
|
|
+ var sb strings.Builder
|
|
lenMsg := len(msg)
|
|
for i := 0; i < lenMsg; i++ {
|
|
c := msg[i]
|
|
if c == percentByte && i+2 < lenMsg {
|
|
parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
|
|
if err != nil {
|
|
- buf.WriteByte(c)
|
|
+ sb.WriteByte(c)
|
|
} else {
|
|
- buf.WriteByte(byte(parsed))
|
|
+ sb.WriteByte(byte(parsed))
|
|
i += 2
|
|
}
|
|
} else {
|
|
- buf.WriteByte(c)
|
|
+ sb.WriteByte(c)
|
|
}
|
|
}
|
|
- return buf.String()
|
|
+ return sb.String()
|
|
}
|
|
|
|
type bufWriter struct {
|
|
@@ -331,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
|
|
return 0, w.err
|
|
}
|
|
if w.batchSize == 0 { // Buffer has been disabled.
|
|
- return w.conn.Write(b)
|
|
+ n, err = w.conn.Write(b)
|
|
+ return n, toIOError(err)
|
|
}
|
|
for len(b) > 0 {
|
|
nn := copy(w.buf[w.offset:], b)
|
|
@@ -353,10 +352,30 @@ func (w *bufWriter) Flush() error {
|
|
return nil
|
|
}
|
|
_, w.err = w.conn.Write(w.buf[:w.offset])
|
|
+ w.err = toIOError(w.err)
|
|
w.offset = 0
|
|
return w.err
|
|
}
|
|
|
|
+type ioError struct {
|
|
+ error
|
|
+}
|
|
+
|
|
+func (i ioError) Unwrap() error {
|
|
+ return i.error
|
|
+}
|
|
+
|
|
+func isIOError(err error) bool {
|
|
+ return errors.As(err, &ioError{})
|
|
+}
|
|
+
|
|
+func toIOError(err error) error {
|
|
+ if err == nil {
|
|
+ return nil
|
|
+ }
|
|
+ return ioError{error: err}
|
|
+}
|
|
+
|
|
type framer struct {
|
|
writer *bufWriter
|
|
fr *http2.Framer
|
|
diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go
|
|
new file mode 100644
|
|
index 000000000..42ed2b07a
|
|
--- /dev/null
|
|
+++ b/vendor/google.golang.org/grpc/internal/transport/logging.go
|
|
@@ -0,0 +1,40 @@
|
|
+/*
|
|
+ *
|
|
+ * Copyright 2023 gRPC authors.
|
|
+ *
|
|
+ * Licensed under the Apache License, Version 2.0 (the "License");
|
|
+ * you may not use this file except in compliance with the License.
|
|
+ * You may obtain a copy of the License at
|
|
+ *
|
|
+ * http://www.apache.org/licenses/LICENSE-2.0
|
|
+ *
|
|
+ * Unless required by applicable law or agreed to in writing, software
|
|
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
|
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
+ * See the License for the specific language governing permissions and
|
|
+ * limitations under the License.
|
|
+ *
|
|
+ */
|
|
+
|
|
+package transport
|
|
+
|
|
+import (
|
|
+ "fmt"
|
|
+
|
|
+ "google.golang.org/grpc/grpclog"
|
|
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
|
|
+)
|
|
+
|
|
+var logger = grpclog.Component("transport")
|
|
+
|
|
+func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
|
|
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
|
|
+}
|
|
+
|
|
+func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
|
|
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
|
|
+}
|
|
+
|
|
+func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
|
|
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
|
|
index 6c3ba8515..aa1c89659 100644
|
|
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
|
|
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
|
|
@@ -43,6 +43,10 @@ import (
|
|
"google.golang.org/grpc/tap"
|
|
)
|
|
|
|
+// ErrNoHeaders is used as a signal that a trailers only response was received,
|
|
+// and is not a real error.
|
|
+var ErrNoHeaders = errors.New("stream has no headers")
|
|
+
|
|
const logLevel = 2
|
|
|
|
type bufferPool struct {
|
|
@@ -253,6 +257,9 @@ type Stream struct {
|
|
fc *inFlow
|
|
wq *writeQuota
|
|
|
|
+ // Holds compressor names passed in grpc-accept-encoding metadata from the
|
|
+ // client. This is empty for the client side stream.
|
|
+ clientAdvertisedCompressors string
|
|
// Callback to state application's intentions to read data. This
|
|
// is used to adjust flow control, if needed.
|
|
requestRead func(int)
|
|
@@ -341,8 +348,24 @@ func (s *Stream) RecvCompress() string {
|
|
}
|
|
|
|
// SetSendCompress sets the compression algorithm to the stream.
|
|
-func (s *Stream) SetSendCompress(str string) {
|
|
- s.sendCompress = str
|
|
+func (s *Stream) SetSendCompress(name string) error {
|
|
+ if s.isHeaderSent() || s.getState() == streamDone {
|
|
+ return errors.New("transport: set send compressor called after headers sent or stream done")
|
|
+ }
|
|
+
|
|
+ s.sendCompress = name
|
|
+ return nil
|
|
+}
|
|
+
|
|
+// SendCompress returns the send compressor name.
|
|
+func (s *Stream) SendCompress() string {
|
|
+ return s.sendCompress
|
|
+}
|
|
+
|
|
+// ClientAdvertisedCompressors returns the compressor names advertised by the
|
|
+// client via grpc-accept-encoding header.
|
|
+func (s *Stream) ClientAdvertisedCompressors() string {
|
|
+ return s.clientAdvertisedCompressors
|
|
}
|
|
|
|
// Done returns a channel which is closed when it receives the final status
|
|
@@ -366,9 +389,15 @@ func (s *Stream) Header() (metadata.MD, error) {
|
|
return s.header.Copy(), nil
|
|
}
|
|
s.waitOnHeader()
|
|
+
|
|
if !s.headerValid {
|
|
return nil, s.status.Err()
|
|
}
|
|
+
|
|
+ if s.noHeaders {
|
|
+ return nil, ErrNoHeaders
|
|
+ }
|
|
+
|
|
return s.header.Copy(), nil
|
|
}
|
|
|
|
@@ -573,8 +602,8 @@ type ConnectOptions struct {
|
|
|
|
// NewClientTransport establishes the transport with the required ConnectOptions
|
|
// and returns it to the caller.
|
|
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
|
|
- return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
|
|
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
|
|
+ return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
|
|
}
|
|
|
|
// Options provides additional hints and information for message
|
|
@@ -691,13 +720,13 @@ type ServerTransport interface {
|
|
// Close tears down the transport. Once it is called, the transport
|
|
// should not be accessed any more. All the pending streams and their
|
|
// handlers will be terminated asynchronously.
|
|
- Close()
|
|
+ Close(err error)
|
|
|
|
// RemoteAddr returns the remote network address.
|
|
RemoteAddr() net.Addr
|
|
|
|
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
|
- Drain()
|
|
+ Drain(debugData string)
|
|
|
|
// IncrMsgSent increments the number of message sent through this transport.
|
|
IncrMsgSent()
|
|
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
|
|
index 8e0f6abe8..a2cdcaf12 100644
|
|
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
|
|
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
|
|
@@ -41,16 +41,17 @@ type MD map[string][]string
|
|
// New creates an MD from a given key-value map.
|
|
//
|
|
// Only the following ASCII characters are allowed in keys:
|
|
-// - digits: 0-9
|
|
-// - uppercase letters: A-Z (normalized to lower)
|
|
-// - lowercase letters: a-z
|
|
-// - special characters: -_.
|
|
+// - digits: 0-9
|
|
+// - uppercase letters: A-Z (normalized to lower)
|
|
+// - lowercase letters: a-z
|
|
+// - special characters: -_.
|
|
+//
|
|
// Uppercase letters are automatically converted to lowercase.
|
|
//
|
|
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
|
|
// result in errors if set in metadata.
|
|
func New(m map[string]string) MD {
|
|
- md := MD{}
|
|
+ md := make(MD, len(m))
|
|
for k, val := range m {
|
|
key := strings.ToLower(k)
|
|
md[key] = append(md[key], val)
|
|
@@ -62,10 +63,11 @@ func New(m map[string]string) MD {
|
|
// Pairs panics if len(kv) is odd.
|
|
//
|
|
// Only the following ASCII characters are allowed in keys:
|
|
-// - digits: 0-9
|
|
-// - uppercase letters: A-Z (normalized to lower)
|
|
-// - lowercase letters: a-z
|
|
-// - special characters: -_.
|
|
+// - digits: 0-9
|
|
+// - uppercase letters: A-Z (normalized to lower)
|
|
+// - lowercase letters: a-z
|
|
+// - special characters: -_.
|
|
+//
|
|
// Uppercase letters are automatically converted to lowercase.
|
|
//
|
|
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
|
|
@@ -74,7 +76,7 @@ func Pairs(kv ...string) MD {
|
|
if len(kv)%2 == 1 {
|
|
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
|
|
}
|
|
- md := MD{}
|
|
+ md := make(MD, len(kv)/2)
|
|
for i := 0; i < len(kv); i += 2 {
|
|
key := strings.ToLower(kv[i])
|
|
md[key] = append(md[key], kv[i+1])
|
|
@@ -89,7 +91,11 @@ func (md MD) Len() int {
|
|
|
|
// Copy returns a copy of md.
|
|
func (md MD) Copy() MD {
|
|
- return Join(md)
|
|
+ out := make(MD, len(md))
|
|
+ for k, v := range md {
|
|
+ out[k] = copyOf(v)
|
|
+ }
|
|
+ return out
|
|
}
|
|
|
|
// Get obtains the values for a given key.
|
|
@@ -169,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context
|
|
md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
|
|
added := make([][]string, len(md.added)+1)
|
|
copy(added, md.added)
|
|
- added[len(added)-1] = make([]string, len(kv))
|
|
- copy(added[len(added)-1], kv)
|
|
+ kvCopy := make([]string, 0, len(kv))
|
|
+ for i := 0; i < len(kv); i += 2 {
|
|
+ kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
|
|
+ }
|
|
+ added[len(added)-1] = kvCopy
|
|
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
|
|
}
|
|
|
|
@@ -182,19 +191,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
|
|
if !ok {
|
|
return nil, false
|
|
}
|
|
- out := MD{}
|
|
+ out := make(MD, len(md))
|
|
for k, v := range md {
|
|
// We need to manually convert all keys to lower case, because MD is a
|
|
// map, and there's no guarantee that the MD attached to the context is
|
|
// created using our helper functions.
|
|
key := strings.ToLower(k)
|
|
- s := make([]string, len(v))
|
|
- copy(s, v)
|
|
- out[key] = s
|
|
+ out[key] = copyOf(v)
|
|
}
|
|
return out, true
|
|
}
|
|
|
|
+// ValueFromIncomingContext returns the metadata value corresponding to the metadata
|
|
+// key from the incoming metadata if it exists. Key must be lower-case.
|
|
+//
|
|
+// # Experimental
|
|
+//
|
|
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
|
+// later release.
|
|
+func ValueFromIncomingContext(ctx context.Context, key string) []string {
|
|
+ md, ok := ctx.Value(mdIncomingKey{}).(MD)
|
|
+ if !ok {
|
|
+ return nil
|
|
+ }
|
|
+
|
|
+ if v, ok := md[key]; ok {
|
|
+ return copyOf(v)
|
|
+ }
|
|
+ for k, v := range md {
|
|
+ // We need to manually convert all keys to lower case, because MD is a
|
|
+ // map, and there's no guarantee that the MD attached to the context is
|
|
+ // created using our helper functions.
|
|
+ if strings.ToLower(k) == key {
|
|
+ return copyOf(v)
|
|
+ }
|
|
+ }
|
|
+ return nil
|
|
+}
|
|
+
|
|
+// the returned slice must not be modified in place
|
|
+func copyOf(v []string) []string {
|
|
+ vals := make([]string, len(v))
|
|
+ copy(vals, v)
|
|
+ return vals
|
|
+}
|
|
+
|
|
// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
|
|
//
|
|
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
|
|
@@ -222,15 +263,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) {
|
|
return nil, false
|
|
}
|
|
|
|
- out := MD{}
|
|
+ mdSize := len(raw.md)
|
|
+ for i := range raw.added {
|
|
+ mdSize += len(raw.added[i]) / 2
|
|
+ }
|
|
+
|
|
+ out := make(MD, mdSize)
|
|
for k, v := range raw.md {
|
|
// We need to manually convert all keys to lower case, because MD is a
|
|
// map, and there's no guarantee that the MD attached to the context is
|
|
// created using our helper functions.
|
|
key := strings.ToLower(k)
|
|
- s := make([]string, len(v))
|
|
- copy(s, v)
|
|
- out[key] = s
|
|
+ out[key] = copyOf(v)
|
|
}
|
|
for _, added := range raw.added {
|
|
if len(added)%2 == 1 {
|
|
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
|
|
index 843633c91..02f975951 100644
|
|
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
|
|
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
|
|
@@ -26,6 +26,7 @@ import (
|
|
"google.golang.org/grpc/balancer"
|
|
"google.golang.org/grpc/codes"
|
|
"google.golang.org/grpc/internal/channelz"
|
|
+ istatus "google.golang.org/grpc/internal/status"
|
|
"google.golang.org/grpc/internal/transport"
|
|
"google.golang.org/grpc/status"
|
|
)
|
|
@@ -35,6 +36,7 @@ import (
|
|
type pickerWrapper struct {
|
|
mu sync.Mutex
|
|
done bool
|
|
+ idle bool
|
|
blockingCh chan struct{}
|
|
picker balancer.Picker
|
|
}
|
|
@@ -46,7 +48,11 @@ func newPickerWrapper() *pickerWrapper {
|
|
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
|
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
|
pw.mu.Lock()
|
|
- if pw.done {
|
|
+ if pw.done || pw.idle {
|
|
+ // There is a small window where a picker update from the LB policy can
|
|
+ // race with the channel going to idle mode. If the picker is idle here,
|
|
+ // it is because the channel asked it to do so, and therefore it is sage
|
|
+ // to ignore the update from the LB policy.
|
|
pw.mu.Unlock()
|
|
return
|
|
}
|
|
@@ -57,12 +63,16 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
|
pw.mu.Unlock()
|
|
}
|
|
|
|
-func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
|
|
- acw.mu.Lock()
|
|
- ac := acw.ac
|
|
- acw.mu.Unlock()
|
|
+// doneChannelzWrapper performs the following:
|
|
+// - increments the calls started channelz counter
|
|
+// - wraps the done function in the passed in result to increment the calls
|
|
+// failed or calls succeeded channelz counter before invoking the actual
|
|
+// done function.
|
|
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
|
|
+ ac := acbw.ac
|
|
ac.incrCallsStarted()
|
|
- return func(b balancer.DoneInfo) {
|
|
+ done := result.Done
|
|
+ result.Done = func(b balancer.DoneInfo) {
|
|
if b.Err != nil && b.Err != io.EOF {
|
|
ac.incrCallsFailed()
|
|
} else {
|
|
@@ -81,7 +91,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
|
|
// - the current picker returns other errors and failfast is false.
|
|
// - the subConn returned by the current picker is not READY
|
|
// When one of these situations happens, pick blocks until the picker gets updated.
|
|
-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
|
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
|
|
var ch chan struct{}
|
|
|
|
var lastPickErr error
|
|
@@ -89,7 +99,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
|
pw.mu.Lock()
|
|
if pw.done {
|
|
pw.mu.Unlock()
|
|
- return nil, nil, ErrClientConnClosing
|
|
+ return nil, balancer.PickResult{}, ErrClientConnClosing
|
|
}
|
|
|
|
if pw.picker == nil {
|
|
@@ -110,9 +120,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
|
}
|
|
switch ctx.Err() {
|
|
case context.DeadlineExceeded:
|
|
- return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
|
|
+ return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
|
|
case context.Canceled:
|
|
- return nil, nil, status.Error(codes.Canceled, errStr)
|
|
+ return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
|
|
}
|
|
case <-ch:
|
|
}
|
|
@@ -124,14 +134,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
|
pw.mu.Unlock()
|
|
|
|
pickResult, err := p.Pick(info)
|
|
-
|
|
if err != nil {
|
|
if err == balancer.ErrNoSubConnAvailable {
|
|
continue
|
|
}
|
|
- if _, ok := status.FromError(err); ok {
|
|
+ if st, ok := status.FromError(err); ok {
|
|
// Status error: end the RPC unconditionally with this status.
|
|
- return nil, nil, dropError{error: err}
|
|
+ // First restrict the code to the list allowed by gRFC A54.
|
|
+ if istatus.IsRestrictedControlPlaneCode(st) {
|
|
+ err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
|
|
+ }
|
|
+ return nil, balancer.PickResult{}, dropError{error: err}
|
|
}
|
|
// For all other errors, wait for ready RPCs should block and other
|
|
// RPCs should fail with unavailable.
|
|
@@ -139,19 +152,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
|
lastPickErr = err
|
|
continue
|
|
}
|
|
- return nil, nil, status.Error(codes.Unavailable, err.Error())
|
|
+ return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
|
|
}
|
|
|
|
- acw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
|
+ acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
|
if !ok {
|
|
logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
|
|
continue
|
|
}
|
|
- if t := acw.getAddrConn().getReadyTransport(); t != nil {
|
|
+ if t := acbw.ac.getReadyTransport(); t != nil {
|
|
if channelz.IsOn() {
|
|
- return t, doneChannelzWrapper(acw, pickResult.Done), nil
|
|
+ doneChannelzWrapper(acbw, &pickResult)
|
|
+ return t, pickResult, nil
|
|
}
|
|
- return t, pickResult.Done, nil
|
|
+ return t, pickResult, nil
|
|
}
|
|
if pickResult.Done != nil {
|
|
// Calling done with nil error, no bytes sent and no bytes received.
|
|
@@ -176,6 +190,25 @@ func (pw *pickerWrapper) close() {
|
|
close(pw.blockingCh)
|
|
}
|
|
|
|
+func (pw *pickerWrapper) enterIdleMode() {
|
|
+ pw.mu.Lock()
|
|
+ defer pw.mu.Unlock()
|
|
+ if pw.done {
|
|
+ return
|
|
+ }
|
|
+ pw.idle = true
|
|
+}
|
|
+
|
|
+func (pw *pickerWrapper) exitIdleMode() {
|
|
+ pw.mu.Lock()
|
|
+ defer pw.mu.Unlock()
|
|
+ if pw.done {
|
|
+ return
|
|
+ }
|
|
+ pw.blockingCh = make(chan struct{})
|
|
+ pw.idle = false
|
|
+}
|
|
+
|
|
// dropError is a wrapper error that indicates the LB policy wishes to drop the
|
|
// RPC and not retry it.
|
|
type dropError struct {
|
|
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
|
|
index fb7a99e0a..abe266b02 100644
|
|
--- a/vendor/google.golang.org/grpc/pickfirst.go
|
|
+++ b/vendor/google.golang.org/grpc/pickfirst.go
|
|
@@ -19,11 +19,15 @@
|
|
package grpc
|
|
|
|
import (
|
|
+ "encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
|
|
"google.golang.org/grpc/balancer"
|
|
"google.golang.org/grpc/connectivity"
|
|
+ "google.golang.org/grpc/internal/envconfig"
|
|
+ "google.golang.org/grpc/internal/grpcrand"
|
|
+ "google.golang.org/grpc/serviceconfig"
|
|
)
|
|
|
|
// PickFirstBalancerName is the name of the pick_first balancer.
|
|
@@ -43,15 +47,33 @@ func (*pickfirstBuilder) Name() string {
|
|
return PickFirstBalancerName
|
|
}
|
|
|
|
+type pfConfig struct {
|
|
+ serviceconfig.LoadBalancingConfig `json:"-"`
|
|
+
|
|
+ // If set to true, instructs the LB policy to shuffle the order of the list
|
|
+ // of addresses received from the name resolver before attempting to
|
|
+ // connect to them.
|
|
+ ShuffleAddressList bool `json:"shuffleAddressList"`
|
|
+}
|
|
+
|
|
+func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
|
|
+ cfg := &pfConfig{}
|
|
+ if err := json.Unmarshal(js, cfg); err != nil {
|
|
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
|
|
+ }
|
|
+ return cfg, nil
|
|
+}
|
|
+
|
|
type pickfirstBalancer struct {
|
|
state connectivity.State
|
|
cc balancer.ClientConn
|
|
subConn balancer.SubConn
|
|
+ cfg *pfConfig
|
|
}
|
|
|
|
func (b *pickfirstBalancer) ResolverError(err error) {
|
|
if logger.V(2) {
|
|
- logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
|
+ logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err)
|
|
}
|
|
if b.subConn == nil {
|
|
b.state = connectivity.TransientFailure
|
|
@@ -69,7 +91,8 @@ func (b *pickfirstBalancer) ResolverError(err error) {
|
|
}
|
|
|
|
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
|
|
- if len(state.ResolverState.Addresses) == 0 {
|
|
+ addrs := state.ResolverState.Addresses
|
|
+ if len(addrs) == 0 {
|
|
// The resolver reported an empty address list. Treat it like an error by
|
|
// calling b.ResolverError.
|
|
if b.subConn != nil {
|
|
@@ -82,12 +105,23 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
|
return balancer.ErrBadResolverState
|
|
}
|
|
|
|
+ if state.BalancerConfig != nil {
|
|
+ cfg, ok := state.BalancerConfig.(*pfConfig)
|
|
+ if !ok {
|
|
+ return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
|
|
+ }
|
|
+ b.cfg = cfg
|
|
+ }
|
|
+
|
|
+ if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList {
|
|
+ grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
|
|
+ }
|
|
if b.subConn != nil {
|
|
- b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
|
|
+ b.cc.UpdateAddresses(b.subConn, addrs)
|
|
return nil
|
|
}
|
|
|
|
- subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
|
|
+ subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
|
|
if err != nil {
|
|
if logger.V(2) {
|
|
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
|
@@ -102,8 +136,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
|
b.subConn = subConn
|
|
b.state = connectivity.Idle
|
|
b.cc.UpdateState(balancer.State{
|
|
- ConnectivityState: connectivity.Idle,
|
|
- Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}},
|
|
+ ConnectivityState: connectivity.Connecting,
|
|
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
|
|
})
|
|
b.subConn.Connect()
|
|
return nil
|
|
@@ -119,7 +153,6 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
|
|
}
|
|
return
|
|
}
|
|
- b.state = state.ConnectivityState
|
|
if state.ConnectivityState == connectivity.Shutdown {
|
|
b.subConn = nil
|
|
return
|
|
@@ -132,11 +165,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
|
|
Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
|
|
})
|
|
case connectivity.Connecting:
|
|
+ if b.state == connectivity.TransientFailure {
|
|
+ // We stay in TransientFailure until we are Ready. See A62.
|
|
+ return
|
|
+ }
|
|
b.cc.UpdateState(balancer.State{
|
|
ConnectivityState: state.ConnectivityState,
|
|
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
|
|
})
|
|
case connectivity.Idle:
|
|
+ if b.state == connectivity.TransientFailure {
|
|
+ // We stay in TransientFailure until we are Ready. Also kick the
|
|
+ // subConn out of Idle into Connecting. See A62.
|
|
+ b.subConn.Connect()
|
|
+ return
|
|
+ }
|
|
b.cc.UpdateState(balancer.State{
|
|
ConnectivityState: state.ConnectivityState,
|
|
Picker: &idlePicker{subConn: subConn},
|
|
@@ -147,6 +190,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
|
|
Picker: &picker{err: state.ConnectionError},
|
|
})
|
|
}
|
|
+ b.state = state.ConnectivityState
|
|
}
|
|
|
|
func (b *pickfirstBalancer) Close() {
|
|
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
|
|
index 0a1e975ad..cd4554785 100644
|
|
--- a/vendor/google.golang.org/grpc/preloader.go
|
|
+++ b/vendor/google.golang.org/grpc/preloader.go
|
|
@@ -25,7 +25,7 @@ import (
|
|
|
|
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
|
|
index 99db79faf..a6f26c8ab 100644
|
|
--- a/vendor/google.golang.org/grpc/regenerate.sh
|
|
+++ b/vendor/google.golang.org/grpc/regenerate.sh
|
|
@@ -57,7 +57,8 @@ LEGACY_SOURCES=(
|
|
${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
|
|
${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
|
|
profiling/proto/service.proto
|
|
- reflection/grpc_reflection_v1alpha/reflection.proto
|
|
+ ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
|
|
+ ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
|
|
)
|
|
|
|
# Generates only the new gRPC Service symbols
|
|
@@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/
|
|
# see grpc_testing_not_regenerate/README.md for details.
|
|
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
|
|
|
|
-# grpc/testing does not have a go_package option.
|
|
-mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
|
|
-mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
|
|
-
|
|
cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
|
|
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
|
|
index ca2e35a35..353c10b69 100644
|
|
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
|
|
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
|
|
@@ -22,12 +22,13 @@ package resolver
|
|
|
|
import (
|
|
"context"
|
|
+ "fmt"
|
|
"net"
|
|
"net/url"
|
|
+ "strings"
|
|
|
|
"google.golang.org/grpc/attributes"
|
|
"google.golang.org/grpc/credentials"
|
|
- "google.golang.org/grpc/internal/pretty"
|
|
"google.golang.org/grpc/serviceconfig"
|
|
)
|
|
|
|
@@ -40,8 +41,9 @@ var (
|
|
|
|
// TODO(bar) install dns resolver in init(){}.
|
|
|
|
-// Register registers the resolver builder to the resolver map. b.Scheme will be
|
|
-// used as the scheme registered with this builder.
|
|
+// Register registers the resolver builder to the resolver map. b.Scheme will
|
|
+// be used as the scheme registered with this builder. The registry is case
|
|
+// sensitive, and schemes should not contain any uppercase characters.
|
|
//
|
|
// NOTE: this function must only be called during initialization time (i.e. in
|
|
// an init() function), and is not thread-safe. If multiple Resolvers are
|
|
@@ -96,7 +98,7 @@ const (
|
|
|
|
// Address represents a server the client connects to.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
@@ -122,7 +124,7 @@ type Address struct {
|
|
Attributes *attributes.Attributes
|
|
|
|
// BalancerAttributes contains arbitrary data about this address intended
|
|
- // for consumption by the LB policy. These attribes do not affect SubConn
|
|
+ // for consumption by the LB policy. These attributes do not affect SubConn
|
|
// creation, connection establishment, handshaking, etc.
|
|
BalancerAttributes *attributes.Attributes
|
|
|
|
@@ -149,7 +151,17 @@ func (a Address) Equal(o Address) bool {
|
|
|
|
// String returns JSON formatted string representation of the address.
|
|
func (a Address) String() string {
|
|
- return pretty.ToJSON(a)
|
|
+ var sb strings.Builder
|
|
+ sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
|
|
+ sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
|
|
+ if a.Attributes != nil {
|
|
+ sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
|
|
+ }
|
|
+ if a.BalancerAttributes != nil {
|
|
+ sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
|
|
+ }
|
|
+ sb.WriteString("}")
|
|
+ return sb.String()
|
|
}
|
|
|
|
// BuildOptions includes additional information for the builder to create
|
|
@@ -202,6 +214,15 @@ type State struct {
|
|
// gRPC to add new methods to this interface.
|
|
type ClientConn interface {
|
|
// UpdateState updates the state of the ClientConn appropriately.
|
|
+ //
|
|
+ // If an error is returned, the resolver should try to resolve the
|
|
+ // target again. The resolver should use a backoff timer to prevent
|
|
+ // overloading the server with requests. If a resolver is certain that
|
|
+ // reresolving will not change the result, e.g. because it is
|
|
+ // a watch-based resolver, returned errors can be ignored.
|
|
+ //
|
|
+ // If the resolved State is the same as the last reported one, calling
|
|
+ // UpdateState can be omitted.
|
|
UpdateState(State) error
|
|
// ReportError notifies the ClientConn that the Resolver encountered an
|
|
// error. The ClientConn will notify the load balancer and begin calling
|
|
@@ -236,20 +257,17 @@ type ClientConn interface {
|
|
//
|
|
// Examples:
|
|
//
|
|
-// - "dns://some_authority/foo.bar"
|
|
-// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
|
|
-// - "foo.bar"
|
|
-// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
|
|
-// - "unknown_scheme://authority/endpoint"
|
|
-// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
|
|
+// - "dns://some_authority/foo.bar"
|
|
+// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
|
|
+// - "foo.bar"
|
|
+// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
|
|
+// - "unknown_scheme://authority/endpoint"
|
|
+// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
|
|
type Target struct {
|
|
// Deprecated: use URL.Scheme instead.
|
|
Scheme string
|
|
// Deprecated: use URL.Host instead.
|
|
Authority string
|
|
- // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when
|
|
- // the former is empty.
|
|
- Endpoint string
|
|
// URL contains the parsed dial target with an optional default scheme added
|
|
// to it if the original dial target contained no scheme or contained an
|
|
// unregistered scheme. Any query params specified in the original dial
|
|
@@ -257,6 +275,24 @@ type Target struct {
|
|
URL url.URL
|
|
}
|
|
|
|
+// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
|
|
+// or `URL.Opaque`. The latter is used when the former is empty.
|
|
+func (t Target) Endpoint() string {
|
|
+ endpoint := t.URL.Path
|
|
+ if endpoint == "" {
|
|
+ endpoint = t.URL.Opaque
|
|
+ }
|
|
+ // For targets of the form "[scheme]://[authority]/endpoint, the endpoint
|
|
+ // value returned from url.Parse() contains a leading "/". Although this is
|
|
+ // in accordance with RFC 3986, we do not want to break existing resolver
|
|
+ // implementations which expect the endpoint without the leading "/". So, we
|
|
+ // end up stripping the leading "/" here. But this will result in an
|
|
+ // incorrect parsing for something like "unix:///path/to/socket". Since we
|
|
+ // own the "unix" resolver, we can workaround in the unix resolver by using
|
|
+ // the `URL` field.
|
|
+ return strings.TrimPrefix(endpoint, "/")
|
|
+}
|
|
+
|
|
// Builder creates a resolver that will be used to watch name resolution updates.
|
|
type Builder interface {
|
|
// Build creates a new resolver for the given target.
|
|
@@ -264,8 +300,10 @@ type Builder interface {
|
|
// gRPC dial calls Build synchronously, and fails if the returned error is
|
|
// not nil.
|
|
Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
|
|
- // Scheme returns the scheme supported by this resolver.
|
|
- // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
|
+ // Scheme returns the scheme supported by this resolver. Scheme is defined
|
|
+ // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
|
|
+ // string should not contain uppercase characters, as they will not match
|
|
+ // the parsed target's scheme as defined in RFC 3986.
|
|
Scheme() string
|
|
}
|
|
|
|
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
|
|
index 05a9d4e0b..b408b3688 100644
|
|
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
|
|
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
|
|
@@ -19,11 +19,11 @@
|
|
package grpc
|
|
|
|
import (
|
|
+ "context"
|
|
"strings"
|
|
"sync"
|
|
|
|
"google.golang.org/grpc/balancer"
|
|
- "google.golang.org/grpc/credentials"
|
|
"google.golang.org/grpc/internal/channelz"
|
|
"google.golang.org/grpc/internal/grpcsync"
|
|
"google.golang.org/grpc/internal/pretty"
|
|
@@ -31,129 +31,192 @@ import (
|
|
"google.golang.org/grpc/serviceconfig"
|
|
)
|
|
|
|
+// resolverStateUpdater wraps the single method used by ccResolverWrapper to
|
|
+// report a state update from the actual resolver implementation.
|
|
+type resolverStateUpdater interface {
|
|
+ updateResolverState(s resolver.State, err error) error
|
|
+}
|
|
+
|
|
// ccResolverWrapper is a wrapper on top of cc for resolvers.
|
|
// It implements resolver.ClientConn interface.
|
|
type ccResolverWrapper struct {
|
|
- cc *ClientConn
|
|
- resolverMu sync.Mutex
|
|
- resolver resolver.Resolver
|
|
- done *grpcsync.Event
|
|
- curState resolver.State
|
|
+ // The following fields are initialized when the wrapper is created and are
|
|
+ // read-only afterwards, and therefore can be accessed without a mutex.
|
|
+ cc resolverStateUpdater
|
|
+ channelzID *channelz.Identifier
|
|
+ ignoreServiceConfig bool
|
|
+ opts ccResolverWrapperOpts
|
|
+ serializer *grpcsync.CallbackSerializer // To serialize all incoming calls.
|
|
+ serializerCancel context.CancelFunc // To close the serializer, accessed only from close().
|
|
+
|
|
+ // All incoming (resolver --> gRPC) calls are guaranteed to execute in a
|
|
+ // mutually exclusive manner as they are scheduled on the serializer.
|
|
+ // Fields accessed *only* in these serializer callbacks, can therefore be
|
|
+ // accessed without a mutex.
|
|
+ curState resolver.State
|
|
+
|
|
+ // mu guards access to the below fields.
|
|
+ mu sync.Mutex
|
|
+ closed bool
|
|
+ resolver resolver.Resolver // Accessed only from outgoing calls.
|
|
+}
|
|
|
|
- incomingMu sync.Mutex // Synchronizes all the incoming calls.
|
|
+// ccResolverWrapperOpts wraps the arguments to be passed when creating a new
|
|
+// ccResolverWrapper.
|
|
+type ccResolverWrapperOpts struct {
|
|
+ target resolver.Target // User specified dial target to resolve.
|
|
+ builder resolver.Builder // Resolver builder to use.
|
|
+ bOpts resolver.BuildOptions // Resolver build options to use.
|
|
+ channelzID *channelz.Identifier // Channelz identifier for the channel.
|
|
}
|
|
|
|
// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
// returns a ccResolverWrapper object which wraps the newly built resolver.
-func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
+func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) {
+ ctx, cancel := context.WithCancel(context.Background())
ccr := &ccResolverWrapper{
- cc: cc,
- done: grpcsync.NewEvent(),
- }
-
- var credsClone credentials.TransportCredentials
- if creds := cc.dopts.copts.TransportCredentials; creds != nil {
- credsClone = creds.Clone()
- }
- rbo := resolver.BuildOptions{
- DisableServiceConfig: cc.dopts.disableServiceConfig,
- DialCreds: credsClone,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- }
-
- var err error
- // We need to hold the lock here while we assign to the ccr.resolver field
- // to guard against a data race caused by the following code path,
- // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
- // accessing ccr.resolver which is being assigned here.
- ccr.resolverMu.Lock()
- defer ccr.resolverMu.Unlock()
- ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
+ cc: cc,
+ channelzID: opts.channelzID,
+ ignoreServiceConfig: opts.bOpts.DisableServiceConfig,
+ opts: opts,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ }
+
+ // Cannot hold the lock at build time because the resolver can send an
+ // update or error inline and these incoming calls grab the lock to schedule
+ // a callback in the serializer.
+ r, err := opts.builder.Build(opts.target, ccr, opts.bOpts)
if err != nil {
+ cancel()
return nil, err
}
+
+ // Any error reported by the resolver at build time that leads to a
+ // re-resolution request from the balancer is dropped by grpc until we
+ // return from this function. So, we don't have to handle pending resolveNow
+ // requests here.
+ ccr.mu.Lock()
+ ccr.resolver = r
+ ccr.mu.Unlock()
+
return ccr, nil
}

func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.resolverMu.Lock()
- if !ccr.done.HasFired() {
- ccr.resolver.ResolveNow(o)
+ ccr.mu.Lock()
+ defer ccr.mu.Unlock()
+
+ // ccr.resolver field is set only after the call to Build() returns. But in
+ // the process of building, the resolver may send an error update which when
+ // propagated to the balancer may result in a re-resolution request.
+ if ccr.closed || ccr.resolver == nil {
+ return
}
- ccr.resolverMu.Unlock()
+ ccr.resolver.ResolveNow(o)
}

func (ccr *ccResolverWrapper) close() {
- ccr.resolverMu.Lock()
- ccr.resolver.Close()
- ccr.done.Fire()
- ccr.resolverMu.Unlock()
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ return
+ }
+
+ channelz.Info(logger, ccr.channelzID, "Closing the name resolver")
+
+ // Close the serializer to ensure that no more calls from the resolver are
+ // handled, before actually closing the resolver.
+ ccr.serializerCancel()
+ ccr.closed = true
+ r := ccr.resolver
+ ccr.mu.Unlock()
+
+ // Give enqueued callbacks a chance to finish.
+ <-ccr.serializer.Done
+
+ // Spawn a goroutine to close the resolver (since it may block trying to
+ // cleanup all allocated resources) and return early.
+ go r.Close()
+}
+
+// serializerScheduleLocked is a convenience method to schedule a function to be
+// run on the serializer while holding ccr.mu.
+func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) {
+ ccr.mu.Lock()
+ ccr.serializer.Schedule(f)
+ ccr.mu.Unlock()
}

+// UpdateState is called by resolver implementations to report new state to gRPC
+// which includes addresses and service config.
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
- ccr.incomingMu.Lock()
- defer ccr.incomingMu.Unlock()
- if ccr.done.HasFired() {
+ errCh := make(chan error, 1)
+ ok := ccr.serializer.Schedule(func(context.Context) {
+ ccr.addChannelzTraceEvent(s)
+ ccr.curState = s
+ if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
+ errCh <- balancer.ErrBadResolverState
+ return
+ }
+ errCh <- nil
+ })
+ if !ok {
+ // The only time when Schedule() fail to add the callback to the
+ // serializer is when the serializer is closed, and this happens only
+ // when the resolver wrapper is closed.
return nil
}
- ccr.addChannelzTraceEvent(s)
- ccr.curState = s
- if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
- return balancer.ErrBadResolverState
- }
- return nil
+ return <-errCh
}

+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.
func (ccr *ccResolverWrapper) ReportError(err error) {
- ccr.incomingMu.Lock()
- defer ccr.incomingMu.Unlock()
- if ccr.done.HasFired() {
- return
- }
- channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
- ccr.cc.updateResolverState(resolver.State{}, err)
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+ ccr.cc.updateResolverState(resolver.State{}, err)
+ })
}

-// NewAddress is called by the resolver implementation to send addresses to gRPC.
+// NewAddress is called by the resolver implementation to send addresses to
+// gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
- ccr.incomingMu.Lock()
- defer ccr.incomingMu.Unlock()
- if ccr.done.HasFired() {
- return
- }
- ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
- ccr.curState.Addresses = addrs
- ccr.cc.updateResolverState(ccr.curState, nil)
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
+ ccr.curState.Addresses = addrs
+ ccr.cc.updateResolverState(ccr.curState, nil)
+ })
}

// NewServiceConfig is called by the resolver implementation to send service
// configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
- ccr.incomingMu.Lock()
- defer ccr.incomingMu.Unlock()
- if ccr.done.HasFired() {
- return
- }
- channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
- if ccr.cc.dopts.disableServiceConfig {
- channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
- return
- }
- scpr := parseServiceConfig(sc)
- if scpr.Err != nil {
- channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
- return
- }
- ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
- ccr.curState.ServiceConfig = scpr
- ccr.cc.updateResolverState(ccr.curState, nil)
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc)
+ if ccr.ignoreServiceConfig {
+ channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config")
+ return
+ }
+ scpr := parseServiceConfig(sc)
+ if scpr.Err != nil {
+ channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
+ return
+ }
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
+ ccr.curState.ServiceConfig = scpr
+ ccr.cc.updateResolverState(ccr.curState, nil)
+ })
}

+// ParseServiceConfig is called by resolver implementations to parse a JSON
+// representation of the service config.
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
return parseServiceConfig(scJSON)
}

+// addChannelzTraceEvent adds a channelz trace event containing the new
+// state received from resolver implementations.
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
var updates []string
var oldSC, newSC *ServiceConfig
@@ -172,5 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
- channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
+ channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 5d407b004..2030736a3 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -25,7 +25,6 @@ import (
"encoding/binary"
"fmt"
"io"
- "io/ioutil"
"math"
"strings"
"sync"
@@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
return &gzipCompressor{
pool: sync.Pool{
New: func() interface{} {
- w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+ w, err := gzip.NewWriterLevel(io.Discard, level)
if err != nil {
panic(err)
}
@@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
z.Close()
d.pool.Put(z)
}()
- return ioutil.ReadAll(z)
+ return io.ReadAll(z)
}

func (d *gzipDecompressor) Type() string {
@@ -160,6 +159,7 @@ type callInfo struct {
contentSubtype string
codec baseCodec
maxRetryRPCBufferSize int
+ onFinish []func(err error)
}

func defaultCallInfo() *callInfo {
@@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption {
// HeaderCallOption is a CallOption for collecting response header metadata.
// The metadata field will be populated *after* the RPC completes.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption {
// TrailerCallOption is a CallOption for collecting response trailer metadata.
// The metadata field will be populated *after* the RPC completes.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption {
// PeerCallOption is a CallOption for collecting the identity of the remote
// peer. The peer field will be populated *after* the RPC completes.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption {
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
// fast or not.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -296,8 +296,44 @@ func (o FailFastCallOption) before(c *callInfo) error {
}
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}

+// OnFinish returns a CallOption that configures a callback to be called when
+// the call completes. The error passed to the callback is the status of the
+// RPC, and may be nil. The onFinish callback provided will only be called once
+// by gRPC. This is mainly used to be used by streaming interceptors, to be
+// notified when the RPC completes along with information about the status of
+// the RPC.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func OnFinish(onFinish func(err error)) CallOption {
+ return OnFinishCallOption{
+ OnFinish: onFinish,
+ }
+}
+
+// OnFinishCallOption is CallOption that indicates a callback to be called when
+// the call completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type OnFinishCallOption struct {
+ OnFinish func(error)
+}
+
+func (o OnFinishCallOption) before(c *callInfo) error {
+ c.onFinish = append(c.onFinish, o.OnFinish)
+ return nil
+}
+
+func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
+
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
-// in bytes the client can receive.
+// in bytes the client can receive. If this is not set, gRPC uses the default
+// 4MB.
func MaxCallRecvMsgSize(bytes int) CallOption {
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
}
@@ -305,7 +341,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption {
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can receive.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -320,7 +356,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}

// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
-// in bytes the client can send.
+// in bytes the client can send. If this is not set, gRPC uses the default
+// `math.MaxInt32`.
func MaxCallSendMsgSize(bytes int) CallOption {
return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
}
@@ -328,7 +365,7 @@ func MaxCallSendMsgSize(bytes int) CallOption {
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
// size in bytes the client can send.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -351,7 +388,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
// credentials to use for the call.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -369,7 +406,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
// sending the request. If WithCompressor is also set, UseCompressor has
// higher priority.
//
-// Experimental
+// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -379,7 +416,7 @@ func UseCompressor(name string) CallOption {

// CompressorCallOption is a CallOption that indicates the compressor to use.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -416,7 +453,7 @@ func CallContentSubtype(contentSubtype string) CallOption {
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
// used for marshaling messages.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -444,7 +481,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
// This function is provided for advanced users; prefer to use only
// CallContentSubtype to select a registered codec instead.
//
-// Experimental
+// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -455,7 +492,7 @@ func ForceCodec(codec encoding.Codec) CallOption {
// ForceCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -480,7 +517,7 @@ func CallCustomCodec(codec Codec) CallOption {
// CustomCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -497,7 +534,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
//
-// Experimental
+// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -508,7 +545,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption {
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
// memory to be used for caching this RPC for retry purposes.
//
-// Experimental
+// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@@ -548,10 +585,11 @@ type parser struct {
// format. The caller owns the returned msg memory.
//
// If there is an error, possible values are:
-// * io.EOF, when no messages remain
-// * io.ErrUnexpectedEOF
-// * of type transport.ConnectionError
-// * an error from the status package
+// - io.EOF, when no messages remain
+// - io.ErrUnexpectedEOF
+// - of type transport.ConnectionError
+// - an error from the status package
+//
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// error.
@@ -656,12 +694,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {

func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
- Client: client,
- Payload: msg,
- Data: data,
- Length: len(data),
- WireLength: len(payload) + headerLen,
- SentTime: t,
+ Client: client,
+ Payload: msg,
+ Data: data,
+ Length: len(data),
+ WireLength: len(payload) + headerLen,
+ CompressedLength: len(payload),
+ SentTime: t,
}
}

@@ -682,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
}

type payloadInfo struct {
- wireLength int // The compressed length got from wire.
+ compressedLength int // The compressed length got from wire.
uncompressedBytes []byte
}

@@ -692,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
return nil, err
}
if payInfo != nil {
- payInfo.wireLength = len(d)
+ payInfo.compressedLength = len(d)
}

if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
@@ -710,7 +749,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
}
if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
if size > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
@@ -745,7 +784,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
}
// Read from LimitReader with limit max+1. So if the underlying
// reader is over limit, the result will be bigger than max.
- d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+ d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
return d, len(d), err
}

@@ -758,7 +797,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
return err
}
if err := c.Unmarshal(d, m); err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+ return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
if payInfo != nil {
payInfo.uncompressedBytes = d
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
|
|
index 2ad9da7bf..8869cc906 100644
|
|
--- a/vendor/google.golang.org/grpc/server.go
|
|
+++ b/vendor/google.golang.org/grpc/server.go
|
|
@@ -43,8 +43,8 @@ import (
|
|
"google.golang.org/grpc/internal"
|
|
"google.golang.org/grpc/internal/binarylog"
|
|
"google.golang.org/grpc/internal/channelz"
|
|
- "google.golang.org/grpc/internal/grpcrand"
|
|
"google.golang.org/grpc/internal/grpcsync"
|
|
+ "google.golang.org/grpc/internal/grpcutil"
|
|
"google.golang.org/grpc/internal/transport"
|
|
"google.golang.org/grpc/keepalive"
|
|
"google.golang.org/grpc/metadata"
|
|
@@ -73,12 +73,14 @@ func init() {
|
|
internal.DrainServerTransports = func(srv *Server, addr string) {
|
|
srv.drainServerTransports(addr)
|
|
}
|
|
- internal.AddExtraServerOptions = func(opt ...ServerOption) {
|
|
- extraServerOptions = opt
|
|
+ internal.AddGlobalServerOptions = func(opt ...ServerOption) {
|
|
+ globalServerOptions = append(globalServerOptions, opt...)
|
|
}
|
|
- internal.ClearExtraServerOptions = func() {
|
|
- extraServerOptions = nil
|
|
+ internal.ClearGlobalServerOptions = func() {
|
|
+ globalServerOptions = nil
|
|
}
|
|
+ internal.BinaryLogger = binaryLogger
|
|
+ internal.JoinServerOptions = newJoinServerOption
|
|
}
|
|
|
|
var statusOK = status.New(codes.OK, "")
|
|
@@ -113,12 +115,6 @@ type serviceInfo struct {
|
|
mdata interface{}
|
|
}
|
|
|
|
-type serverWorkerData struct {
|
|
- st transport.ServerTransport
|
|
- wg *sync.WaitGroup
|
|
- stream *transport.Stream
|
|
-}
|
|
-
|
|
// Server is a gRPC server to serve RPC requests.
|
|
type Server struct {
|
|
opts serverOptions
|
|
@@ -143,7 +139,7 @@ type Server struct {
|
|
channelzID *channelz.Identifier
|
|
czData *channelzData
|
|
|
|
- serverWorkerChannels []chan *serverWorkerData
|
|
+ serverWorkerChannel chan func()
|
|
}
|
|
|
|
type serverOptions struct {
|
|
@@ -155,6 +151,7 @@ type serverOptions struct {
|
|
streamInt StreamServerInterceptor
|
|
chainUnaryInts []UnaryServerInterceptor
|
|
chainStreamInts []StreamServerInterceptor
|
|
+ binaryLogger binarylog.Logger
|
|
inTapHandle tap.ServerInHandle
|
|
statsHandlers []stats.Handler
|
|
maxConcurrentStreams uint32
|
|
@@ -174,13 +171,14 @@ type serverOptions struct {
|
|
}
|
|
|
|
var defaultServerOptions = serverOptions{
|
|
+ maxConcurrentStreams: math.MaxUint32,
|
|
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
|
|
maxSendMessageSize: defaultServerMaxSendMessageSize,
|
|
connectionTimeout: 120 * time.Second,
|
|
writeBufferSize: defaultWriteBufSize,
|
|
readBufferSize: defaultReadBufSize,
|
|
}
|
|
-var extraServerOptions []ServerOption
|
|
+var globalServerOptions []ServerOption
|
|
|
|
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
|
|
type ServerOption interface {
|
|
@@ -214,10 +212,27 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
|
|
}
|
|
}
|
|
|
|
-// WriteBufferSize determines how much data can be batched before doing a write on the wire.
|
|
-// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
|
|
-// The default value for this buffer is 32KB.
|
|
-// Zero will disable the write buffer such that each write will be on underlying connection.
|
|
+// joinServerOption provides a way to combine arbitrary number of server
|
|
+// options into one.
|
|
+type joinServerOption struct {
|
|
+ opts []ServerOption
|
|
+}
|
|
+
|
|
+func (mdo *joinServerOption) apply(do *serverOptions) {
|
|
+ for _, opt := range mdo.opts {
|
|
+ opt.apply(do)
|
|
+ }
|
|
+}
|
|
+
|
|
+func newJoinServerOption(opts ...ServerOption) ServerOption {
|
|
+ return &joinServerOption{opts: opts}
|
|
+}
|
|
+
|
|
+// WriteBufferSize determines how much data can be batched before doing a write
|
|
+// on the wire. The corresponding memory allocation for this buffer will be
|
|
+// twice the size to keep syscalls low. The default value for this buffer is
|
|
+// 32KB. Zero or negative values will disable the write buffer such that each
|
|
+// write will be on underlying connection.
|
|
// Note: A Send call may not directly translate to a write.
|
|
func WriteBufferSize(s int) ServerOption {
|
|
return newFuncServerOption(func(o *serverOptions) {
|
|
@@ -225,11 +240,10 @@ func WriteBufferSize(s int) ServerOption {
|
|
})
|
|
}
|
|
|
|
-// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
|
|
-// for one read syscall.
|
|
-// The default value for this buffer is 32KB.
|
|
-// Zero will disable read buffer for a connection so data framer can access the underlying
|
|
-// conn directly.
|
|
+// ReadBufferSize lets you set the size of read buffer, this determines how much
|
|
+// data can be read at most for one read syscall. The default value for this
|
|
+// buffer is 32KB. Zero or negative values will disable read buffer for a
|
|
+// connection so data framer can access the underlying conn directly.
|
|
func ReadBufferSize(s int) ServerOption {
|
|
return newFuncServerOption(func(o *serverOptions) {
|
|
o.readBufferSize = s
|
|
@@ -368,6 +382,9 @@ func MaxSendMsgSize(m int) ServerOption {
|
|
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
|
|
// of concurrent streams to each ServerTransport.
|
|
func MaxConcurrentStreams(n uint32) ServerOption {
|
|
+ if n == 0 {
|
|
+ n = math.MaxUint32
|
|
+ }
|
|
return newFuncServerOption(func(o *serverOptions) {
|
|
o.maxConcurrentStreams = n
|
|
})
|
|
@@ -452,6 +469,14 @@ func StatsHandler(h stats.Handler) ServerOption {
|
|
})
|
|
}
|
|
|
|
+// binaryLogger returns a ServerOption that can set the binary logger for the
|
|
+// server.
|
|
+func binaryLogger(bl binarylog.Logger) ServerOption {
|
|
+ return newFuncServerOption(func(o *serverOptions) {
|
|
+ o.binaryLogger = bl
|
|
+ })
|
|
+}
|
|
+
|
|
// UnknownServiceHandler returns a ServerOption that allows for adding a custom
|
|
// unknown service handler. The provided method is a bidi-streaming RPC service
|
|
// handler that will be invoked instead of returning the "unimplemented" gRPC
|
|
@@ -533,47 +558,40 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
|
|
const serverWorkerResetThreshold = 1 << 16
|
|
|
|
// serverWorkers blocks on a *transport.Stream channel forever and waits for
|
|
-// data to be fed by serveStreams. This allows different requests to be
|
|
+// data to be fed by serveStreams. This allows multiple requests to be
|
|
// processed by the same goroutine, removing the need for expensive stack
|
|
// re-allocations (see the runtime.morestack problem [1]).
|
|
//
|
|
// [1] https://github.com/golang/go/issues/18138
|
|
-func (s *Server) serverWorker(ch chan *serverWorkerData) {
|
|
- // To make sure all server workers don't reset at the same time, choose a
|
|
- // random number of iterations before resetting.
|
|
- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
|
|
- for completed := 0; completed < threshold; completed++ {
|
|
- data, ok := <-ch
|
|
+func (s *Server) serverWorker() {
|
|
+ for completed := 0; completed < serverWorkerResetThreshold; completed++ {
|
|
+ f, ok := <-s.serverWorkerChannel
|
|
if !ok {
|
|
return
|
|
}
|
|
- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
|
|
- data.wg.Done()
|
|
+ f()
|
|
}
|
|
- go s.serverWorker(ch)
|
|
+ go s.serverWorker()
|
|
}
|
|
|
|
-// initServerWorkers creates worker goroutines and channels to process incoming
|
|
+// initServerWorkers creates worker goroutines and a channel to process incoming
|
|
// connections to reduce the time spent overall on runtime.morestack.
|
|
func (s *Server) initServerWorkers() {
|
|
- s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
|
|
+ s.serverWorkerChannel = make(chan func())
|
|
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
|
|
- s.serverWorkerChannels[i] = make(chan *serverWorkerData)
|
|
- go s.serverWorker(s.serverWorkerChannels[i])
|
|
+ go s.serverWorker()
|
|
}
|
|
}
|
|
|
|
func (s *Server) stopServerWorkers() {
|
|
- for i := uint32(0); i < s.opts.numServerWorkers; i++ {
|
|
- close(s.serverWorkerChannels[i])
|
|
- }
|
|
+ close(s.serverWorkerChannel)
|
|
}
|
|
|
|
// NewServer creates a gRPC server which has no service registered and has not
|
|
// started to accept requests yet.
|
|
func NewServer(opt ...ServerOption) *Server {
|
|
opts := defaultServerOptions
|
|
- for _, o := range extraServerOptions {
|
|
+ for _, o := range globalServerOptions {
|
|
o.apply(&opts)
|
|
}
|
|
for _, o := range opt {
|
|
@@ -870,7 +888,7 @@ func (s *Server) drainServerTransports(addr string) {
|
|
s.mu.Lock()
|
|
conns := s.conns[addr]
|
|
for st := range conns {
|
|
- st.Drain()
|
|
+ st.Drain("")
|
|
}
|
|
s.mu.Unlock()
|
|
}
|
|
@@ -915,29 +933,29 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
|
|
}
|
|
|
|
func (s *Server) serveStreams(st transport.ServerTransport) {
|
|
- defer st.Close()
|
|
+ defer st.Close(errors.New("finished serving streams for the server transport"))
|
|
var wg sync.WaitGroup
|
|
|
|
- var roundRobinCounter uint32
|
|
+ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
|
|
st.HandleStreams(func(stream *transport.Stream) {
|
|
wg.Add(1)
|
|
+
|
|
+ streamQuota.acquire()
|
|
+ f := func() {
|
|
+ defer streamQuota.release()
|
|
+ defer wg.Done()
|
|
+ s.handleStream(st, stream, s.traceInfo(st, stream))
|
|
+ }
|
|
+
|
|
if s.opts.numServerWorkers > 0 {
|
|
- data := &serverWorkerData{st: st, wg: &wg, stream: stream}
|
|
select {
|
|
- case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
|
|
+ case s.serverWorkerChannel <- f:
|
|
+ return
|
|
default:
|
|
// If all stream workers are busy, fallback to the default code path.
|
|
- go func() {
|
|
- s.handleStream(st, stream, s.traceInfo(st, stream))
|
|
- wg.Done()
|
|
- }()
|
|
}
|
|
- } else {
|
|
- go func() {
|
|
- defer wg.Done()
|
|
- s.handleStream(st, stream, s.traceInfo(st, stream))
|
|
- }()
|
|
}
|
|
+ go f()
|
|
}, func(ctx context.Context, method string) context.Context {
|
|
if !EnableTracing {
|
|
return ctx
|
|
@@ -981,7 +999,8 @@ var _ http.Handler = (*Server)(nil)
|
|
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
|
|
if err != nil {
|
|
- http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
+ // Errors returned from transport.NewServerHandlerTransport have
|
|
+ // already been written to w.
|
|
return
|
|
}
|
|
if !s.addConn(listenerAddressForServeHTTP, st) {
|
|
@@ -1019,13 +1038,13 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
if s.conns == nil {
|
|
- st.Close()
|
|
+ st.Close(errors.New("Server.addConn called when server has already been stopped"))
|
|
return false
|
|
}
|
|
if s.drain {
|
|
// Transport added after we drained our existing conns: drain it
|
|
// immediately.
|
|
- st.Drain()
|
|
+ st.Drain("")
|
|
}
|
|
|
|
if s.conns[addr] == nil {
|
|
@@ -1123,21 +1142,16 @@ func chainUnaryServerInterceptors(s *Server) {
|
|
|
|
func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
|
|
return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
|
|
- // the struct ensures the variables are allocated together, rather than separately, since we
|
|
- // know they should be garbage collected together. This saves 1 allocation and decreases
|
|
- // time/call by about 10% on the microbenchmark.
|
|
- var state struct {
|
|
- i int
|
|
- next UnaryHandler
|
|
- }
|
|
- state.next = func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
- if state.i == len(interceptors)-1 {
|
|
- return interceptors[state.i](ctx, req, info, handler)
|
|
- }
|
|
- state.i++
|
|
- return interceptors[state.i-1](ctx, req, info, state.next)
|
|
- }
|
|
- return state.next(ctx, req)
|
|
+ return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
|
|
+ }
|
|
+}
|
|
+
|
|
+func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
|
|
+ if curr == len(interceptors)-1 {
|
|
+ return finalHandler
|
|
+ }
|
|
+ return func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
+ return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
|
|
}
|
|
}
|
|
|
|
@@ -1199,9 +1213,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
}
|
|
}()
|
|
}
|
|
-
|
|
- binlog := binarylog.GetMethodLogger(stream.Method())
|
|
- if binlog != nil {
|
|
+ var binlogs []binarylog.MethodLogger
|
|
+ if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
|
|
+ binlogs = append(binlogs, ml)
|
|
+ }
|
|
+ if s.opts.binaryLogger != nil {
|
|
+ if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
|
|
+ binlogs = append(binlogs, ml)
|
|
+ }
|
|
+ }
|
|
+ if len(binlogs) != 0 {
|
|
ctx := stream.Context()
|
|
md, _ := metadata.FromIncomingContext(ctx)
|
|
logEntry := &binarylog.ClientHeader{
|
|
@@ -1221,7 +1242,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
if peer, ok := peer.FromContext(ctx); ok {
|
|
logEntry.PeerAddr = peer.Addr
|
|
}
|
|
- binlog.Log(logEntry)
|
|
+ for _, binlog := range binlogs {
|
|
+ binlog.Log(ctx, logEntry)
|
|
+ }
|
|
}
|
|
|
|
// comp and cp are used for compression. decomp and dc are used for
|
|
@@ -1231,6 +1254,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
var comp, decomp encoding.Compressor
|
|
var cp Compressor
|
|
var dc Decompressor
|
|
+ var sendCompressorName string
|
|
|
|
// If dc is set and matches the stream's compression, use it. Otherwise, try
|
|
// to find a matching registered compressor for decomp.
|
|
@@ -1251,23 +1275,29 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
|
|
if s.opts.cp != nil {
|
|
cp = s.opts.cp
|
|
- stream.SetSendCompress(cp.Type())
|
|
+ sendCompressorName = cp.Type()
|
|
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
|
|
// Legacy compressor not specified; attempt to respond with same encoding.
|
|
comp = encoding.GetCompressor(rc)
|
|
if comp != nil {
|
|
- stream.SetSendCompress(rc)
|
|
+ sendCompressorName = comp.Name()
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if sendCompressorName != "" {
|
|
+ if err := stream.SetSendCompress(sendCompressorName); err != nil {
|
|
+ return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
|
|
}
|
|
}
|
|
|
|
var payInfo *payloadInfo
|
|
- if len(shs) != 0 || binlog != nil {
|
|
+ if len(shs) != 0 || len(binlogs) != 0 {
|
|
payInfo = &payloadInfo{}
|
|
}
|
|
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
|
|
if err != nil {
|
|
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
|
|
- channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
|
|
+ channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
|
}
|
|
return err
|
|
}
|
|
@@ -1280,17 +1310,21 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
}
|
|
for _, sh := range shs {
|
|
sh.HandleRPC(stream.Context(), &stats.InPayload{
|
|
- RecvTime: time.Now(),
|
|
- Payload: v,
|
|
- WireLength: payInfo.wireLength + headerLen,
|
|
- Data: d,
|
|
- Length: len(d),
|
|
+ RecvTime: time.Now(),
|
|
+ Payload: v,
|
|
+ Length: len(d),
|
|
+ WireLength: payInfo.compressedLength + headerLen,
|
|
+ CompressedLength: payInfo.compressedLength,
|
|
+ Data: d,
|
|
})
|
|
}
|
|
- if binlog != nil {
|
|
- binlog.Log(&binarylog.ClientMessage{
|
|
+ if len(binlogs) != 0 {
|
|
+ cm := &binarylog.ClientMessage{
|
|
Message: d,
|
|
- })
|
|
+ }
|
|
+ for _, binlog := range binlogs {
|
|
+ binlog.Log(stream.Context(), cm)
|
|
+ }
|
|
}
|
|
if trInfo != nil {
|
|
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
|
|
@@ -1314,18 +1348,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
if e := t.WriteStatus(stream, appStatus); e != nil {
|
|
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
|
}
|
|
- if binlog != nil {
|
|
+ if len(binlogs) != 0 {
|
|
if h, _ := stream.Header(); h.Len() > 0 {
|
|
// Only log serverHeader if there was header. Otherwise it can
|
|
// be trailer only.
|
|
- binlog.Log(&binarylog.ServerHeader{
|
|
+ sh := &binarylog.ServerHeader{
|
|
Header: h,
|
|
- })
|
|
+ }
|
|
+ for _, binlog := range binlogs {
|
|
+ binlog.Log(stream.Context(), sh)
|
|
+ }
|
|
}
|
|
- binlog.Log(&binarylog.ServerTrailer{
|
|
+ st := &binarylog.ServerTrailer{
|
|
Trailer: stream.Trailer(),
|
|
Err: appErr,
|
|
- })
|
|
+ }
|
|
+ for _, binlog := range binlogs {
|
|
+ binlog.Log(stream.Context(), st)
|
|
+ }
|
|
}
|
|
return appErr
|
|
}
|
|
@@ -1334,6 +1374,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
}
|
|
opts := &transport.Options{Last: true}
|
|
|
|
+ // Server handler could have set new compressor by calling SetSendCompressor.
|
|
+ // In case it is set, we need to use it for compressing outbound message.
|
|
+ if stream.SendCompress() != sendCompressorName {
|
|
+ comp = encoding.GetCompressor(stream.SendCompress())
|
|
+ }
|
|
if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
|
|
if err == io.EOF {
|
|
// The entire stream is done (for unary RPC only).
|
|
@@ -1351,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
|
|
}
|
|
}
|
|
- if binlog != nil {
|
|
+ if len(binlogs) != 0 {
|
|
h, _ := stream.Header()
|
|
- binlog.Log(&binarylog.ServerHeader{
|
|
+ sh := &binarylog.ServerHeader{
|
|
Header: h,
|
|
- })
|
|
- binlog.Log(&binarylog.ServerTrailer{
|
|
+ }
|
|
+ st := &binarylog.ServerTrailer{
|
|
Trailer: stream.Trailer(),
|
|
Err: appErr,
|
|
- })
|
|
+ }
|
|
+ for _, binlog := range binlogs {
|
|
+ binlog.Log(stream.Context(), sh)
|
|
+ binlog.Log(stream.Context(), st)
|
|
+ }
|
|
}
|
|
return err
|
|
}
|
|
- if binlog != nil {
|
|
+ if len(binlogs) != 0 {
|
|
h, _ := stream.Header()
|
|
- binlog.Log(&binarylog.ServerHeader{
|
|
+ sh := &binarylog.ServerHeader{
|
|
Header: h,
|
|
- })
|
|
- binlog.Log(&binarylog.ServerMessage{
|
|
+ }
|
|
+ sm := &binarylog.ServerMessage{
|
|
Message: reply,
|
|
- })
|
|
+ }
|
|
+ for _, binlog := range binlogs {
|
|
+ binlog.Log(stream.Context(), sh)
|
|
+ binlog.Log(stream.Context(), sm)
|
|
+ }
|
|
}
|
|
if channelz.IsOn() {
|
|
t.IncrMsgSent()
|
|
@@ -1381,14 +1434,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|
// TODO: Should we be logging if writing status failed here, like above?
|
|
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
|
|
// error or allow the stats handler to see it?
|
|
- err = t.WriteStatus(stream, statusOK)
|
|
- if binlog != nil {
|
|
- binlog.Log(&binarylog.ServerTrailer{
|
|
+ if len(binlogs) != 0 {
|
|
+ st := &binarylog.ServerTrailer{
|
|
Trailer: stream.Trailer(),
|
|
Err: appErr,
|
|
- })
|
|
+ }
|
|
+ for _, binlog := range binlogs {
|
|
+ binlog.Log(stream.Context(), st)
|
|
+ }
|
|
}
|
|
- return err
|
|
+ return t.WriteStatus(stream, statusOK)
|
|
}
|
|
|
|
// chainStreamServerInterceptors chains all stream server interceptors into one.
|
|
@@ -1414,21 +1469,16 @@ func chainStreamServerInterceptors(s *Server) {
|
|
|
|
func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
|
|
return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
|
|
- // the struct ensures the variables are allocated together, rather than separately, since we
|
|
- // know they should be garbage collected together. This saves 1 allocation and decreases
|
|
- // time/call by about 10% on the microbenchmark.
|
|
- var state struct {
|
|
- i int
|
|
- next StreamHandler
|
|
- }
|
|
- state.next = func(srv interface{}, ss ServerStream) error {
|
|
- if state.i == len(interceptors)-1 {
|
|
- return interceptors[state.i](srv, ss, info, handler)
|
|
- }
|
|
- state.i++
|
|
- return interceptors[state.i-1](srv, ss, info, state.next)
|
|
- }
|
|
- return state.next(srv, ss)
|
|
+ return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
|
|
+ }
|
|
+}
|
|
+
|
|
+func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
|
|
+ if curr == len(interceptors)-1 {
|
|
+ return finalHandler
|
|
+ }
|
|
+ return func(srv interface{}, stream ServerStream) error {
|
|
+ return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
|
|
}
|
|
}
|
|
|
|
@@ -1499,8 +1549,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|
}()
|
|
}
|
|
|
|
- ss.binlog = binarylog.GetMethodLogger(stream.Method())
|
|
- if ss.binlog != nil {
|
|
+ if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
|
|
+ ss.binlogs = append(ss.binlogs, ml)
|
|
+ }
|
|
+ if s.opts.binaryLogger != nil {
|
|
+ if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
|
|
+ ss.binlogs = append(ss.binlogs, ml)
|
|
+ }
|
|
+ }
|
|
+ if len(ss.binlogs) != 0 {
|
|
md, _ := metadata.FromIncomingContext(ctx)
|
|
logEntry := &binarylog.ClientHeader{
|
|
Header: md,
|
|
@@ -1519,7 +1576,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|
if peer, ok := peer.FromContext(ss.Context()); ok {
|
|
logEntry.PeerAddr = peer.Addr
|
|
}
|
|
- ss.binlog.Log(logEntry)
|
|
+ for _, binlog := range ss.binlogs {
|
|
+ binlog.Log(stream.Context(), logEntry)
|
|
+ }
|
|
}
|
|
|
|
// If dc is set and matches the stream's compression, use it. Otherwise, try
|
|
@@ -1541,12 +1600,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
|
|
if s.opts.cp != nil {
|
|
ss.cp = s.opts.cp
|
|
- stream.SetSendCompress(s.opts.cp.Type())
|
|
+ ss.sendCompressorName = s.opts.cp.Type()
|
|
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
|
|
// Legacy compressor not specified; attempt to respond with same encoding.
|
|
ss.comp = encoding.GetCompressor(rc)
|
|
if ss.comp != nil {
|
|
- stream.SetSendCompress(rc)
|
|
+ ss.sendCompressorName = rc
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if ss.sendCompressorName != "" {
|
|
+ if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
|
|
+ return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
|
|
}
|
|
}
|
|
|
|
@@ -1584,13 +1649,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|
ss.trInfo.tr.SetError()
|
|
ss.mu.Unlock()
|
|
}
|
|
- t.WriteStatus(ss.s, appStatus)
|
|
- if ss.binlog != nil {
|
|
- ss.binlog.Log(&binarylog.ServerTrailer{
|
|
+ if len(ss.binlogs) != 0 {
|
|
+ st := &binarylog.ServerTrailer{
|
|
Trailer: ss.s.Trailer(),
|
|
Err: appErr,
|
|
- })
|
|
+ }
|
|
+ for _, binlog := range ss.binlogs {
|
|
+ binlog.Log(stream.Context(), st)
|
|
+ }
|
|
}
|
|
+ t.WriteStatus(ss.s, appStatus)
|
|
// TODO: Should we log an error from WriteStatus here and below?
|
|
return appErr
|
|
}
|
|
@@ -1599,14 +1667,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|
ss.trInfo.tr.LazyLog(stringer("OK"), false)
|
|
ss.mu.Unlock()
|
|
}
|
|
- err = t.WriteStatus(ss.s, statusOK)
|
|
- if ss.binlog != nil {
|
|
- ss.binlog.Log(&binarylog.ServerTrailer{
|
|
+ if len(ss.binlogs) != 0 {
|
|
+ st := &binarylog.ServerTrailer{
|
|
Trailer: ss.s.Trailer(),
|
|
Err: appErr,
|
|
- })
|
|
+ }
|
|
+ for _, binlog := range ss.binlogs {
|
|
+ binlog.Log(stream.Context(), st)
|
|
+ }
|
|
}
|
|
- return err
|
|
+ return t.WriteStatus(ss.s, statusOK)
|
|
}
|
|
|
|
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
|
@@ -1748,7 +1818,7 @@ func (s *Server) Stop() {
|
|
}
|
|
for _, cs := range conns {
|
|
for st := range cs {
|
|
- st.Close()
|
|
+ st.Close(errors.New("Server.Stop called"))
|
|
}
|
|
}
|
|
if s.opts.numServerWorkers > 0 {
|
|
@@ -1784,7 +1854,7 @@ func (s *Server) GracefulStop() {
|
|
if !s.drain {
|
|
for _, conns := range s.conns {
|
|
for st := range conns {
|
|
- st.Drain()
|
|
+ st.Drain("graceful_stop")
|
|
}
|
|
}
|
|
s.drain = true
|
|
@@ -1873,6 +1943,60 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
|
|
return nil
|
|
}
|
|
|
|
+// SetSendCompressor sets a compressor for outbound messages from the server.
|
|
+// It must not be called after any event that causes headers to be sent
|
|
+// (see ServerStream.SetHeader for the complete list). Provided compressor is
|
|
+// used when below conditions are met:
|
|
+//
|
|
+// - compressor is registered via encoding.RegisterCompressor
|
|
+// - compressor name must exist in the client advertised compressor names
|
|
+// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to
|
|
+// get client supported compressor names.
|
|
+//
|
|
+// The context provided must be the context passed to the server's handler.
|
|
+// It must be noted that compressor name encoding.Identity disables the
|
|
+// outbound compression.
|
|
+// By default, server messages will be sent using the same compressor with
|
|
+// which request messages were sent.
|
|
+//
|
|
+// It is not safe to call SetSendCompressor concurrently with SendHeader and
|
|
+// SendMsg.
|
|
+//
|
|
+// # Experimental
|
|
+//
|
|
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
|
|
+// later release.
|
|
+func SetSendCompressor(ctx context.Context, name string) error {
|
|
+ stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
|
|
+ if !ok || stream == nil {
|
|
+ return fmt.Errorf("failed to fetch the stream from the given context")
|
|
+ }
|
|
+
|
|
+ if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
|
|
+ return fmt.Errorf("unable to set send compressor: %w", err)
|
|
+ }
|
|
+
|
|
+ return stream.SetSendCompress(name)
|
|
+}
|
|
+
|
|
+// ClientSupportedCompressors returns compressor names advertised by the client
|
|
+// via grpc-accept-encoding header.
|
|
+//
|
|
+// The context provided must be the context passed to the server's handler.
|
|
+//
|
|
+// # Experimental
|
|
+//
|
|
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
|
|
+// later release.
|
|
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
|
|
+ stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
|
|
+ if !ok || stream == nil {
|
|
+ return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
|
|
+ }
|
|
+
|
|
+ return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil
|
|
+}
|
|
+
|
|
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
|
|
// When called more than once, all the provided metadata will be merged.
|
|
//
|
|
@@ -1907,3 +2031,51 @@ type channelzServer struct {
|
|
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
|
|
return c.s.channelzMetric()
|
|
}
|
|
+
|
|
+// validateSendCompressor returns an error when given compressor name cannot be
|
|
+// handled by the server or the client based on the advertised compressors.
|
|
+func validateSendCompressor(name, clientCompressors string) error {
|
|
+ if name == encoding.Identity {
|
|
+ return nil
|
|
+ }
|
|
+
|
|
+ if !grpcutil.IsCompressorNameRegistered(name) {
|
|
+ return fmt.Errorf("compressor not registered %q", name)
|
|
+ }
|
|
+
|
|
+ for _, c := range strings.Split(clientCompressors, ",") {
|
|
+ if c == name {
|
|
+ return nil // found match
|
|
+ }
|
|
+ }
|
|
+ return fmt.Errorf("client does not support compressor %q", name)
|
|
+}
|
|
+
|
|
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
|
|
+// called synchronously; release may be called asynchronously.
|
|
+type atomicSemaphore struct {
|
|
+ n int64
|
|
+ wait chan struct{}
|
|
+}
|
|
+
|
|
+func (q *atomicSemaphore) acquire() {
|
|
+ if atomic.AddInt64(&q.n, -1) < 0 {
|
|
+ // We ran out of quota. Block until a release happens.
|
|
+ <-q.wait
|
|
+ }
|
|
+}
|
|
+
|
|
+func (q *atomicSemaphore) release() {
|
|
+ // N.B. the "<= 0" check below should allow for this to work with multiple
|
|
+ // concurrent calls to acquire, but also note that with synchronous calls to
|
|
+ // acquire, as our system does, n will never be less than -1. There are
|
|
+ // fairness issues (queuing) to consider if this was to be generalized.
|
|
+ if atomic.AddInt64(&q.n, 1) <= 0 {
|
|
+ // An acquire was waiting on us. Unblock it.
|
|
+ q.wait <- struct{}{}
|
|
+ }
|
|
+}
|
|
+
|
|
+func newHandlerQuota(n uint32) *atomicSemaphore {
|
|
+ return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
|
|
+}
|
|
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
|
|
index 01bbb2025..0df11fc09 100644
|
|
--- a/vendor/google.golang.org/grpc/service_config.go
|
|
+++ b/vendor/google.golang.org/grpc/service_config.go
|
|
@@ -23,8 +23,6 @@ import (
|
|
"errors"
|
|
"fmt"
|
|
"reflect"
|
|
- "strconv"
|
|
- "strings"
|
|
"time"
|
|
|
|
"google.golang.org/grpc/codes"
|
|
@@ -106,8 +104,8 @@ type healthCheckConfig struct {
|
|
|
|
type jsonRetryPolicy struct {
|
|
MaxAttempts int
|
|
- InitialBackoff string
|
|
- MaxBackoff string
|
|
+ InitialBackoff internalserviceconfig.Duration
|
|
+ MaxBackoff internalserviceconfig.Duration
|
|
BackoffMultiplier float64
|
|
RetryableStatusCodes []codes.Code
|
|
}
|
|
@@ -129,50 +127,6 @@ type retryThrottlingPolicy struct {
|
|
TokenRatio float64
|
|
}
|
|
|
|
-func parseDuration(s *string) (*time.Duration, error) {
|
|
- if s == nil {
|
|
- return nil, nil
|
|
- }
|
|
- if !strings.HasSuffix(*s, "s") {
|
|
- return nil, fmt.Errorf("malformed duration %q", *s)
|
|
- }
|
|
- ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
|
|
- if len(ss) > 2 {
|
|
- return nil, fmt.Errorf("malformed duration %q", *s)
|
|
- }
|
|
- // hasDigits is set if either the whole or fractional part of the number is
|
|
- // present, since both are optional but one is required.
|
|
- hasDigits := false
|
|
- var d time.Duration
|
|
- if len(ss[0]) > 0 {
|
|
- i, err := strconv.ParseInt(ss[0], 10, 32)
|
|
- if err != nil {
|
|
- return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
|
- }
|
|
- d = time.Duration(i) * time.Second
|
|
- hasDigits = true
|
|
- }
|
|
- if len(ss) == 2 && len(ss[1]) > 0 {
|
|
- if len(ss[1]) > 9 {
|
|
- return nil, fmt.Errorf("malformed duration %q", *s)
|
|
- }
|
|
- f, err := strconv.ParseInt(ss[1], 10, 64)
|
|
- if err != nil {
|
|
- return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
|
- }
|
|
- for i := 9; i > len(ss[1]); i-- {
|
|
- f *= 10
|
|
- }
|
|
- d += time.Duration(f)
|
|
- hasDigits = true
|
|
- }
|
|
- if !hasDigits {
|
|
- return nil, fmt.Errorf("malformed duration %q", *s)
|
|
- }
|
|
-
|
|
- return &d, nil
|
|
-}
|
|
-
|
|
type jsonName struct {
|
|
Service string
|
|
Method string
|
|
@@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) {
|
|
type jsonMC struct {
|
|
Name *[]jsonName
|
|
WaitForReady *bool
|
|
- Timeout *string
|
|
+ Timeout *internalserviceconfig.Duration
|
|
MaxRequestMessageBytes *int64
|
|
MaxResponseMessageBytes *int64
|
|
RetryPolicy *jsonRetryPolicy
|
|
@@ -226,7 +180,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
|
var rsc jsonSC
|
|
err := json.Unmarshal([]byte(js), &rsc)
|
|
if err != nil {
|
|
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
|
+ logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
|
return &serviceconfig.ParseResult{Err: err}
|
|
}
|
|
sc := ServiceConfig{
|
|
@@ -252,18 +206,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
|
if m.Name == nil {
|
|
continue
|
|
}
|
|
- d, err := parseDuration(m.Timeout)
|
|
- if err != nil {
|
|
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
|
- return &serviceconfig.ParseResult{Err: err}
|
|
- }
|
|
|
|
mc := MethodConfig{
|
|
WaitForReady: m.WaitForReady,
|
|
- Timeout: d,
|
|
+ Timeout: (*time.Duration)(m.Timeout),
|
|
}
|
|
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
|
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
|
+ logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
|
return &serviceconfig.ParseResult{Err: err}
|
|
}
|
|
if m.MaxRequestMessageBytes != nil {
|
|
@@ -283,13 +232,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
|
for i, n := range *m.Name {
|
|
path, err := n.generatePath()
|
|
if err != nil {
|
|
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
|
|
+ logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
|
return &serviceconfig.ParseResult{Err: err}
|
|
}
|
|
|
|
if _, ok := paths[path]; ok {
|
|
err = errDuplicatedName
|
|
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
|
|
+ logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
|
return &serviceconfig.ParseResult{Err: err}
|
|
}
|
|
paths[path] = struct{}{}
|
|
@@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
|
|
if jrp == nil {
|
|
return nil, nil
|
|
}
|
|
- ib, err := parseDuration(&jrp.InitialBackoff)
|
|
- if err != nil {
|
|
- return nil, err
|
|
- }
|
|
- mb, err := parseDuration(&jrp.MaxBackoff)
|
|
- if err != nil {
|
|
- return nil, err
|
|
- }
|
|
|
|
if jrp.MaxAttempts <= 1 ||
|
|
- *ib <= 0 ||
|
|
- *mb <= 0 ||
|
|
+ jrp.InitialBackoff <= 0 ||
|
|
+ jrp.MaxBackoff <= 0 ||
|
|
jrp.BackoffMultiplier <= 0 ||
|
|
len(jrp.RetryableStatusCodes) == 0 {
|
|
logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
|
|
@@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
|
|
|
|
rp := &internalserviceconfig.RetryPolicy{
|
|
MaxAttempts: jrp.MaxAttempts,
|
|
- InitialBackoff: *ib,
|
|
- MaxBackoff: *mb,
|
|
+ InitialBackoff: time.Duration(jrp.InitialBackoff),
|
|
+ MaxBackoff: time.Duration(jrp.MaxBackoff),
|
|
BackoffMultiplier: jrp.BackoffMultiplier,
|
|
RetryableStatusCodes: make(map[codes.Code]bool),
|
|
}
|
|
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
|
|
index 73a2f9266..35e7a20a0 100644
|
|
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
|
|
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
|
|
@@ -19,7 +19,7 @@
|
|
// Package serviceconfig defines types and methods for operating on gRPC
|
|
// service configs.
|
|
//
|
|
-// Experimental
|
|
+// # Experimental
|
|
//
|
|
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
|
// later release.
|
|
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 0285dcc6a..7a552a9b7 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -67,10 +67,18 @@ type InPayload struct {
Payload interface{}
// Data is the serialized message payload.
Data []byte
- // Length is the length of uncompressed data.
+
+ // Length is the size of the uncompressed payload data. Does not include any
+ // framing (gRPC or HTTP/2).
Length int
- // WireLength is the length of data on wire (compressed, signed, encrypted).
+ // CompressedLength is the size of the compressed payload data. Does not
+ // include any framing (gRPC or HTTP/2). Same as Length if compression not
+ // enabled.
+ CompressedLength int
+ // WireLength is the size of the compressed payload data plus gRPC framing.
+ // Does not include HTTP/2 framing.
WireLength int
+
// RecvTime is the time when the payload is received.
RecvTime time.Time
}
@@ -129,9 +137,15 @@ type OutPayload struct {
Payload interface{}
// Data is the serialized message payload.
Data []byte
- // Length is the length of uncompressed data.
+ // Length is the size of the uncompressed payload data. Does not include any
+ // framing (gRPC or HTTP/2).
Length int
- // WireLength is the length of data on wire (compressed, signed, encrypted).
+ // CompressedLength is the size of the compressed payload data. Does not
+ // include any framing (gRPC or HTTP/2). Same as Length if compression not
+ // enabled.
+ CompressedLength int
+ // WireLength is the size of the compressed payload data plus gRPC framing.
+ // Does not include HTTP/2 framing.
WireLength int
// SentTime is the time when the payload is sent.
SentTime time.Time
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 6d163b6e3..bcf2e4d81 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -76,22 +76,50 @@ func FromProto(s *spb.Status) *Status {

// FromError returns a Status representation of err.
//
-// - If err was produced by this package or implements the method `GRPCStatus()
-// *Status`, the appropriate Status is returned.
+// - If err was produced by this package or implements the method `GRPCStatus()
+// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
+// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
+// errors, the message returned contains the entire err.Error() text and not
+// just the wrapped status. In that case, ok is true.
//
-// - If err is nil, a Status is returned with codes.OK and no message.
+// - If err is nil, a Status is returned with codes.OK and no message, and ok
+// is true.
//
-// - Otherwise, err is an error not compatible with this package. In this
-// case, a Status is returned with codes.Unknown and err's Error() message,
-// and ok is false.
+// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
+// returns nil (which maps to Codes.OK), or if err wraps a type
+// satisfying this, a Status is returned with codes.Unknown and err's
+// Error() message, and ok is false.
+//
+// - Otherwise, err is an error not compatible with this package. In this
+// case, a Status is returned with codes.Unknown and err's Error() message,
+// and ok is false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return nil, true
}
- if se, ok := err.(interface {
- GRPCStatus() *Status
- }); ok {
- return se.GRPCStatus(), true
+ type grpcstatus interface{ GRPCStatus() *Status }
+ if gs, ok := err.(grpcstatus); ok {
+ if gs.GRPCStatus() == nil {
+ // Error has status nil, which maps to codes.OK. There
+ // is no sensible behavior for this, so we turn it into
+ // an error with codes.Unknown and discard the existing
+ // status.
+ return New(codes.Unknown, err.Error()), false
+ }
+ return gs.GRPCStatus(), true
+ }
+ var gs grpcstatus
+ if errors.As(err, &gs) {
+ if gs.GRPCStatus() == nil {
+ // Error wraps an error that has status nil, which maps
+ // to codes.OK. There is no sensible behavior for this,
+ // so we turn it into an error with codes.Unknown and
+ // discard the existing status.
+ return New(codes.Unknown, err.Error()), false
+ }
+ p := gs.GRPCStatus().Proto()
+ p.Message = err.Error()
+ return status.FromProto(p), true
}
return New(codes.Unknown, err.Error()), false
}
@@ -103,19 +131,16 @@ func Convert(err error) *Status {
return s
}

-// Code returns the Code of the error if it is a Status error, codes.OK if err
-// is nil, or codes.Unknown otherwise.
+// Code returns the Code of the error if it is a Status error or if it wraps a
+// Status error. If that is not the case, it returns codes.OK if err is nil, or
+// codes.Unknown otherwise.
func Code(err error) codes.Code {
// Don't use FromError to avoid allocation of OK status.
if err == nil {
return codes.OK
}
- if se, ok := err.(interface {
- GRPCStatus() *Status
- }); ok {
- return se.GRPCStatus().Code()
- }
- return codes.Unknown
+
+ return Convert(err).Code()
}

// FromContextError converts a context error or wrapped context error into a
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 446a91e32..10092685b 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -39,6 +39,7 @@ import (
imetadata "google.golang.org/grpc/internal/metadata"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/serviceconfig"
+ istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@@ -122,6 +123,9 @@ type ClientStream interface {
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines. It is also
// not safe to call CloseSend concurrently with SendMsg.
+ //
+ // It is not safe to modify the message after calling SendMsg. Tracing
+ // libraries and stats handlers may use the message lazily.
SendMsg(m interface{}) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the stream completes successfully. On
@@ -151,6 +155,11 @@ type ClientStream interface {
// If none of the above happen, a goroutine and a context will be leaked, and grpc
// will not call the optionally-configured stats handler with a stats.End message.
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+ if err := cc.idlenessMgr.onCallBegin(); err != nil {
+ return nil, err
+ }
+ defer cc.idlenessMgr.onCallEnd()
+
// allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts)
@@ -167,10 +176,19 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}

func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
- if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ // validate md
if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
+ // validate added
+ for _, kvs := range added {
+ for i := 0; i < len(kvs); i += 2 {
+ if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ }
+ }
}
if channelz.IsOn() {
cc.incrCallsStarted()
@@ -195,6 +213,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
if err != nil {
+ if st, ok := status.FromError(err); ok {
+ // Restrict the code to the list allowed by gRFC A54.
+ if istatus.IsRestrictedControlPlaneCode(st) {
+ err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
+ }
+ return nil, err
+ }
return nil, toRPCErr(err)
}

@@ -301,7 +326,14 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
if !cc.dopts.disableRetry {
cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
}
- cs.binlog = binarylog.GetMethodLogger(method)
+ if ml := binarylog.GetMethodLogger(method); ml != nil {
+ cs.binlogs = append(cs.binlogs, ml)
+ }
+ if cc.dopts.binaryLogger != nil {
+ if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
+ cs.binlogs = append(cs.binlogs, ml)
+ }
+ }

// Pick the transport to use and create a new stream on the transport.
// Assign cs.attempt upon success.
@@ -322,7 +354,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
return nil, err
}

- if cs.binlog != nil {
+ if len(cs.binlogs) != 0 {
md, _ := metadata.FromOutgoingContext(ctx)
logEntry := &binarylog.ClientHeader{
OnClientSide: true,
@@ -336,7 +368,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
logEntry.Timeout = 0
}
}
- cs.binlog.Log(logEntry)
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, logEntry)
+ }
}

if desc != unaryStreamDesc {
@@ -399,7 +433,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx = trace.NewContext(ctx, trInfo.tr)
}

- if cs.cc.parsedTarget.Scheme == "xds" {
+ if cs.cc.parsedTarget.URL.Scheme == "xds" {
// Add extra metadata (metadata that will be added by transport) to context
// so the balancer can see them.
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
@@ -421,7 +455,7 @@ func (a *csAttempt) getTransport() error {
cs := a.cs

var err error
- a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
if de, ok := err.(dropError); ok {
err = de.error
@@ -438,6 +472,25 @@ func (a *csAttempt) getTransport() error {
func (a *csAttempt) newStream() error {
cs := a.cs
cs.callHdr.PreviousAttempts = cs.numRetries
+
+ // Merge metadata stored in PickResult, if any, with existing call metadata.
+ // It is safe to overwrite the csAttempt's context here, since all state
+ // maintained in it are local to the attempt. When the attempt has to be
+ // retried, a new instance of csAttempt will be created.
+ if a.pickResult.Metadata != nil {
+ // We currently do not have a function it the metadata package which
+ // merges given metadata with existing metadata in a context. Existing
+ // function `AppendToOutgoingContext()` takes a variadic argument of key
+ // value pairs.
+ //
+ // TODO: Make it possible to retrieve key value pairs from metadata.MD
+ // in a form passable to AppendToOutgoingContext(), or create a version
+ // of AppendToOutgoingContext() that accepts a metadata.MD.
+ md, _ := metadata.FromOutgoingContext(a.ctx)
+ md = metadata.Join(md, a.pickResult.Metadata)
+ a.ctx = metadata.NewOutgoingContext(a.ctx, md)
+ }
+
s, err := a.t.NewStream(a.ctx, cs.callHdr)
if err != nil {
nse, ok := err.(*transport.NewStreamError)
@@ -480,7 +533,7 @@ type clientStream struct {

retryThrottler *retryThrottler // The throttler active when the RPC began.

- binlog binarylog.MethodLogger // Binary logger, can be nil.
+ binlogs []binarylog.MethodLogger
// serverHeaderBinlogged is a boolean for whether server header has been
// logged. Server header will be logged when the first time one of those
// happens: stream.Header(), stream.Recv().
@@ -512,12 +565,12 @@ type clientStream struct {
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
- ctx context.Context
- cs *clientStream
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- done func(balancer.DoneInfo)
+ ctx context.Context
+ cs *clientStream
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ pickResult balancer.PickResult

finished bool
dc Decompressor
@@ -735,17 +788,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())

func (cs *clientStream) Header() (metadata.MD, error) {
var m metadata.MD
+ noHeader := false
err := cs.withRetry(func(a *csAttempt) error {
var err error
m, err = a.s.Header()
+ if err == transport.ErrNoHeaders {
+ noHeader = true
+ return nil
+ }
return toRPCErr(err)
}, cs.commitAttemptLocked)
+
if err != nil {
cs.finish(err)
return nil, err
}
- if cs.binlog != nil && !cs.serverHeaderBinlogged {
- // Only log if binary log is on and header has not been logged.
+
+ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader {
+ // Only log if binary log is on and header has not been logged, and
+ // there is actually headers to log.
logEntry := &binarylog.ServerHeader{
OnClientSide: true,
Header: m,
@@ -754,8 +815,10 @@ func (cs *clientStream) Header() (metadata.MD, error) {
if peer, ok := peer.FromContext(cs.Context()); ok {
logEntry.PeerAddr = peer.Addr
}
- cs.binlog.Log(logEntry)
cs.serverHeaderBinlogged = true
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, logEntry)
+ }
}
return m, nil
}
@@ -829,38 +892,44 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
return a.sendMsg(m, hdr, payload, data)
}
err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
- if cs.binlog != nil && err == nil {
- cs.binlog.Log(&binarylog.ClientMessage{
+ if len(cs.binlogs) != 0 && err == nil {
+ cm := &binarylog.ClientMessage{
OnClientSide: true,
Message: data,
- })
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, cm)
+ }
}
return err
}

func (cs *clientStream) RecvMsg(m interface{}) error {
- if cs.binlog != nil && !cs.serverHeaderBinlogged {
+ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
// Call Header() to binary log header if it's not already logged.
cs.Header()
}
var recvInfo *payloadInfo
- if cs.binlog != nil {
+ if len(cs.binlogs) != 0 {
recvInfo = &payloadInfo{}
}
err := cs.withRetry(func(a *csAttempt) error {
return a.recvMsg(m, recvInfo)
}, cs.commitAttemptLocked)
- if cs.binlog != nil && err == nil {
- cs.binlog.Log(&binarylog.ServerMessage{
+ if len(cs.binlogs) != 0 && err == nil {
+ sm := &binarylog.ServerMessage{
OnClientSide: true,
Message: recvInfo.uncompressedBytes,
- })
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, sm)
+ }
}
if err != nil || !cs.desc.ServerStreams {
// err != nil or non-server-streaming indicates end of stream.
cs.finish(err)

- if cs.binlog != nil {
+ if len(cs.binlogs) != 0 {
// finish will not log Trailer. Log Trailer here.
logEntry := &binarylog.ServerTrailer{
OnClientSide: true,
@@ -873,7 +942,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error {
if peer, ok := peer.FromContext(cs.Context()); ok {
logEntry.PeerAddr = peer.Addr
}
- cs.binlog.Log(logEntry)
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, logEntry)
+ }
}
}
return err
@@ -894,10 +965,13 @@ func (cs *clientStream) CloseSend() error {
return nil
}
cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
- if cs.binlog != nil {
- cs.binlog.Log(&binarylog.ClientHalfClose{
+ if len(cs.binlogs) != 0 {
+ chc := &binarylog.ClientHalfClose{
OnClientSide: true,
- })
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, chc)
+ }
}
// We never returned an error here for reasons.
return nil
@@ -914,6 +988,9 @@ func (cs *clientStream) finish(err error) {
return
}
cs.finished = true
+ for _, onFinish := range cs.callInfo.onFinish {
+ onFinish(err)
+ }
cs.commitAttemptLocked()
if cs.attempt != nil {
cs.attempt.finish(err)
@@ -930,10 +1007,13 @@ func (cs *clientStream) finish(err error) {
//
// Only one of cancel or trailer needs to be logged. In the cases where
// users don't call RecvMsg, users must have already canceled the RPC.
- if cs.binlog != nil && status.Code(err) == codes.Canceled {
- cs.binlog.Log(&binarylog.Cancel{
+ if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled {
+ c := &binarylog.Cancel{
OnClientSide: true,
- })
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, c)
+ }
}
if err == nil {
cs.retryThrottler.successfulRPC()
@@ -1005,6 +1085,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
}
return io.EOF // indicates successful end of stream.
}
+
return toRPCErr(err)
}
if a.trInfo != nil {
@@ -1020,9 +1101,10 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- WireLength: payInfo.wireLength + headerLen,
- Length: len(payInfo.uncompressedBytes),
+ Data: payInfo.uncompressedBytes,
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
+ Length: len(payInfo.uncompressedBytes),
})
}
if channelz.IsOn() {
@@ -1061,12 +1143,12 @@ func (a *csAttempt) finish(err error) {
tr = a.s.Trailer()
}

- if a.done != nil {
+ if a.pickResult.Done != nil {
br := false
if a.s != nil {
br = a.s.BytesReceived()
}
- a.done(balancer.DoneInfo{
+ a.pickResult.Done(balancer.DoneInfo{
Err: err,
Trailer: tr,
BytesSent: a.s != nil,
@@ -1191,14 +1273,19 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
as.p = &parser{r: s}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
- // Listen on cc and stream contexts to cleanup when the user closes the
- // ClientConn or cancels the stream context. In all other cases, an error
- // should already be injected into the recv buffer by the transport, which
- // the client will eventually receive, and then we will cancel the stream's
- // context in clientStream.finish.
+ // Listen on stream context to cleanup when the stream context is
+ // canceled. Also listen for the addrConn's context in case the
+ // addrConn is closed or reconnects to a different address. In all
+ // other cases, an error should already be injected into the recv
+ // buffer by the transport, which the client will eventually receive,
+ // and then we will cancel the stream's context in
+ // addrConnStream.finish.
go func() {
+ ac.mu.Lock()
+ acCtx := ac.ctx
+ ac.mu.Unlock()
select {
- case <-ac.ctx.Done():
+ case <-acCtx.Done():
as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
case <-ctx.Done():
as.finish(toRPCErr(ctx.Err()))
@@ -1422,6 +1509,9 @@ type ServerStream interface {
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines.
+ //
+ // It is not safe to modify the message after calling SendMsg. Tracing
+ // libraries and stats handlers may use the message lazily.
SendMsg(m interface{}) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the client has performed a CloseSend. On
@@ -1447,13 +1537,15 @@ type serverStream struct {
comp encoding.Compressor
decomp encoding.Compressor

+ sendCompressorName string
+
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo

statsHandler []stats.Handler

- binlog binarylog.MethodLogger
+ binlogs []binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
// will happen when one of the following two happens: stream.SendHeader(),
// stream.Send().
@@ -1487,12 +1579,15 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
}

err = ss.t.WriteHeader(ss.s, md)
- if ss.binlog != nil && !ss.serverHeaderBinlogged {
+ if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
- ss.binlog.Log(&binarylog.ServerHeader{
+ sh := &binarylog.ServerHeader{
Header: h,
- })
+ }
ss.serverHeaderBinlogged = true
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, sh)
+ }
}
return err
}
@@ -1536,6 +1631,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
}()

+ // Server handler could have set new compressor by calling SetSendCompressor.
+ // In case it is set, we need to use it for compressing outbound message.
+ if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
+ ss.comp = encoding.GetCompressor(sendCompressorsName)
+ ss.sendCompressorName = sendCompressorsName
+ }
+
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
if err != nil {
@@ -1549,17 +1651,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
- if ss.binlog != nil {
+ if len(ss.binlogs) != 0 {
if !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
- ss.binlog.Log(&binarylog.ServerHeader{
+ sh := &binarylog.ServerHeader{
Header: h,
- })
+ }
ss.serverHeaderBinlogged = true
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, sh)
+ }
}
- ss.binlog.Log(&binarylog.ServerMessage{
+ sm := &binarylog.ServerMessage{
Message: data,
- })
+ }
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, sm)
+ }
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
@@ -1598,13 +1706,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
}()
var payInfo *payloadInfo
- if len(ss.statsHandler) != 0 || ss.binlog != nil {
+ if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
if err == io.EOF {
- if ss.binlog != nil {
- ss.binlog.Log(&binarylog.ClientHalfClose{})
+ if len(ss.binlogs) != 0 {
+ chc := &binarylog.ClientHalfClose{}
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, chc)
+ }
}
return err
}
@@ -1619,16 +1730,20 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- WireLength: payInfo.wireLength + headerLen,
- Length: len(payInfo.uncompressedBytes),
+ Data: payInfo.uncompressedBytes,
+ Length: len(payInfo.uncompressedBytes),
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
})
}
}
- if ss.binlog != nil {
- ss.binlog.Log(&binarylog.ClientMessage{
+ if len(ss.binlogs) != 0 {
+ cm := &binarylog.ClientMessage{
Message: payInfo.uncompressedBytes,
- })
+ }
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, cm)
+ }
}
return nil
}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index dbf34e6bb..bfa5dfa40 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -19,7 +19,7 @@
// Package tap defines the function handles which are executed on the transport
// layer of gRPC-Go and related information.
//
-// Experimental
+// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 8934f06bc..3cc754062 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc

// Version is the current grpc version.
-const Version = "1.49.0"
+const Version = "1.56.3"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index c3fc8253b..a8e4732b3 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then
github.com/client9/misspell/cmd/misspell
popd
if [[ -z "${VET_SKIP_PROTO}" ]]; then
- if [[ "${TRAVIS}" = "true" ]]; then
- PROTOBUF_VERSION=3.14.0
- PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
- pushd /home/travis
- wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
- unzip ${PROTOC_FILENAME}
- bin/protoc --version
- popd
- elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
- PROTOBUF_VERSION=3.14.0
+ if [[ "${GITHUB_ACTIONS}" = "true" ]]; then
+ PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files.
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
pushd /home/runner/go
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
@@ -66,8 +58,20 @@ elif [[ "$#" -ne 0 ]]; then
die "Unknown argument(s): $*"
fi

+# - Check that generated proto files are up to date.
+if [[ -z "${VET_SKIP_PROTO}" ]]; then
+ make proto && git status --porcelain 2>&1 | fail_on_output || \
+ (git status; git --no-pager diff; exit 1)
+fi
+
+if [[ -n "${VET_ONLY_PROTO}" ]]; then
+ exit 0
+fi
+
# - Ensure all source files contain a copyright message.
-not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go'
+# (Done in two parts because Darwin "git grep" has broken support for compound
+# exclusion matches.)
+(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output

# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
not grep 'func Test[^(]' *_test.go
@@ -81,7 +85,7 @@ not git grep -l 'x/net/context' -- "*.go"
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'

# - Do not call grpclog directly. Use grpclog.Component instead.
-git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
+git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'

# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
@@ -91,13 +95,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.

misspell -error .

-# - Check that generated proto files are up to date.
-if [[ -z "${VET_SKIP_PROTO}" ]]; then
- PATH="/home/travis/bin:${PATH}" make proto && \
- git status --porcelain 2>&1 | fail_on_output || \
- (git status; git --no-pager diff; exit 1)
-fi
-
# - gofmt, goimports, golint (with exceptions for generated code), go vet,
# go mod tidy.
# Perform these checks on each module inside gRPC.
@@ -109,7 +106,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"

- go mod tidy
+ go mod tidy -compat=1.17
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
popd
@@ -119,8 +116,9 @@ done
#
# TODO(dfawley): don't use deprecated functions in examples or first-party
# plugins.
+# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
SC_OUT="$(mktemp)"
-staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
+staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
# Error if anything other than deprecation warnings are printed.
not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
# Only ignore the following deprecated types/fields/functions.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 414fb1c0e..764c5a790 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -16,7 +16,7 @@ github.com/blang/semver
# github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8
## explicit
github.com/c9s/goprocinfo/linux
-# github.com/cespare/xxhash/v2 v2.1.2
+# github.com/cespare/xxhash/v2 v2.2.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/cheggaaa/pb/v3 v3.1.0
@@ -128,7 +128,7 @@ github.com/gogo/protobuf/gogoproto
github.com/gogo/protobuf/proto
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
github.com/gogo/protobuf/sortkeys
-# github.com/golang/glog v1.0.0 => ./staging/src/github.com/golang/glog
+# github.com/golang/glog v1.1.0 => ./staging/src/github.com/golang/glog
## explicit; go 1.12
github.com/golang/glog
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
@@ -492,7 +492,6 @@ golang.org/x/mod/semver
## explicit; go 1.17
golang.org/x/net/bpf
golang.org/x/net/context
-golang.org/x/net/context/ctxhttp
golang.org/x/net/html
golang.org/x/net/html/atom
golang.org/x/net/html/charset
@@ -508,8 +507,8 @@ golang.org/x/net/ipv4
golang.org/x/net/ipv6
golang.org/x/net/proxy
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
-## explicit; go 1.11
+# golang.org/x/oauth2 v0.7.0
+## explicit; go 1.17
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.3.0
@@ -581,10 +580,10 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20220720214146-176da50484ac
-## explicit; go 1.17
+# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
+## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.49.0
+# google.golang.org/grpc v1.56.3
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes
--
2.42.0